Lely core libraries  2.2.5
fd_loop.c
24 #include "io.h"
25 
26 #if _POSIX_C_SOURCE >= 200112L
27 
28 #if !LELY_NO_THREADS
29 #include <lely/libc/stdatomic.h>
30 #endif
31 #include <lely/ev/std_exec.h>
32 #include <lely/ev/task.h>
33 #include <lely/io2/posix/fd_loop.h>
34 #include <lely/util/sllist.h>
35 #include <lely/util/util.h>
36 
37 #include <assert.h>
38 #include <errno.h>
39 #include <stdint.h>
40 #include <stdlib.h>
41 
42 #if !LELY_NO_THREADS
43 #include <pthread.h>
44 #endif
45 #include <unistd.h>
46 
47 #ifdef __linux__
48 #include <sys/eventfd.h>
49 #else
50 #include "fd.h"
51 #endif
52 
53 static void io_fd_loop_std_exec_impl_on_task_init(ev_std_exec_impl_t *impl);
54 static void io_fd_loop_std_exec_impl_on_task_fini(ev_std_exec_impl_t *impl);
55 static void io_fd_loop_std_exec_impl_post(
56  ev_std_exec_impl_t *impl, struct ev_task *task);
57 static size_t io_fd_loop_std_exec_impl_abort(
58  ev_std_exec_impl_t *impl, struct ev_task *task);
59 
60 // clang-format off
61 static const struct ev_std_exec_impl_vtbl io_fd_loop_std_exec_impl_vtbl = {
62  &io_fd_loop_std_exec_impl_on_task_init,
63  &io_fd_loop_std_exec_impl_on_task_fini,
64  &io_fd_loop_std_exec_impl_post,
65  &io_fd_loop_std_exec_impl_abort
66 };
67 // clang-format on
68 
69 static int io_fd_loop_svc_notify_fork(struct io_svc *svc, enum io_fork_event e);
70 static void io_fd_loop_svc_shutdown(struct io_svc *svc);
71 
72 // clang-format off
73 static const struct io_svc_vtbl io_fd_loop_svc_vtbl = {
74  &io_fd_loop_svc_notify_fork,
75  &io_fd_loop_svc_shutdown
76 };
77 // clang-format on
78 
 80 struct io_fd_loop {
 84  io_poll_t *poll;
 86  struct io_svc svc;
 90  io_ctx_t *ctx;
 95  const struct ev_std_exec_impl_vtbl *impl_vptr;
 97  struct ev_std_exec exec;
99  struct io_poll_watch watch;
101  int fd[2];
102 #ifndef __linux__
103  int wfd;
104 #endif
105 #if !LELY_NO_THREADS
107  pthread_mutex_t mtx;
108 #endif
109  unsigned shutdown : 1;
110  unsigned stopped : 1;
111  unsigned running : 1;
118 #if LELY_NO_THREADS || LELY_NO_ATOMICS
119  size_t ntasks;
120 #else
121  atomic_size_t ntasks;
122 #endif
124  struct sllist queue;
125 };
126 
127 static inline io_fd_loop_t *io_fd_loop_from_impl(
128  const ev_std_exec_impl_t *impl);
129 static inline io_fd_loop_t *io_fd_loop_from_svc(const struct io_svc *svc);
130 
131 static void io_fd_loop_watch_func(struct io_poll_watch *watch, int events);
132 
133 static struct ev_task *io_fd_loop_do_pop(io_fd_loop_t *loop);
134 
135 static int io_fd_loop_open(io_fd_loop_t *loop);
136 static int io_fd_loop_close(io_fd_loop_t *loop);
137 static int io_fd_loop_read(io_fd_loop_t *loop);
138 static int io_fd_loop_write(io_fd_loop_t *loop);
139 
140 void *
141 io_fd_loop_alloc(void)
142 {
143  return malloc(sizeof(io_fd_loop_t));
144 }
145 
146 void
147 io_fd_loop_free(void *ptr)
148 {
149  free(ptr);
150 }
151 
152 io_fd_loop_t *
153 io_fd_loop_init(io_fd_loop_t *loop, io_poll_t *poll)
154 {
155  assert(loop);
156  assert(poll);
157 
158  int errsv = 0;
159 
160  loop->poll = poll;
161 
162  loop->svc = (struct io_svc)IO_SVC_INIT(&io_fd_loop_svc_vtbl);
163  loop->ctx = io_poll_get_ctx(loop->poll);
164  assert(loop->ctx);
165 
166  loop->impl_vptr = &io_fd_loop_std_exec_impl_vtbl;
167  ev_std_exec_init(io_fd_loop_get_exec(loop), &loop->impl_vptr);
168 
169  loop->watch = (struct io_poll_watch)IO_POLL_WATCH_INIT(
170  &io_fd_loop_watch_func);
171 
172  loop->fd[1] = loop->fd[0] = -1;
173 
174 #if !LELY_NO_THREADS
175  if ((errsv = pthread_mutex_init(&loop->mtx, NULL)))
176  goto error_init_mtx;
177 #endif
178 
179  loop->shutdown = 0;
180  loop->stopped = 0;
181  loop->running = 0;
182 
183 #if LELY_NO_THREADS || LELY_NO_ATOMICS
184  loop->ntasks = 0;
185 #else
186  atomic_init(&loop->ntasks, 0);
187 #endif
188  sllist_init(&loop->queue);
189 
190  if (io_fd_loop_open(loop) == -1) {
191  errsv = errno;
192  goto error_open;
193  }
194 
 195  io_ctx_insert(loop->ctx, &loop->svc);
 196 
 197  return loop;
198 
199  // io_fd_loop_close(loop);
200 error_open:
201 #if !LELY_NO_THREADS
202  pthread_mutex_destroy(&loop->mtx);
203 error_init_mtx:
204 #endif
205  errno = errsv;
206  return NULL;
207 }
208 
209 void
210 io_fd_loop_fini(io_fd_loop_t *loop)
211 {
212  assert(loop);
213 
214  io_ctx_remove(loop->ctx, &loop->svc);
215 
216  io_fd_loop_close(loop);
217 
218 #if !LELY_NO_THREADS
219  pthread_mutex_destroy(&loop->mtx);
220 #endif
221 }
222 
 223 io_fd_loop_t *
 224 io_fd_loop_create(io_poll_t *poll)
 225 {
226  int errsv = 0;
227 
228  io_fd_loop_t *loop = io_fd_loop_alloc();
229  if (!loop) {
230  errsv = errno;
231  goto error_alloc;
232  }
233 
234  io_fd_loop_t *tmp = io_fd_loop_init(loop, poll);
235  if (!tmp) {
236  errsv = errno;
237  goto error_init;
238  }
239  loop = tmp;
240 
241  return loop;
242 
243 error_init:
244  io_fd_loop_free(loop);
245 error_alloc:
246  errno = errsv;
247  return NULL;
248 }
249 
 250 void
 251 io_fd_loop_destroy(io_fd_loop_t *loop)
 252 {
253  if (loop) {
254  io_fd_loop_fini(loop);
255  io_fd_loop_free(loop);
256  }
257 }
258 
 259 ev_poll_t *
 260 io_fd_loop_get_poll(const io_fd_loop_t *loop)
 261 {
262  assert(loop);
263 
264  return io_poll_get_poll(loop->poll);
265 }
266 
 267 ev_exec_t *
 268 io_fd_loop_get_exec(const io_fd_loop_t *loop)
 269 {
270  assert(loop);
271 
272  return &loop->exec.exec_vptr;
273 }
274 
 275 int
 276 io_fd_loop_get_fd(const io_fd_loop_t *loop)
 277 {
278  assert(loop);
279 
280  return loop->fd[0];
281 }
282 
 283 void
 284 io_fd_loop_stop(io_fd_loop_t *loop)
 285 {
286  assert(loop);
287 
288 #if !LELY_NO_THREADS
289  while (pthread_mutex_lock(&loop->mtx) == EINTR)
290  ;
291 #endif
292  loop->stopped = 1;
293 #if !LELY_NO_THREADS
294  pthread_mutex_unlock(&loop->mtx);
295 #endif
296 }
297 
 298 int
 299 io_fd_loop_stopped(io_fd_loop_t *loop)
 300 {
301 #if !LELY_NO_THREADS
302  while (pthread_mutex_lock(&loop->mtx) == EINTR)
303  ;
304 #endif
305  int stopped = loop->stopped;
306 #if !LELY_NO_THREADS
307  pthread_mutex_unlock(&loop->mtx);
308 #endif
309  return stopped;
310 }
311 
 312 void
 313 io_fd_loop_restart(io_fd_loop_t *loop)
 314 {
315  assert(loop);
316 
317 #if !LELY_NO_THREADS
318  while (pthread_mutex_lock(&loop->mtx) == EINTR)
319  ;
320 #endif
321  loop->stopped = 0;
322 #if !LELY_NO_THREADS
323  pthread_mutex_unlock(&loop->mtx);
324 #endif
325 }
326 
 327 size_t
 328 io_fd_loop_run(io_fd_loop_t *loop)
 329 {
330  size_t n = 0;
331  while (io_fd_loop_run_one(loop))
332  n += n < SIZE_MAX;
333  return n;
334 }
335 
336 size_t
337 io_fd_loop_run_one(io_fd_loop_t *loop)
338 {
339  assert(loop);
340 
341 #if !LELY_NO_THREADS
342  while (pthread_mutex_lock(&loop->mtx) == EINTR)
343  ;
344 #endif
345  struct ev_task *task = io_fd_loop_do_pop(loop);
346 #if !LELY_NO_THREADS
347  pthread_mutex_unlock(&loop->mtx);
348 #endif
349  if (!task)
350  return 0;
351 
352  assert(task->exec);
353  ev_exec_run(task->exec, task);
354 
355  return 1;
356 }
357 
358 static void
359 io_fd_loop_std_exec_impl_on_task_init(ev_std_exec_impl_t *impl)
360 {
361  io_fd_loop_t *loop = io_fd_loop_from_impl(impl);
362 
363 #if LELY_NO_THREADS || LELY_NO_ATOMICS
364  loop->ntasks++;
365 #else
366  atomic_fetch_add_explicit(&loop->ntasks, 1, memory_order_relaxed);
367 #endif
368 }
369 
370 static void
371 io_fd_loop_std_exec_impl_on_task_fini(ev_std_exec_impl_t *impl)
372 {
373  io_fd_loop_t *loop = io_fd_loop_from_impl(impl);
374 
375 #if LELY_NO_THREADS || LELY_NO_ATOMICS
376  if (!--loop->ntasks) {
377 #else
378  if (atomic_fetch_sub_explicit(&loop->ntasks, 1, memory_order_release)
379  == 1) {
380  atomic_thread_fence(memory_order_acquire);
381 #endif
382 #if !LELY_NO_THREADS
383  while (pthread_mutex_lock(&loop->mtx) == EINTR)
384  ;
385 #endif
386  if (sllist_empty(&loop->queue))
387  loop->stopped = 1;
388 #if !LELY_NO_THREADS
389  pthread_mutex_unlock(&loop->mtx);
390 #endif
391  }
392 }
393 
394 static void
395 io_fd_loop_std_exec_impl_post(ev_std_exec_impl_t *impl, struct ev_task *task)
396 {
397  io_fd_loop_t *loop = io_fd_loop_from_impl(impl);
398  assert(task);
399 
400 #if !LELY_NO_THREADS
401  while (pthread_mutex_lock(&loop->mtx) == EINTR)
402  ;
403 #endif
404  sllist_push_back(&loop->queue, &task->_node);
405  int running = !loop->shutdown && loop->running;
406 #if !LELY_NO_THREADS
407  pthread_mutex_unlock(&loop->mtx);
408 #endif
409 
410  if (!running) {
411  int errsv = errno;
412  if (io_fd_loop_write(loop) == -1)
413  errno = errsv;
414  }
415 }
416 
417 static size_t
418 io_fd_loop_std_exec_impl_abort(ev_std_exec_impl_t *impl, struct ev_task *task)
419 {
420  io_fd_loop_t *loop = io_fd_loop_from_impl(impl);
421 
422  struct sllist queue;
423  sllist_init(&queue);
424 
425 #if !LELY_NO_THREADS
426  while (pthread_mutex_lock(&loop->mtx) == EINTR)
427  ;
428 #endif
429  if (!task)
430  sllist_append(&queue, &loop->queue);
431  else if (sllist_remove(&loop->queue, &task->_node))
432  sllist_push_back(&queue, &task->_node);
433 #if !LELY_NO_THREADS
434  pthread_mutex_unlock(&loop->mtx);
435 #endif
436 
437  size_t n = 0;
438  while (sllist_pop_front(&queue))
439  n += n < SIZE_MAX;
440  return n;
441 }
442 
443 static int
444 io_fd_loop_svc_notify_fork(struct io_svc *svc, enum io_fork_event e)
445 {
446  io_fd_loop_t *loop = io_fd_loop_from_svc(svc);
447 
448  if (e != IO_FORK_CHILD || loop->shutdown)
449  return 0;
450 
451  int result = 0;
452  int errsv = errno;
453 
454  if (io_fd_loop_close(loop) == -1 && !result) {
455  errsv = errno;
456  result = -1;
457  }
458 
459  if (io_fd_loop_open(loop) == -1 && !result) {
460  errsv = errno;
461  result = -1;
462  }
463 
464  if (!sllist_empty(&loop->queue) && io_fd_loop_write(loop) == -1
465  && !result) {
466  errsv = errno;
467  result = -1;
468  }
469 
470  errno = errsv;
471  return result;
472 }
473 
474 static void
475 io_fd_loop_svc_shutdown(struct io_svc *svc)
476 {
477  io_fd_loop_t *loop = io_fd_loop_from_svc(svc);
478 
479 #if !LELY_NO_THREADS
480  while (pthread_mutex_lock(&loop->mtx) == EINTR)
481  ;
482 #endif
483  if (!loop->shutdown) {
484  loop->shutdown = 1;
485  // Stop monitoring I/O events.
486  io_poll_watch(loop->poll, loop->fd[0], 0, &loop->watch);
487  }
488 #if !LELY_NO_THREADS
489  pthread_mutex_unlock(&loop->mtx);
490 #endif
491 }
492 
493 static inline io_fd_loop_t *
494 io_fd_loop_from_impl(const ev_std_exec_impl_t *impl)
495 {
496  assert(impl);
497 
498  return structof(impl, io_fd_loop_t, impl_vptr);
499 }
500 
501 static inline io_fd_loop_t *
502 io_fd_loop_from_svc(const struct io_svc *svc)
503 {
504  assert(svc);
505 
506  return structof(svc, io_fd_loop_t, svc);
507 }
508 
509 static void
510 io_fd_loop_watch_func(struct io_poll_watch *watch, int events)
511 {
512  assert(watch);
513  io_fd_loop_t *loop = structof(watch, io_fd_loop_t, watch);
514  (void)events;
515 
516  int errsv = errno;
517 
518  io_fd_loop_read(loop);
519 
520 #if !LELY_NO_THREADS
521  while (pthread_mutex_lock(&loop->mtx) == EINTR)
522  ;
523 #endif
524  assert(!loop->running);
525  loop->running = 1;
526  struct ev_task *task;
527  while (!loop->shutdown && (task = io_fd_loop_do_pop(loop))) {
528 #if !LELY_NO_THREADS
529  pthread_mutex_unlock(&loop->mtx);
530 #endif
531  assert(task->exec);
532  ev_exec_run(task->exec, task);
533 #if !LELY_NO_THREADS
534  while (pthread_mutex_lock(&loop->mtx) == EINTR)
535  ;
536 #endif
537  }
538  if (!loop->shutdown)
539  io_poll_watch(loop->poll, loop->fd[0], IO_EVENT_IN, watch);
540  loop->running = 0;
541 #if !LELY_NO_THREADS
542  pthread_mutex_unlock(&loop->mtx);
543 #endif
544 
545  errno = errsv;
546 }
547 
548 static struct ev_task *
549 io_fd_loop_do_pop(io_fd_loop_t *loop)
550 {
551  assert(loop);
552 
553  if (loop->stopped)
554  return NULL;
555 
 556  struct ev_task *task =
 557  ev_task_from_node(sllist_pop_front(&loop->queue));
 558 #if LELY_NO_THREADS || LELY_NO_ATOMICS
559  if (!task && !loop->ntasks)
560 #else
561  // clang-format off
562  if (!task && !atomic_load_explicit((atomic_size_t *)&loop->ntasks,
563  memory_order_relaxed))
564  // clang-format on
565 #endif
566  loop->stopped = 1;
567  return task;
568 }
569 
570 static int
571 io_fd_loop_open(io_fd_loop_t *loop)
572 {
573  assert(loop);
574 
575  int errsv = 0;
576 
577  if (io_fd_loop_close(loop) == -1) {
578  errsv = errno;
579  goto error_close;
580  }
581 
582 #ifdef __linux__
583  loop->fd[1] = loop->fd[0] = eventfd(0, EFD_CLOEXEC | EFD_NONBLOCK);
 584  if (loop->fd[0] == -1) {
585  errsv = errno;
586  goto error_eventfd;
587  }
588 #else
589  if (pipe(loop->fd) == -1) {
590  errsv = errno;
591  goto error_pipe;
592  }
593 
594  if (io_fd_set_cloexec(loop->fd[0]) == -1
595  || io_fd_set_cloexec(loop->fd[1]) == -1) {
596  errsv = errno;
597  goto error_set_cloexec;
598  }
599 
600  if (io_fd_set_nonblock(loop->fd[0]) == -1
601  || io_fd_set_nonblock(loop->fd[1]) == -1) {
602  errsv = errno;
603  goto error_set_nonblock;
604  }
605 #endif
606 
607  if (io_poll_watch(loop->poll, loop->fd[0], IO_EVENT_IN, &loop->watch)
608  == -1) {
609  errsv = errno;
610  goto error_poll_watch;
611  }
612 
613  return 0;
614 
615 error_poll_watch:
616 #ifdef __linux__
617  close(loop->fd[0]);
618 error_eventfd:
619 #else
620 error_set_nonblock:
621 error_set_cloexec:
622  close(loop->fd[1]);
623  close(loop->fd[0]);
624 error_pipe:
625 #endif
626  loop->fd[1] = loop->fd[0] = -1;
627 error_close:
628  errno = errsv;
629  return -1;
630 }
631 
632 static int
633 io_fd_loop_close(io_fd_loop_t *loop)
634 {
635  assert(loop);
636 
637  if (loop->fd[0] == -1)
638  return 0;
639 
640  int result = 0;
641  int errsv = errno;
642 
643  // clang-format off
644  if (!loop->shutdown && io_poll_watch(loop->poll, loop->fd[0], 0,
645  &loop->watch) == -1 && !result) {
646  // clang-format on
647  errsv = errno;
648  result = -1;
649  }
650 
651 #ifndef __linux__
652  if (close(loop->fd[1]) == -1 && !result) {
653  errsv = errno;
654  result = -1;
655  }
656 #endif
657 
658  if (close(loop->fd[0]) == -1 && !result) {
659  errsv = errno;
660  result = -1;
661  }
662 
663  loop->fd[1] = loop->fd[0] = -1;
664 
665  errno = errsv;
666  return result;
667 }
668 
669 static int
670 io_fd_loop_read(io_fd_loop_t *loop)
671 {
672  assert(loop);
673 
674  int errsv = errno;
675  for (;;) {
676  errno = 0;
677 #ifdef __linux__
678  uint64_t buf;
679 #else
680  char buf;
681 #endif
682  ssize_t result = read(loop->fd[0], &buf, sizeof(buf));
683  if (result < 0 && errno != EINTR) {
 684  if (errno != EAGAIN && errno != EWOULDBLOCK)
685  return -1;
686  errno = errsv;
687  return 0;
688  }
689  }
690 }
691 
692 static int
693 io_fd_loop_write(io_fd_loop_t *loop)
694 {
695  assert(loop);
696 
697  int errsv = errno;
698  ssize_t result;
699  do {
700  errno = 0;
701 #ifdef __linux__
702  uint64_t buf = 1;
703 #else
704  char buf = 0;
705 #endif
706  result = write(loop->fd[1], &buf, sizeof(buf));
707  } while (result < 0 && errno == EINTR);
708  if (result < 0 && errno != EAGAIN && errno != EWOULDBLOCK)
709  return -1;
710  errno = errsv;
711  return result > 0 ? 1 : 0;
712 }
713 
714 #endif // _POSIX_C_SOURCE >= 200112L
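
Usage illustration (not part of fd_loop.c): the sketch below shows one plausible way the public API above might be embedded in a foreign poll(2)-based event loop. The file descriptor returned by io_fd_loop_get_fd() is watched externally, and io_fd_loop_run() is invoked whenever it becomes readable. Creating the io_poll_t instance is outside the scope of this file, so the make_poll() helper is hypothetical.

#include <poll.h>
#include <stdio.h>

#include <lely/io2/posix/fd_loop.h>

/* Hypothetical helper: creating the io_poll_t instance is not shown in
 * fd_loop.c, so assume the application provides it. */
extern io_poll_t *make_poll(void);

int
main(void)
{
	io_poll_t *poll_ = make_poll();
	io_fd_loop_t *loop = io_fd_loop_create(poll_);
	if (!loop)
		return 1;

	/* Work would be submitted through this executor; every task posted
	 * to it signals the file descriptor watched below. */
	ev_exec_t *exec = io_fd_loop_get_exec(loop);
	(void)exec;

	/* Embed the loop in an external poll(2)-based loop: wait until the
	 * loop's file descriptor becomes readable, then run pending tasks. */
	struct pollfd pfd = { .fd = io_fd_loop_get_fd(loop), .events = POLLIN };
	while (!io_fd_loop_stopped(loop)) {
		if (poll(&pfd, 1, -1) > 0 && (pfd.revents & POLLIN))
			printf("ran %zu task(s)\n", io_fd_loop_run(loop));
	}

	io_fd_loop_destroy(loop);
	return 0;
}

Once the loop has stopped (its task queue is empty and no tasks remain), io_fd_loop_restart() clears the stopped flag so it can be run again.
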
void io_ctx_insert(io_ctx_t *ctx, struct io_svc *svc)
Registers an I/O service with an I/O context.
Definition: ctx.c:121
io_fork_event
The type of event generated by an I/O context before and after a process fork.
Definition: ctx.h:37
@ IO_FORK_CHILD
The event generated after the fork in the child process.
Definition: ctx.h:43
#define IO_SVC_INIT(vptr)
The static initializer for io_svc.
Definition: ctx.h:57
void io_ctx_remove(io_ctx_t *ctx, struct io_svc *svc)
Unregisters an I/O service with an I/O context.
Definition: ctx.c:136
const struct ev_poll_vtbl *const ev_poll_t
The abstract polling interface.
Definition: poll.h:32
@ IO_EVENT_IN
Data (other than priority data) MAY be read without blocking.
Definition: event.h:35
void ev_exec_run(ev_exec_t *exec, struct ev_task *task)
Invokes the task function in *task as if the task is being executed by *exec.
Definition: exec.h:144
int io_fd_set_cloexec(int fd)
Sets the FD_CLOEXEC flag of the file descriptor fd.
Definition: fd.c:33
int io_fd_set_nonblock(int fd)
Sets the O_NONBLOCK flag of the file descriptor fd.
Definition: fd.c:44
This is the internal header file of the common file descriptor functions.
void io_fd_loop_restart(io_fd_loop_t *loop)
Restarts a file descriptor event loop.
Definition: fd_loop.c:313
void io_fd_loop_destroy(io_fd_loop_t *loop)
Destroys a file descriptor event loop.
Definition: fd_loop.c:251
void io_fd_loop_stop(io_fd_loop_t *loop)
Stops the file descriptor event loop.
Definition: fd_loop.c:284
int io_fd_loop_get_fd(const io_fd_loop_t *loop)
Returns the file descriptor corresponding to the event loop.
Definition: fd_loop.c:276
io_fd_loop_t * io_fd_loop_create(io_poll_t *poll)
Creates a new file descriptor event loop.
Definition: fd_loop.c:224
ev_poll_t * io_fd_loop_get_poll(const io_fd_loop_t *loop)
Returns a pointer to the polling instance used by the event loop.
Definition: fd_loop.c:260
ev_exec_t * io_fd_loop_get_exec(const io_fd_loop_t *loop)
Returns a pointer to the executor corresponding to the event loop.
Definition: fd_loop.c:268
int io_fd_loop_stopped(io_fd_loop_t *loop)
Returns 1 if the file descriptor event loop is stopped, and 0 if not.
Definition: fd_loop.c:299
size_t io_fd_loop_run(io_fd_loop_t *loop)
Runs the event loop by repeatedly invoking io_fd_loop_run_one() until no further task is executed; returns the number of tasks that were run.
Definition: fd_loop.c:328
This header file is part of the I/O library; it contains the file descriptor event loop declarations.
const struct ev_exec_vtbl *const ev_exec_t
An abstract task executor.
Definition: ev.h:29
This is the public header file of the utilities library.
#define structof(ptr, type, member)
Obtains the address of a structure from the address of one of its members.
Definition: util.h:93
io_ctx_t * io_poll_get_ctx(const io_poll_t *poll)
Returns a pointer to the I/O context with which the I/O polling instance is registered.
Definition: poll.c:275
#define IO_POLL_WATCH_INIT(func)
The static initializer for io_poll_watch.
Definition: poll.h:65
ev_poll_t * io_poll_get_poll(const io_poll_t *poll)
Returns a pointer to the ev_poll_t instance corresponding to the I/O polling instance.
Definition: poll.c:281
int io_poll_watch(io_poll_t *poll, int fd, int events, struct io_poll_watch *watch)
Registers an I/O device with an I/O polling interface and instructs it to watch for certain events.
Definition: poll.c:249
This header file is part of the utilities library; it contains the singly-linked list declarations.
void sllist_init(struct sllist *list)
Initializes a singly-linked list.
Definition: sllist.h:184
struct sllist * sllist_append(struct sllist *dst, struct sllist *src)
Appends the singly-linked list at src to the one at dst.
Definition: sllist.h:233
struct slnode * sllist_remove(struct sllist *list, struct slnode *node)
Removes a node from a singly-linked list.
Definition: sllist.c:46
void sllist_push_back(struct sllist *list, struct slnode *node)
Pushes a node to the back of a singly-linked list.
Definition: sllist.h:213
int sllist_empty(const struct sllist *list)
Returns 1 if the singly-linked list is empty, and 0 if not.
Definition: sllist.h:190
struct slnode * sllist_pop_front(struct sllist *list)
Pops a node from the front of a singly-linked list.
Definition: sllist.h:221
This is the internal header file of the Windows-specific I/O declarations.
This header file is part of the event library; it contains the standard executor declarations.
This header file is part of the C11 and POSIX compatibility library; it includes <stdatomic.h>.
This header file is part of the C11 and POSIX compatibility library; it includes <stdint.h>.
This header file is part of the C11 and POSIX compatibility library; it includes <stdlib.h>.
An I/O polling interface.
Definition: poll.c:48
An executable task.
Definition: task.h:41
ev_exec_t * exec
A pointer to the executor to which the task is (to be) submitted.
Definition: task.h:43
Definition: ctx.c:35
A file descriptor event loop.
Definition: fd_loop.c:80
pthread_mutex_t mtx
The mutex protecting the task queue.
Definition: fd_loop.c:107
const struct ev_std_exec_impl_vtbl * impl_vptr
A pointer to the virtual table containing the interface used by the standard executor (exec).
Definition: fd_loop.c:95
io_poll_t * poll
A pointer to the I/O polling instance used to monitor the event loop.
Definition: fd_loop.c:84
io_ctx_t * ctx
A pointer to the I/O context with which the event loop is registered.
Definition: fd_loop.c:90
struct io_poll_watch watch
The object used to monitor the file descriptor for I/O events.
Definition: fd_loop.c:99
struct ev_std_exec exec
The executor corresponding to the event loop.
Definition: fd_loop.c:97
struct sllist queue
The queue of pending tasks.
Definition: fd_loop.c:124
size_t ntasks
The number of pending tasks.
Definition: fd_loop.c:119
struct io_svc svc
The I/O service representing the event loop.
Definition: fd_loop.c:86
int fd[2]
The file descriptor corresponding to the event loop.
Definition: fd_loop.c:101
An object representing a file descriptor being monitored for I/O events.
Definition: poll.h:56
The virtual table of an I/O service.
Definition: ctx.h:67
An I/O service.
Definition: ctx.h:49
A singly-linked list.
Definition: sllist.h:51
This header file is part of the event library; it contains the task declarations.
struct ev_task * ev_task_from_node(struct slnode *node)
Converts a pointer to a node in a queue to the address of the task containing the node.
Definition: task.c:32
This header file is part of the C11 and POSIX compatibility library; it includes <unistd.h>.
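
For reference, the wakeup mechanism the loop builds on (an eventfd on Linux, a non-blocking pipe elsewhere) can be shown in isolation. The sketch below is an illustration of the pattern, not code from the library; it uses only standard POSIX calls and mirrors the drain-until-EAGAIN behavior of io_fd_loop_read().

#include <errno.h>
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int
main(void)
{
	int fd[2];
	if (pipe(fd) == -1)
		return 1;
	/* Make both ends non-blocking, as io_fd_loop_open() does on
	 * non-Linux platforms. */
	fcntl(fd[0], F_SETFL, fcntl(fd[0], F_GETFL) | O_NONBLOCK);
	fcntl(fd[1], F_SETFL, fcntl(fd[1], F_GETFL) | O_NONBLOCK);

	/* "Post": a single byte is enough to make the read end readable;
	 * further writes while the pipe is full can be ignored safely. */
	char c = 0;
	if (write(fd[1], &c, 1) < 0)
		perror("write");

	/* "Wake up": drain everything that was written, stopping at EAGAIN,
	 * like io_fd_loop_read(). */
	char buf[64];
	ssize_t n;
	while ((n = read(fd[0], buf, sizeof(buf))) > 0)
		;
	if (n < 0 && (errno == EAGAIN || errno == EWOULDBLOCK))
		puts("pipe drained; pending tasks would be run here");

	close(fd[0]);
	close(fd[1]);
	return 0;
}
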