Lely core libraries 2.2.5
poll.c
/**@file
 * This file is part of the I/O library; it contains the implementation of the
 * I/O polling interface.
 */

#include "io.h"
#include <lely/io/poll.h>
#include <lely/util/cmp.h>
#include <lely/util/errnum.h>
#include <lely/util/rbtree.h>
#ifdef _WIN32
#include <lely/io/sock.h>
#else
#include <lely/io/pipe.h>
#endif
#include "handle.h"

#include <assert.h>
#include <stdlib.h>

#if _POSIX_C_SOURCE >= 200112L
#if defined(__linux__) && defined(HAVE_SYS_EPOLL_H)
#include <sys/epoll.h>
#else
#include <poll.h>
#endif
#endif

/// An I/O polling interface.
struct __io_poll {
#ifndef LELY_NO_THREADS
	/// The mutex protecting #tree.
	mtx_t mtx;
#endif
	/// The tree containing the I/O device handles being watched.
	struct rbtree tree;
#if defined(_WIN32) || _POSIX_C_SOURCE >= 200112L
	/// A self-pipe used to generate signal events.
	io_handle_t pipe[2];
#endif
#if defined(__linux__) && defined(HAVE_SYS_EPOLL_H)
	/// The epoll file descriptor.
	int epfd;
#endif
};

/// The attributes of an I/O device handle being watched.
struct io_watch {
	/// The node in the tree of file descriptors.
	struct rbnode node;
	/// A pointer to the I/O device handle.
	struct io_handle *handle;
	/// The events being watched.
	struct io_event event;
	/**
	 * A flag indicating whether to keep watching the file descriptor
	 * after an event occurs.
	 */
	int keep;
};

#ifdef LELY_NO_THREADS
#define io_poll_lock(poll)
#define io_poll_unlock(poll)
#else
static void io_poll_lock(io_poll_t *poll);
static void io_poll_unlock(io_poll_t *poll);
#endif

static struct io_watch *io_poll_insert(
		io_poll_t *poll, struct io_handle *handle);
static void io_poll_remove(io_poll_t *poll, struct io_watch *watch);

#if _POSIX_C_SOURCE >= 200112L \
		&& !(defined(__linux__) && defined(HAVE_SYS_EPOLL_H))
static int _poll(struct pollfd *fds, nfds_t nfds, int timeout);
#endif

void *
__io_poll_alloc(void)
{
	void *ptr = malloc(sizeof(struct __io_poll));
	if (!ptr)
		set_errc(errno2c(errno));
	return ptr;
}

void
__io_poll_free(void *ptr)
{
	free(ptr);
}

struct __io_poll *
__io_poll_init(struct __io_poll *poll)
{
	assert(poll);

	int errc = 0;

#ifndef LELY_NO_THREADS
	mtx_init(&poll->mtx, mtx_plain);
#endif

#if defined(__linux__) && defined(HAVE_SYS_EPOLL_H)
	// Track attributes with the I/O device handle.
	rbtree_init(&poll->tree, ptr_cmp);
#else
	// Track attributes with the native file descriptor.
#ifdef _WIN32
	rbtree_init(&poll->tree, ptr_cmp);
#else
	rbtree_init(&poll->tree, int_cmp);
#endif
#endif

#if defined(_WIN32) || _POSIX_C_SOURCE >= 200112L
	// Create a self-pipe for signal events.
#ifdef _WIN32
	if (io_open_socketpair(IO_SOCK_IPV4, IO_SOCK_STREAM, poll->pipe)
			== -1) {
#else
	if (io_open_pipe(poll->pipe) == -1) {
#endif
		errc = get_errc();
		goto error_open_pipe;
	}

	// Make both ends of the self-pipe non-blocking.
	if (io_set_flags(poll->pipe[0], IO_FLAG_NONBLOCK) == -1) {
		errc = get_errc();
		goto error_set_flags;
	}
	if (io_set_flags(poll->pipe[1], IO_FLAG_NONBLOCK) == -1) {
		errc = get_errc();
		goto error_set_flags;
	}
#endif

#if defined(__linux__) && defined(HAVE_SYS_EPOLL_H)
	poll->epfd = epoll_create1(EPOLL_CLOEXEC);
	if (poll->epfd == -1) {
		errc = get_errc();
		goto error_epoll_create1;
	}

	// Register the read end of the self-pipe with epoll.
	struct epoll_event ev = { .events = EPOLLIN,
		.data.ptr = poll->pipe[0] };
	if (epoll_ctl(poll->epfd, EPOLL_CTL_ADD, poll->pipe[0]->fd, &ev)
			== -1) {
		errc = get_errc();
		goto error_epoll_ctl;
	}
#endif

	return poll;

#if defined(__linux__) && defined(HAVE_SYS_EPOLL_H)
error_epoll_ctl:
	close(poll->epfd);
error_epoll_create1:
#endif
#if defined(_WIN32) || _POSIX_C_SOURCE >= 200112L
error_set_flags:
	io_close(poll->pipe[1]);
	io_close(poll->pipe[0]);
error_open_pipe:
#endif
	set_errc(errc);
	return NULL;
}
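
/*
 * Note: the pipe created above implements the classic self-pipe trick.
 * io_poll_signal() writes a single byte to pipe[1], which makes pipe[0]
 * readable and thereby wakes up any thread blocked in select(), poll() or
 * epoll_wait(). Both ends are non-blocking, so a writer never blocks on a
 * full pipe and io_poll_wait() can drain pending bytes without stalling.
 */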

void
__io_poll_fini(struct __io_poll *poll)
{
	assert(poll);

	rbtree_foreach (&poll->tree, node)
		io_poll_remove(poll, structof(node, struct io_watch, node));

#if defined(__linux__) && defined(HAVE_SYS_EPOLL_H)
	close(poll->epfd);
#endif

#if defined(_WIN32) || _POSIX_C_SOURCE >= 200112L
	io_close(poll->pipe[1]);
	io_close(poll->pipe[0]);
#endif

#ifndef LELY_NO_THREADS
	mtx_destroy(&poll->mtx);
#endif
}

io_poll_t *
io_poll_create(void)
{
	int errc = 0;

	io_poll_t *poll = __io_poll_alloc();
	if (!poll) {
		errc = get_errc();
		goto error_alloc_poll;
	}

	if (!__io_poll_init(poll)) {
		errc = get_errc();
		goto error_init_poll;
	}

	return poll;

error_init_poll:
	__io_poll_free(poll);
error_alloc_poll:
	set_errc(errc);
	return NULL;
}

void
io_poll_destroy(io_poll_t *poll)
{
	if (poll) {
		__io_poll_fini(poll);
		__io_poll_free(poll);
	}
}

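/*
 * Usage (a minimal sketch): io_poll_create() and io_poll_destroy() bracket
 * the lifetime of a polling interface. On failure, io_poll_create() returns
 * NULL and leaves the error code to be retrieved with get_errc().
 *
 *	io_poll_t *poll = io_poll_create();
 *	if (!poll)
 *		return -1; // inspect get_errc() for the reason
 *	// ... register devices and wait for events ...
 *	io_poll_destroy(poll);
 */
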
int
io_poll_watch(io_poll_t *poll, io_handle_t handle, struct io_event *event,
		int keep)
{
	assert(poll);

	if (!handle) {
		set_errnum(ERRNUM_BADF);
		return -1;
	}

	assert(handle->vtab);
	switch (handle->vtab->type) {
#if defined(__linux__) && defined(HAVE_LINUX_CAN_H)
	case IO_TYPE_CAN:
#endif
#if _POSIX_C_SOURCE >= 200112L
	case IO_TYPE_FILE:
	case IO_TYPE_PIPE:
	case IO_TYPE_SERIAL:
#endif
#if defined(_WIN32) || _POSIX_C_SOURCE >= 200112L
	case IO_TYPE_SOCK:
#endif
		break;
	default: set_errnum(ERRNUM_INVAL); return -1;
	}

	int errc = 0;

	io_poll_lock(poll);

	// Check if the I/O device has already been registered.
#if defined(__linux__) && defined(HAVE_SYS_EPOLL_H)
	struct rbnode *node = rbtree_find(&poll->tree, handle);
#else
	struct rbnode *node = rbtree_find(&poll->tree, &handle->fd);
#endif
	struct io_watch *watch =
			node ? structof(node, struct io_watch, node) : NULL;
	// If event is not NULL, register the device or update the events being
	// watched. If event is NULL, remove the device.
	if (event) {
		if (!watch) {
			watch = io_poll_insert(poll, handle);
			if (!watch) {
				errc = get_errc();
				goto error_watch;
			}
		}

		// Update the events being watched.
		watch->event = *event;
		watch->keep = keep;

#if defined(__linux__) && defined(HAVE_SYS_EPOLL_H)
		// Modify or add the event to the epoll instance depending on
		// whether the file descriptor is already registered.
		int op = node ? EPOLL_CTL_MOD : EPOLL_CTL_ADD;

		struct epoll_event ev = { 0, { NULL } };
		if (event->events & IO_EVENT_READ)
			ev.events |= EPOLLIN | EPOLLRDHUP | EPOLLPRI;
		if (event->events & IO_EVENT_WRITE)
			ev.events |= EPOLLOUT;
		ev.data.ptr = watch->handle;

		if (epoll_ctl(poll->epfd, op, watch->handle->fd, &ev) == -1) {
			errc = get_errc();
			goto error_epoll_ctl;
		}
#endif
	} else {
		if (!watch) {
			errc = errnum2c(ERRNUM_INVAL);
			goto error_watch;
		}

#if defined(__linux__) && defined(HAVE_SYS_EPOLL_H)
		// Delete the event from the epoll instance.
		epoll_ctl(poll->epfd, EPOLL_CTL_DEL, watch->handle->fd, NULL);
#endif
		io_poll_remove(poll, watch);
	}

	io_poll_unlock(poll);

	return 0;

#if defined(__linux__) && defined(HAVE_SYS_EPOLL_H)
error_epoll_ctl:
	epoll_ctl(poll->epfd, EPOLL_CTL_DEL, watch->handle->fd, NULL);
#endif
	io_poll_remove(poll, watch);
error_watch:
	io_poll_unlock(poll);
	set_errc(errc);
	return -1;
}
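
/*
 * Usage (a minimal sketch; `handle` stands for any supported device handle,
 * and the io_event union is assumed to provide a user-data member, here
 * called u.data, alongside u.sig):
 *
 *	struct io_event event = { .events = IO_EVENT_READ };
 *	event.u.data = handle;
 *	// keep != 0: keep watching after the first event is reported.
 *	if (io_poll_watch(poll, handle, &event, 1) == -1)
 *		return -1;
 *	// Passing a NULL event deregisters the device again.
 *	io_poll_watch(poll, handle, NULL, 0);
 */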

int
io_poll_wait(io_poll_t *poll, int maxevents, struct io_event *events,
		int timeout)
{
	assert(poll);

	if (maxevents < 0) {
		set_errnum(ERRNUM_INVAL);
		return -1;
	}

	if (!maxevents || !events)
		return 0;

	int nevents = 0;
#if defined(_WIN32) || _POSIX_C_SOURCE >= 200112L
	unsigned char sig = 0;
#endif

#ifdef _WIN32
	fd_set readfds;
	FD_ZERO(&readfds);

	int nwritefds = 0;
	fd_set writefds;
	FD_ZERO(&writefds);

	fd_set errorfds;
	FD_ZERO(&errorfds);

	FD_SET((SOCKET)poll->pipe[0]->fd, &readfds);

	io_poll_lock(poll);
	struct rbnode *node = rbtree_first(&poll->tree);
	while (node) {
		struct io_watch *watch = structof(node, struct io_watch, node);
		node = rbnode_next(node);
		// Skip abandoned device handles.
		if (io_handle_unique(watch->handle)) {
			io_poll_remove(poll, watch);
			continue;
		}

		SOCKET fd = (SOCKET)watch->handle->fd;
		if (watch->event.events & IO_EVENT_READ)
			FD_SET(fd, &readfds);
		if (watch->event.events & IO_EVENT_WRITE) {
			nwritefds++;
			FD_SET(fd, &writefds);
		}
		FD_SET(fd, &errorfds);
	}
	io_poll_unlock(poll);

	struct timeval tv = { .tv_sec = timeout / 1000,
		.tv_usec = (timeout % 1000) * 1000 };
	int result = select(0, &readfds, nwritefds ? &writefds : NULL,
			&errorfds, timeout >= 0 ? &tv : NULL);
	if (result == -1)
		return -1;

	// Check the read end of the self-pipe.
	if (FD_ISSET((SOCKET)poll->pipe[0]->fd, &readfds))
		sig = 1;

	io_poll_lock(poll);
	node = rbtree_first(&poll->tree);
	while (node && nevents < maxevents) {
		struct io_watch *watch = structof(node, struct io_watch, node);
		node = rbnode_next(node);
		// Skip abandoned device handles.
		if (io_handle_unique(watch->handle)) {
			io_poll_remove(poll, watch);
			continue;
		}

		events[nevents].events = 0;
		if (FD_ISSET((SOCKET)watch->handle->fd, &readfds)
				&& (watch->event.events & IO_EVENT_READ))
			events[nevents].events |= IO_EVENT_READ;
		if (FD_ISSET((SOCKET)watch->handle->fd, &writefds)
				&& (watch->event.events & IO_EVENT_WRITE))
			events[nevents].events |= IO_EVENT_WRITE;
		if (FD_ISSET((SOCKET)watch->handle->fd, &errorfds))
			events[nevents].events |= IO_EVENT_ERROR;
		// Ignore non-events.
		if (!events[nevents].events)
			continue;

		events[nevents].u = watch->event.u;
		nevents++;

		if (!watch->keep)
			io_poll_remove(poll, watch);
	}
	io_poll_unlock(poll);
#elif _POSIX_C_SOURCE >= 200112L
#if defined(__linux__) && defined(HAVE_SYS_EPOLL_H)
	struct epoll_event ev[maxevents];
	int nfds;
	int errsv = errno;
	do {
		errno = errsv;
		nfds = epoll_wait(poll->epfd, ev, maxevents,
				timeout >= 0 ? timeout : -1);
	} while (nfds == -1 && errno == EINTR);
	if (nfds == -1)
		return -1;

	io_poll_lock(poll);
	for (int i = 0; i < nfds; i++) {
		// Ignore signal events; they are handled below.
		if (ev[i].data.ptr == poll->pipe[0]) {
			sig = 1;
			continue;
		}

		struct rbnode *node = rbtree_find(&poll->tree, ev[i].data.ptr);
		if (!node)
			continue;
		struct io_watch *watch = structof(node, struct io_watch, node);

		if (!io_handle_unique(watch->handle)) {
			events[nevents].events = 0;
			// We consider hang up and high-priority (OOB) data an
			// error.
			// clang-format off
			if (ev[i].events & (EPOLLRDHUP | EPOLLPRI | EPOLLERR
					| EPOLLHUP))
			// clang-format on
				events[nevents].events |= IO_EVENT_ERROR;
			if (ev[i].events & EPOLLIN)
				events[nevents].events |= IO_EVENT_READ;
			if (ev[i].events & EPOLLOUT)
				events[nevents].events |= IO_EVENT_WRITE;
			events[nevents].u = watch->event.u;
			nevents++;
		}

		if (io_handle_unique(watch->handle) || !watch->keep) {
			epoll_ctl(poll->epfd, EPOLL_CTL_DEL, watch->handle->fd,
					NULL);
			io_poll_remove(poll, watch);
		}
	}
	io_poll_unlock(poll);
#else
	io_poll_lock(poll);
	struct pollfd fds[rbtree_size(&poll->tree) + 1];
	nfds_t nfds = 0;
	// Watch the read end of the self-pipe.
	fds[nfds].fd = poll->pipe[0]->fd;
	fds[nfds].events = POLLIN;
	nfds++;
	struct rbnode *node = rbtree_first(&poll->tree);
	while (node) {
		struct io_watch *watch = structof(node, struct io_watch, node);
		node = rbnode_next(node);
		// Skip abandoned device handles.
		if (io_handle_unique(watch->handle)) {
			io_poll_remove(poll, watch);
			continue;
		}

		fds[nfds].fd = watch->handle->fd;
		fds[nfds].events = 0;
		if (watch->event.events & IO_EVENT_READ)
			fds[nfds].events |= POLLIN | POLLPRI;
		if (watch->event.events & IO_EVENT_WRITE)
			fds[nfds].events |= POLLOUT;
		nfds++;
	}
	io_poll_unlock(poll);

	int n;
	int errsv = errno;
	do {
		errno = errsv;
		n = _poll(fds, nfds, timeout >= 0 ? timeout : -1);
	} while (n == -1 && errno == EINTR);
	if (n == -1)
		return -1;
	maxevents = MIN(n, maxevents);

	io_poll_lock(poll);
	for (nfds_t nfd = 0; nfd < nfds && nevents < maxevents; nfd++) {
		// Ignore signal events; they are handled below.
		if (fds[nfd].fd == poll->pipe[0]->fd) {
			sig = 1;
			continue;
		}

		events[nevents].events = 0;
		// We consider hang up and high-priority (OOB) data an error.
		if (fds[nfd].revents & (POLLPRI | POLLERR | POLLHUP | POLLNVAL))
			events[nevents].events |= IO_EVENT_ERROR;
		// We don't distinguish between normal and high-priority data.
		if (fds[nfd].revents & POLLIN)
			events[nevents].events |= IO_EVENT_READ;
		if (fds[nfd].revents & POLLOUT)
			events[nevents].events |= IO_EVENT_WRITE;
		// Ignore non-events.
		if (!events[nevents].events)
			continue;

		struct rbnode *node = rbtree_find(&poll->tree, &fds[nfd].fd);
		if (!node)
			continue;
		struct io_watch *watch = structof(node, struct io_watch, node);

		if (!io_handle_unique(watch->handle)) {
			events[nevents].u = watch->event.u;
			nevents++;
		}

		if (io_handle_unique(watch->handle) || !watch->keep)
			io_poll_remove(poll, watch);
	}
	io_poll_unlock(poll);
#endif // __linux__ && HAVE_SYS_EPOLL_H
#else
	(void)timeout;
#endif // _WIN32

#if defined(_WIN32) || _POSIX_C_SOURCE >= 200112L
	if (sig) {
		// If one or more signals were received, generate the
		// corresponding events.
		while (nevents < maxevents
				&& io_read(poll->pipe[0], &sig, 1) == 1) {
			events[nevents].events = IO_EVENT_SIGNAL;
			events[nevents].u.sig = sig;
			nevents++;
		}
	}
#endif

	return nevents;
}

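/*
 * Usage (a minimal sketch): a basic event loop draining up to 16 events per
 * iteration with a one-second timeout.
 *
 *	struct io_event events[16];
 *	for (;;) {
 *		int n = io_poll_wait(poll, 16, events, 1000);
 *		if (n == -1)
 *			break; // inspect get_errc() for the reason
 *		for (int i = 0; i < n; i++) {
 *			if (events[i].events == IO_EVENT_SIGNAL)
 *				; // events[i].u.sig holds the signal number
 *			else if (events[i].events & IO_EVENT_READ)
 *				; // the watched device is ready for reading
 *		}
 *	}
 */
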
#if defined(_WIN32) || _POSIX_C_SOURCE >= 200112L
int
io_poll_signal(io_poll_t *poll, unsigned char sig)
{
	assert(poll);

	return io_write(poll->pipe[1], &sig, 1) == 1 ? 0 : -1;
}
#endif

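/*
 * Usage (a minimal sketch): io_poll_signal() writes the signal number to the
 * write end of the self-pipe, waking up a concurrent io_poll_wait(), which
 * then reports it as an IO_EVENT_SIGNAL event. A typical use is forwarding an
 * OS signal from a handler to the event loop (poll_ and on_sigint are
 * hypothetical names):
 *
 *	static io_poll_t *poll_;
 *
 *	static void on_sigint(int sig) { io_poll_signal(poll_, sig); }
 */
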
#ifndef LELY_NO_THREADS

static void
io_poll_lock(io_poll_t *poll)
{
	assert(poll);

	mtx_lock(&poll->mtx);
}

static void
io_poll_unlock(io_poll_t *poll)
{
	assert(poll);

	mtx_unlock(&poll->mtx);
}

#endif // !LELY_NO_THREADS

static struct io_watch *
io_poll_insert(io_poll_t *poll, struct io_handle *handle)
{
	assert(poll);
	assert(handle);

	struct io_watch *watch = malloc(sizeof(*watch));
	if (!watch)
		return NULL;

	watch->handle = io_handle_acquire(handle);
#if defined(__linux__) && defined(HAVE_SYS_EPOLL_H)
	watch->node.key = watch->handle;
#else
	watch->node.key = &watch->handle->fd;
#endif
	rbtree_insert(&poll->tree, &watch->node);

	return watch;
}

static void
io_poll_remove(io_poll_t *poll, struct io_watch *watch)
{
	assert(poll);
	assert(watch);

	struct io_handle *handle = watch->handle;
	rbtree_remove(&poll->tree, &watch->node);
	free(watch);
	io_handle_release(handle);
}

#if _POSIX_C_SOURCE >= 200112L \
		&& !(defined(__linux__) && defined(HAVE_SYS_EPOLL_H))
// Wrapper around POSIX poll(). The io_poll_t * parameter of io_poll_wait() is
// named `poll` and therefore shadows the poll() function, so the system call
// is invoked through this helper instead.
static int
_poll(struct pollfd *fds, nfds_t nfds, int timeout)
{
	return poll(fds, nfds, timeout);
}
#endif