Lely core libraries 2.3.4
poll.c
#include "io.h"

#if !LELY_NO_STDIO

#include <lely/io/poll.h>
#include <lely/util/cmp.h>
#include <lely/util/errnum.h>
#include <lely/util/rbtree.h>
#if _WIN32
#include <lely/io/sock.h>
#else
#include <lely/io/pipe.h>
#endif
#include "handle.h"

#include <assert.h>
#include <stdlib.h>

#if _POSIX_C_SOURCE >= 200112L
#if defined(__linux__) && defined(HAVE_SYS_EPOLL_H)
#include <sys/epoll.h>
#else
#include <poll.h>
#endif
#endif
/// An I/O polling interface.
struct __io_poll {
#if !LELY_NO_THREADS
	/// The mutex protecting tree.
	mtx_t mtx;
#endif
	/// The tree containing the I/O device handles being watched.
	struct rbtree tree;
#if _WIN32 || _POSIX_C_SOURCE >= 200112L
	/// A self-pipe used to generate signal events.
	io_handle_t pipe[2];
#endif
#if defined(__linux__) && defined(HAVE_SYS_EPOLL_H)
	/// The epoll file descriptor.
	int epfd;
#endif
};
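
The pipe member above implements the classic self-pipe trick: io_poll_signal()
writes a single byte to pipe[1], which makes pipe[0] readable and wakes up
whatever multiplexer io_poll_wait() is sleeping in. A minimal standalone sketch
of that pattern, assuming POSIX (illustrative only, not part of poll.c):

	#include <fcntl.h>
	#include <poll.h>
	#include <stdio.h>
	#include <unistd.h>

	int
	main(void)
	{
		int fds[2];
		if (pipe(fds) == -1)
			return 1;
		// Both ends are non-blocking, as in __io_poll_init(), so a
		// full pipe can never block the writer.
		fcntl(fds[0], F_SETFL, O_NONBLOCK);
		fcntl(fds[1], F_SETFL, O_NONBLOCK);

		// "Signal": write a single byte to the write end.
		unsigned char sig = 42;
		if (write(fds[1], &sig, 1) != 1)
			return 1;

		// The read end now polls as readable and yields the byte.
		struct pollfd pfd = { .fd = fds[0], .events = POLLIN };
		if (poll(&pfd, 1, 0) == 1 && (pfd.revents & POLLIN)) {
			if (read(fds[0], &sig, 1) == 1)
				printf("woke up for signal %u\n", sig);
		}
		return 0;
	}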

/// The attributes of an I/O device handle being watched.
struct io_watch {
	/// The node in the tree of file descriptors.
	struct rbnode node;
	/// A pointer to the I/O device handle.
	struct io_handle *handle;
	/// The events being watched.
	struct io_event event;
	/**
	 * A flag indicating whether to keep watching the file descriptor
	 * after an event occurs.
	 */
	int keep;
};
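
struct io_watch embeds its rbnode directly, and the code below recovers the
containing watch with structof(), lely's equivalent of the well-known
container_of idiom. A reduced standalone sketch of that idiom in standard C
(illustrative; the names node and watch are placeholders):

	#include <stddef.h>
	#include <stdio.h>

	// Same idea as lely's structof(): subtract the offset of the member
	// from the member's address to recover the enclosing structure.
	#define container_of(ptr, type, member) \
		((type *)((char *)(ptr)-offsetof(type, member)))

	struct node {
		int key;
	};

	struct watch {
		struct node node; // embedded node, like io_watch::node
		int keep;
	};

	int
	main(void)
	{
		struct watch w = { .node = { .key = 7 }, .keep = 1 };
		// A tree traversal hands back a pointer to the embedded node...
		struct node *n = &w.node;
		// ...and the container is recovered with no extra bookkeeping.
		struct watch *pw = container_of(n, struct watch, node);
		printf("key %d, keep %d\n", pw->node.key, pw->keep);
		return 0;
	}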

#if LELY_NO_THREADS
#define io_poll_lock(poll)
#define io_poll_unlock(poll)
#else
static void io_poll_lock(io_poll_t *poll);
static void io_poll_unlock(io_poll_t *poll);
#endif

static struct io_watch *io_poll_insert(
		io_poll_t *poll, struct io_handle *handle);
static void io_poll_remove(io_poll_t *poll, struct io_watch *watch);

#if _POSIX_C_SOURCE >= 200112L \
		&& !(defined(__linux__) && defined(HAVE_SYS_EPOLL_H))
static int _poll(struct pollfd *fds, nfds_t nfds, int timeout);
#endif

void *
__io_poll_alloc(void)
{
	void *ptr = malloc(sizeof(struct __io_poll));
	if (!ptr)
		set_errc(errno2c(errno));
	return ptr;
}

void
__io_poll_free(void *ptr)
{
	free(ptr);
}

struct __io_poll *
__io_poll_init(struct __io_poll *poll)
{
	assert(poll);

	int errc = 0;

#if !LELY_NO_THREADS
	mtx_init(&poll->mtx, mtx_plain);
#endif

#if defined(__linux__) && defined(HAVE_SYS_EPOLL_H)
	// Track attributes with the I/O device handle.
	rbtree_init(&poll->tree, ptr_cmp);
#else
	// Track attributes with the native file descriptor.
#if _WIN32
	rbtree_init(&poll->tree, ptr_cmp);
#else
	rbtree_init(&poll->tree, int_cmp);
#endif
#endif

#if _WIN32 || _POSIX_C_SOURCE >= 200112L
	// Create a self-pipe for signal events.
#if _WIN32
	if (io_open_socketpair(IO_SOCK_IPV4, IO_SOCK_STREAM, poll->pipe)
			== -1) {
#else
	if (io_open_pipe(poll->pipe) == -1) {
#endif
		errc = get_errc();
		goto error_open_pipe;
	}

	// Make both ends of the self-pipe non-blocking.
	if (io_set_flags(poll->pipe[0], IO_FLAG_NONBLOCK) == -1) {
		errc = get_errc();
		goto error_set_flags;
	}
	if (io_set_flags(poll->pipe[1], IO_FLAG_NONBLOCK) == -1) {
		errc = get_errc();
		goto error_set_flags;
	}
#endif

#if defined(__linux__) && defined(HAVE_SYS_EPOLL_H)
	poll->epfd = epoll_create1(EPOLL_CLOEXEC);
	if (poll->epfd == -1) {
		errc = get_errc();
		goto error_epoll_create1;
	}

	// Register the read end of the self-pipe with epoll.
	struct epoll_event ev = { .events = EPOLLIN,
		.data.ptr = poll->pipe[0] };
	if (epoll_ctl(poll->epfd, EPOLL_CTL_ADD, poll->pipe[0]->fd, &ev)
			== -1) {
		errc = get_errc();
		goto error_epoll_ctl;
	}
#endif

	return poll;

#if defined(__linux__) && defined(HAVE_SYS_EPOLL_H)
error_epoll_ctl:
	close(poll->epfd);
error_epoll_create1:
#endif
#if _WIN32 || _POSIX_C_SOURCE >= 200112L
error_set_flags:
	io_close(poll->pipe[1]);
	io_close(poll->pipe[0]);
error_open_pipe:
#endif
	set_errc(errc);
	return NULL;
}
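
Note how __io_poll_init() stores the handle pointer in epoll's per-descriptor
user data (data.ptr): epoll returns the same pointer with each ready event,
which is why the Linux build keys its tree on the handle pointer rather than
on the file descriptor. A reduced sketch of that registration round trip,
assuming Linux with sys/epoll.h (illustrative only, not part of poll.c):

	#include <stdio.h>
	#include <sys/epoll.h>
	#include <unistd.h>

	int
	main(void)
	{
		int epfd = epoll_create1(EPOLL_CLOEXEC);
		if (epfd == -1)
			return 1;

		int fds[2];
		if (pipe(fds) == -1)
			return 1;

		// Register the read end; data.ptr may carry any
		// per-descriptor context, here just a string.
		struct epoll_event ev = { .events = EPOLLIN };
		ev.data.ptr = "read end of the pipe";
		if (epoll_ctl(epfd, EPOLL_CTL_ADD, fds[0], &ev) == -1)
			return 1;

		if (write(fds[1], "x", 1) != 1)
			return 1;

		// The kernel hands the same pointer back with the event.
		struct epoll_event out;
		if (epoll_wait(epfd, &out, 1, 1000) == 1)
			printf("ready: %s\n", (const char *)out.data.ptr);

		close(epfd);
		return 0;
	}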

void
__io_poll_fini(struct __io_poll *poll)
{
	assert(poll);

	rbtree_foreach (&poll->tree, node)
		io_poll_remove(poll, structof(node, struct io_watch, node));

#if defined(__linux__) && defined(HAVE_SYS_EPOLL_H)
	close(poll->epfd);
#endif

#if _WIN32 || _POSIX_C_SOURCE >= 200112L
	io_close(poll->pipe[1]);
	io_close(poll->pipe[0]);
#endif

#if !LELY_NO_THREADS
	mtx_destroy(&poll->mtx);
#endif
}

io_poll_t *
io_poll_create(void)
{
	int errc = 0;

	io_poll_t *poll = __io_poll_alloc();
	if (!poll) {
		errc = get_errc();
		goto error_alloc_poll;
	}

	if (!__io_poll_init(poll)) {
		errc = get_errc();
		goto error_init_poll;
	}

	return poll;

error_init_poll:
	__io_poll_free(poll);
error_alloc_poll:
	set_errc(errc);
	return NULL;
}
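
A typical lifetime for the polling interface, as a usage sketch (the function
name example_lifetime is hypothetical):

	#include <lely/io/poll.h>

	int
	example_lifetime(void)
	{
		io_poll_t *poll = io_poll_create();
		if (!poll)
			return -1; // the error code has already been set

		// ... call io_poll_watch()/io_poll_wait() here ...

		io_poll_destroy(poll); // a NULL argument is ignored
		return 0;
	}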

void
io_poll_destroy(io_poll_t *poll)
{
	if (poll) {
		__io_poll_fini(poll);
		__io_poll_free(poll);
	}
}

int
io_poll_watch(io_poll_t *poll, io_handle_t handle, struct io_event *event,
		int keep)
{
	assert(poll);

	if (!handle) {
		set_errnum(ERRNUM_BADF);
		return -1;
	}

	assert(handle->vtab);
	switch (handle->vtab->type) {
#if defined(__linux__) && defined(HAVE_LINUX_CAN_H)
	case IO_TYPE_CAN:
#endif
#if _POSIX_C_SOURCE >= 200112L
	case IO_TYPE_FILE:
	case IO_TYPE_PIPE:
	case IO_TYPE_SERIAL:
#endif
#if _WIN32 || _POSIX_C_SOURCE >= 200112L
	case IO_TYPE_SOCK:
#endif
		break;
	default: set_errnum(ERRNUM_INVAL); return -1;
	}

	int errc = 0;

	io_poll_lock(poll);

	// Check if the I/O device has already been registered.
#if defined(__linux__) && defined(HAVE_SYS_EPOLL_H)
	struct rbnode *node = rbtree_find(&poll->tree, handle);
#else
	struct rbnode *node = rbtree_find(&poll->tree, &handle->fd);
#endif
	struct io_watch *watch =
			node ? structof(node, struct io_watch, node) : NULL;
	// If event is not NULL, register the device or update the events being
	// watched. If event is NULL, remove the device.
	if (event) {
		if (!watch) {
			watch = io_poll_insert(poll, handle);
			if (!watch) {
				errc = get_errc();
				goto error_watch;
			}
		}

		// Update the events being watched.
		watch->event = *event;
		watch->keep = keep;

#if defined(__linux__) && defined(HAVE_SYS_EPOLL_H)
		// Modify or add the event to the epoll instance, depending on
		// whether the file descriptor is already registered.
		int op = node ? EPOLL_CTL_MOD : EPOLL_CTL_ADD;

		struct epoll_event ev = { 0, { NULL } };
		if (event->events & IO_EVENT_READ)
			ev.events |= EPOLLIN | EPOLLRDHUP | EPOLLPRI;
		if (event->events & IO_EVENT_WRITE)
			ev.events |= EPOLLOUT;
		ev.data.ptr = watch->handle;

		if (epoll_ctl(poll->epfd, op, watch->handle->fd, &ev) == -1) {
			errc = get_errc();
			goto error_epoll_ctl;
		}
#endif
	} else {
		if (!watch) {
			errc = errnum2c(ERRNUM_INVAL);
			goto error_watch;
		}

#if defined(__linux__) && defined(HAVE_SYS_EPOLL_H)
		// Delete the event from the epoll instance.
		epoll_ctl(poll->epfd, EPOLL_CTL_DEL, watch->handle->fd, NULL);
#endif
		io_poll_remove(poll, watch);
	}

	io_poll_unlock(poll);

	return 0;

#if defined(__linux__) && defined(HAVE_SYS_EPOLL_H)
error_epoll_ctl:
	epoll_ctl(poll->epfd, EPOLL_CTL_DEL, watch->handle->fd, NULL);
#endif
	io_poll_remove(poll, watch);
error_watch:
	io_poll_unlock(poll);
	set_errc(errc);
	return -1;
}
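
Registering a device for read events might look as follows (a usage sketch;
watch_for_input is a hypothetical helper, and only the io_event fields
documented in poll.h are touched):

	#include <lely/io/poll.h>

	int
	watch_for_input(io_poll_t *poll, io_handle_t handle)
	{
		struct io_event event = { .events = IO_EVENT_READ };
		// keep != 0: keep watching after the first event fires; with
		// keep == 0 the registration is removed once an event has
		// been reported.
		return io_poll_watch(poll, handle, &event, 1);
	}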

int
io_poll_wait(io_poll_t *poll, int maxevents, struct io_event *events,
		int timeout)
{
	assert(poll);

	if (maxevents < 0) {
		set_errnum(ERRNUM_INVAL);
		return -1;
	}

	if (!maxevents || !events)
		return 0;

	int nevents = 0;
#if _WIN32 || _POSIX_C_SOURCE >= 200112L
	unsigned char sig = 0;
#endif

#if _WIN32
	fd_set readfds;
	FD_ZERO(&readfds);

	int nwritefds = 0;
	fd_set writefds;
	FD_ZERO(&writefds);

	fd_set errorfds;
	FD_ZERO(&errorfds);

	FD_SET((SOCKET)poll->pipe[0]->fd, &readfds);

	io_poll_lock(poll);
	struct rbnode *node = rbtree_first(&poll->tree);
	while (node) {
		struct io_watch *watch = structof(node, struct io_watch, node);
		node = rbnode_next(node);
		// Skip abandoned device handles.
		if (io_handle_unique(watch->handle)) {
			io_poll_remove(poll, watch);
			continue;
		}

		SOCKET fd = (SOCKET)watch->handle->fd;
		if (watch->event.events & IO_EVENT_READ)
			FD_SET(fd, &readfds);
		if (watch->event.events & IO_EVENT_WRITE) {
			nwritefds++;
			FD_SET(fd, &writefds);
		}
		FD_SET(fd, &errorfds);
	}
	io_poll_unlock(poll);

	struct timeval tv = { .tv_sec = timeout / 1000,
		.tv_usec = (timeout % 1000) * 1000 };
	int result = select(0, &readfds, nwritefds ? &writefds : NULL,
			&errorfds, timeout >= 0 ? &tv : NULL);
	if (result == -1)
		return -1;

	// Check the read end of the self-pipe.
	if (FD_ISSET((SOCKET)poll->pipe[0]->fd, &readfds))
		sig = 1;

	io_poll_lock(poll);
	node = rbtree_first(&poll->tree);
	while (node && nevents < maxevents) {
		struct io_watch *watch = structof(node, struct io_watch, node);
		node = rbnode_next(node);
		// Skip abandoned device handles.
		if (io_handle_unique(watch->handle)) {
			io_poll_remove(poll, watch);
			continue;
		}

		events[nevents].events = 0;
		if (FD_ISSET((SOCKET)watch->handle->fd, &readfds)
				&& (watch->event.events & IO_EVENT_READ))
			events[nevents].events |= IO_EVENT_READ;
		if (FD_ISSET((SOCKET)watch->handle->fd, &writefds)
				&& (watch->event.events & IO_EVENT_WRITE))
			events[nevents].events |= IO_EVENT_WRITE;
		if (FD_ISSET((SOCKET)watch->handle->fd, &errorfds))
			events[nevents].events |= IO_EVENT_ERROR;
		// Ignore non-events.
		if (!events[nevents].events)
			continue;

		events[nevents].u = watch->event.u;
		nevents++;

		if (!watch->keep)
			io_poll_remove(poll, watch);
	}
	io_poll_unlock(poll);
#elif _POSIX_C_SOURCE >= 200112L
#if defined(__linux__) && defined(HAVE_SYS_EPOLL_H)
	struct epoll_event ev[maxevents];
	int nfds;
	int errsv = errno;
	do {
		errno = errsv;
		nfds = epoll_wait(poll->epfd, ev, maxevents,
				timeout >= 0 ? timeout : -1);
	} while (nfds == -1 && errno == EINTR);
	if (nfds == -1)
		return -1;

	io_poll_lock(poll);
	for (int i = 0; i < nfds; i++) {
		// Ignore signal events; they are handled below.
		if (ev[i].data.ptr == poll->pipe[0]) {
			sig = 1;
			continue;
		}

		struct rbnode *node = rbtree_find(&poll->tree, ev[i].data.ptr);
		if (!node)
			continue;
		struct io_watch *watch = structof(node, struct io_watch, node);

		if (!io_handle_unique(watch->handle)) {
			events[nevents].events = 0;
			// We consider hang up and high-priority (OOB) data an
			// error.
			// clang-format off
			if (ev[i].events & (EPOLLRDHUP | EPOLLPRI | EPOLLERR
					| EPOLLHUP))
			// clang-format on
				events[nevents].events |= IO_EVENT_ERROR;
			if (ev[i].events & EPOLLIN)
				events[nevents].events |= IO_EVENT_READ;
			if (ev[i].events & EPOLLOUT)
				events[nevents].events |= IO_EVENT_WRITE;
			events[nevents].u = watch->event.u;
			nevents++;
		}

		if (io_handle_unique(watch->handle) || !watch->keep) {
			epoll_ctl(poll->epfd, EPOLL_CTL_DEL, watch->handle->fd,
					NULL);
			io_poll_remove(poll, watch);
		}
	}
	io_poll_unlock(poll);
#else
	io_poll_lock(poll);
	struct pollfd fds[rbtree_size(&poll->tree) + 1];
	nfds_t nfds = 0;
	// Watch the read end of the self-pipe.
	fds[nfds].fd = poll->pipe[0]->fd;
	fds[nfds].events = POLLIN;
	nfds++;
	struct rbnode *node = rbtree_first(&poll->tree);
	while (node) {
		struct io_watch *watch = structof(node, struct io_watch, node);
		node = rbnode_next(node);
		// Skip abandoned device handles.
		if (io_handle_unique(watch->handle)) {
			io_poll_remove(poll, watch);
			continue;
		}

		fds[nfds].fd = watch->handle->fd;
		fds[nfds].events = 0;
		if (watch->event.events & IO_EVENT_READ)
			fds[nfds].events |= POLLIN | POLLPRI;
		if (watch->event.events & IO_EVENT_WRITE)
			fds[nfds].events |= POLLOUT;
		nfds++;
	}
	io_poll_unlock(poll);

	int n;
	int errsv = errno;
	do {
		errno = errsv;
		n = _poll(fds, nfds, timeout >= 0 ? timeout : -1);
	} while (n == -1 && errno == EINTR);
	if (n == -1)
		return -1;
	maxevents = MIN(n, maxevents);

	io_poll_lock(poll);
	for (nfds_t nfd = 0; nfd < nfds && nevents < maxevents; nfd++) {
		// Ignore signal events; they are handled below.
		if (fds[nfd].fd == poll->pipe[0]->fd) {
			sig = 1;
			continue;
		}

		events[nevents].events = 0;
		// We consider hang up and high-priority (OOB) data an error.
		if (fds[nfd].revents & (POLLPRI | POLLERR | POLLHUP | POLLNVAL))
			events[nevents].events |= IO_EVENT_ERROR;
		// We don't distinguish between normal and high-priority data.
		if (fds[nfd].revents & POLLIN)
			events[nevents].events |= IO_EVENT_READ;
		if (fds[nfd].revents & POLLOUT)
			events[nevents].events |= IO_EVENT_WRITE;
		// Ignore non-events.
		if (!events[nevents].events)
			continue;

		struct rbnode *node = rbtree_find(&poll->tree, &fds[nfd].fd);
		if (!node)
			continue;
		struct io_watch *watch = structof(node, struct io_watch, node);

		if (!io_handle_unique(watch->handle)) {
			events[nevents].u = watch->event.u;
			nevents++;
		}

		if (io_handle_unique(watch->handle) || !watch->keep)
			io_poll_remove(poll, watch);
	}
	io_poll_unlock(poll);
#endif // __linux__ && HAVE_SYS_EPOLL_H
#else
	(void)timeout;
#endif // _WIN32

#if _WIN32 || _POSIX_C_SOURCE >= 200112L
	if (sig) {
		// If one or more signals were received, generate the
		// corresponding events.
		while (nevents < maxevents
				&& io_read(poll->pipe[0], &sig, 1) == 1) {
			events[nevents].events = IO_EVENT_SIGNAL;
			events[nevents].u.sig = sig;
			nevents++;
		}
	}
#endif

	return nevents;
}
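
A typical event loop built on io_poll_wait(), as a usage sketch (run_loop is a
hypothetical helper; the buffer size and timeout are arbitrary):

	#include <lely/io/poll.h>

	void
	run_loop(io_poll_t *poll)
	{
		struct io_event events[16];
		for (;;) {
			// Wait up to 1000 ms for at most 16 events.
			int n = io_poll_wait(poll, 16, events, 1000);
			if (n == -1)
				break; // inspect get_errc() in real code
			for (int i = 0; i < n; i++) {
				if (events[i].events == IO_EVENT_SIGNAL) {
					// events[i].u.sig is the signal
					// number passed to io_poll_signal().
					continue;
				}
				if (events[i].events & IO_EVENT_ERROR) {
					// Error or hang-up on the device.
				}
				if (events[i].events & IO_EVENT_READ) {
					// The device is ready for reading.
				}
				if (events[i].events & IO_EVENT_WRITE) {
					// The device is ready for writing.
				}
			}
		}
	}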

#if _WIN32 || _POSIX_C_SOURCE >= 200112L
int
io_poll_signal(io_poll_t *poll, unsigned char sig)
{
	assert(poll);

	return io_write(poll->pipe[1], &sig, 1) == 1 ? 0 : -1;
}
#endif
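
Because io_poll_signal() amounts to writing a single byte to the self-pipe, it
can be used to wake io_poll_wait() from an asynchronous context such as a
POSIX signal handler; io_poll_wait() then reports the wake-up as an
IO_EVENT_SIGNAL event with u.sig equal to the byte that was written. A sketch,
assuming a POSIX build (sig_poll, on_signal and install_handler are
hypothetical):

	#include <signal.h>

	#include <lely/io/poll.h>

	static io_poll_t *sig_poll; // set once during initialization

	static void
	on_signal(int signum)
	{
		// On a POSIX build this boils down to one write() on the
		// self-pipe, which is the point of the self-pipe trick.
		io_poll_signal(sig_poll, (unsigned char)signum);
	}

	static int
	install_handler(io_poll_t *poll)
	{
		sig_poll = poll;

		struct sigaction sa;
		sa.sa_handler = &on_signal;
		sigemptyset(&sa.sa_mask);
		sa.sa_flags = 0;
		return sigaction(SIGINT, &sa, NULL);
	}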

#if !LELY_NO_THREADS

static void
io_poll_lock(io_poll_t *poll)
{
	assert(poll);

	mtx_lock(&poll->mtx);
}

static void
io_poll_unlock(io_poll_t *poll)
{
	assert(poll);

	mtx_unlock(&poll->mtx);
}

#endif // !LELY_NO_THREADS

static struct io_watch *
io_poll_insert(io_poll_t *poll, struct io_handle *handle)
{
	assert(poll);
	assert(handle);

	struct io_watch *watch = malloc(sizeof(*watch));
	if (!watch)
		return NULL;

	watch->handle = io_handle_acquire(handle);
#if defined(__linux__) && defined(HAVE_SYS_EPOLL_H)
	watch->node.key = watch->handle;
#else
	watch->node.key = &watch->handle->fd;
#endif
	rbtree_insert(&poll->tree, &watch->node);

	return watch;
}

static void
io_poll_remove(io_poll_t *poll, struct io_watch *watch)
{
	assert(poll);
	assert(watch);

	struct io_handle *handle = watch->handle;
	rbtree_remove(&poll->tree, &watch->node);
	free(watch);
	io_handle_release(handle);
}

#if _POSIX_C_SOURCE >= 200112L \
		&& !(defined(__linux__) && defined(HAVE_SYS_EPOLL_H))
// Inside io_poll_wait() the parameter named poll shadows the POSIX poll()
// function; this wrapper is defined at file scope so the system call can
// still be invoked there.
static int
_poll(struct pollfd *fds, nfds_t nfds, int timeout)
{
	return poll(fds, nfds, timeout);
}
#endif

#endif // !LELY_NO_STDIO