uloop: fix deleting pending fd events on uloop_fd_del
/*
 * uloop - event loop implementation
 *
 * Copyright (C) 2010-2013 Felix Fietkau <nbd@openwrt.org>
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */
#include <sys/time.h>
#include <sys/types.h>

#include <unistd.h>
#include <stdio.h>
#include <stdlib.h>
#include <errno.h>
#include <poll.h>
#include <string.h>
#include <fcntl.h>
#include <stdbool.h>
#include <signal.h>	/* sigaction() */
#include <time.h>	/* clock_gettime() */

#include "uloop.h"
#include "utils.h"

#ifdef USE_KQUEUE
#include <sys/event.h>
#endif
#ifdef USE_EPOLL
#include <sys/epoll.h>
#endif
#include <sys/wait.h>

/* An event fetched from the kernel, queued until it is dispatched */
struct uloop_fd_event {
	struct uloop_fd *fd;
	unsigned int events;
};

/*
 * Stack entry for an fd whose callback is currently running; events that
 * arrive for it in the meantime are buffered here instead of recursing
 */
struct uloop_fd_stack {
	struct uloop_fd_stack *next;
	struct uloop_fd *fd;
	unsigned int events;
};

static struct uloop_fd_stack *fd_stack = NULL;

#define ULOOP_MAX_EVENTS 10

static struct list_head timeouts = LIST_HEAD_INIT(timeouts);
static struct list_head processes = LIST_HEAD_INIT(processes);

static int poll_fd = -1;
bool uloop_cancelled = false;
bool uloop_handle_sigchld = true;
static bool do_sigchld = false;

/* Events returned by the last fetch that have not been dispatched yet */
static struct uloop_fd_event cur_fds[ULOOP_MAX_EVENTS];
static int cur_fd, cur_nfds;

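/*
 * Minimal usage sketch of the fd API (illustrative only; assumes a valid
 * file descriptor in `sock_fd` and the declarations from uloop.h):
 *
 *	static void sock_cb(struct uloop_fd *u, unsigned int events)
 *	{
 *		if (events & ULOOP_READ) {
 *			// drain u->fd; with ULOOP_EDGE_TRIGGER, read until
 *			// EAGAIN, since the event will not be repeated
 *		}
 *	}
 *
 *	static struct uloop_fd sock = { .cb = sock_cb };
 *
 *	uloop_init();
 *	sock.fd = sock_fd;
 *	uloop_fd_add(&sock, ULOOP_READ | ULOOP_EDGE_TRIGGER);
 *	uloop_run();
 *	uloop_done();
 */
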
#ifdef USE_KQUEUE

int uloop_init(void)
{
	struct timespec timeout = { 0, 0 };
	struct kevent ev = {};

	if (poll_fd >= 0)
		return 0;

	poll_fd = kqueue();
	if (poll_fd < 0)
		return -1;

	/* let the kqueue track SIGCHLD so child exits wake up the loop */
	EV_SET(&ev, SIGCHLD, EVFILT_SIGNAL, EV_ADD, 0, 0, 0);
	kevent(poll_fd, &ev, 1, NULL, 0, &timeout);

	return 0;
}

static uint16_t get_flags(unsigned int flags, unsigned int mask)
{
	uint16_t kflags = 0;

	if (!(flags & mask))
		return EV_DELETE;

	kflags = EV_ADD;
	if (flags & ULOOP_EDGE_TRIGGER)
		kflags |= EV_CLEAR;

	return kflags;
}

static struct kevent events[ULOOP_MAX_EVENTS];

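/*
 * Edge-trigger emulation: ULOOP_EDGE_DEFER makes register_kevent() register
 * the fd level-triggered first (presumably so that data already pending at
 * registration time is not missed); after its first event fires,
 * uloop_fetch_events() re-registers it with EV_CLEAR for real
 * edge-triggered behavior.
 */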
static int register_kevent(struct uloop_fd *fd, unsigned int flags)
{
	struct timespec timeout = { 0, 0 };
	struct kevent ev[2];
	int nev = 0;
	unsigned int changed;
	uint16_t kflags;

	if (flags & ULOOP_EDGE_DEFER)
		flags &= ~ULOOP_EDGE_TRIGGER;

	changed = flags ^ fd->flags;
	if (changed & ULOOP_EDGE_TRIGGER)
		changed |= flags;

	if (changed & ULOOP_READ) {
		kflags = get_flags(flags, ULOOP_READ);
		EV_SET(&ev[nev++], fd->fd, EVFILT_READ, kflags, 0, 0, fd);
	}

	if (changed & ULOOP_WRITE) {
		kflags = get_flags(flags, ULOOP_WRITE);
		EV_SET(&ev[nev++], fd->fd, EVFILT_WRITE, kflags, 0, 0, fd);
	}

	fd->flags = flags;
	/*
	 * The fifth kevent() argument is the size of the (NULL) event list
	 * and must be 0; deletion is already requested per-filter via the
	 * EV_DELETE flags set up by get_flags() above
	 */
	if (kevent(poll_fd, ev, nev, NULL, 0, &timeout) == -1)
		return -1;

	return 0;
}

static int register_poll(struct uloop_fd *fd, unsigned int flags)
{
	if (flags & ULOOP_EDGE_TRIGGER)
		flags |= ULOOP_EDGE_DEFER;
	else
		flags &= ~ULOOP_EDGE_DEFER;

	return register_kevent(fd, flags);
}

static int __uloop_fd_delete(struct uloop_fd *fd)
{
	return register_poll(fd, 0);
}

static int uloop_fetch_events(int timeout)
{
	struct timespec ts;
	int nfds, n;

	if (timeout >= 0) {
		ts.tv_sec = timeout / 1000;
		ts.tv_nsec = (timeout % 1000) * 1000000;
	}

	nfds = kevent(poll_fd, NULL, 0, events, ARRAY_SIZE(events), timeout >= 0 ? &ts : NULL);
	for (n = 0; n < nfds; n++) {
		struct uloop_fd_event *cur = &cur_fds[n];
		struct uloop_fd *u = events[n].udata;
		unsigned int ev = 0;

		cur->fd = u;
		if (!u)
			continue;

		if (events[n].flags & EV_ERROR) {
			u->error = true;
			uloop_fd_delete(u);
		}

		if (events[n].filter == EVFILT_READ)
			ev |= ULOOP_READ;
		else if (events[n].filter == EVFILT_WRITE)
			ev |= ULOOP_WRITE;

		if (events[n].flags & EV_EOF)
			u->eof = true;
		else if (!ev)
			cur->fd = NULL;

		cur->events = ev;
		if (u->flags & ULOOP_EDGE_DEFER) {
			/* first event has fired; switch to real edge-trigger mode */
			u->flags &= ~ULOOP_EDGE_DEFER;
			u->flags |= ULOOP_EDGE_TRIGGER;
			register_kevent(u, u->flags);
		}
	}
	return nfds;
}

#endif

#ifdef USE_EPOLL

/**
 * FIXME: uClibc < 0.9.30.3 does not define EPOLLRDHUP for Linux >= 2.6.17
 */
#ifndef EPOLLRDHUP
#define EPOLLRDHUP 0x2000
#endif

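/*
 * Note: the size argument to epoll_create() is only a hint and has been
 * ignored by the kernel since Linux 2.6.8; FD_CLOEXEC keeps the epoll fd
 * from leaking into exec'd children.
 */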
int uloop_init(void)
{
	if (poll_fd >= 0)
		return 0;

	poll_fd = epoll_create(32);
	if (poll_fd < 0)
		return -1;

	fcntl(poll_fd, F_SETFD, fcntl(poll_fd, F_GETFD) | FD_CLOEXEC);
	return 0;
}

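/*
 * On the epoll side, ULOOP_EDGE_TRIGGER maps directly to EPOLLET and
 * EPOLLRDHUP (peer half-close) feeds uloop's eof flag; the ULOOP_EDGE_DEFER
 * dance is only needed in the kqueue implementation above.
 */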
static int register_poll(struct uloop_fd *fd, unsigned int flags)
{
	struct epoll_event ev;
	int op = fd->registered ? EPOLL_CTL_MOD : EPOLL_CTL_ADD;

	memset(&ev, 0, sizeof(struct epoll_event));

	if (flags & ULOOP_READ)
		ev.events |= EPOLLIN | EPOLLRDHUP;

	if (flags & ULOOP_WRITE)
		ev.events |= EPOLLOUT;

	if (flags & ULOOP_EDGE_TRIGGER)
		ev.events |= EPOLLET;

	/* ev.data is a union; store only the uloop_fd pointer */
	ev.data.ptr = fd;
	fd->flags = flags;

	return epoll_ctl(poll_fd, op, fd->fd, &ev);
}

static struct epoll_event events[ULOOP_MAX_EVENTS];

static int __uloop_fd_delete(struct uloop_fd *sock)
{
	sock->flags = 0;
	return epoll_ctl(poll_fd, EPOLL_CTL_DEL, sock->fd, 0);
}

static int uloop_fetch_events(int timeout)
{
	int n, nfds;

	nfds = epoll_wait(poll_fd, events, ARRAY_SIZE(events), timeout);
	for (n = 0; n < nfds; ++n) {
		struct uloop_fd_event *cur = &cur_fds[n];
		struct uloop_fd *u = events[n].data.ptr;
		unsigned int ev = 0;

		cur->fd = u;
		if (!u)
			continue;

		if (events[n].events & (EPOLLERR|EPOLLHUP)) {
			u->error = true;
			uloop_fd_delete(u);
		}

		if (!(events[n].events & (EPOLLRDHUP|EPOLLIN|EPOLLOUT|EPOLLERR|EPOLLHUP))) {
			cur->fd = NULL;
			continue;
		}

		if (events[n].events & EPOLLRDHUP)
			u->eof = true;

		if (events[n].events & EPOLLIN)
			ev |= ULOOP_READ;

		if (events[n].events & EPOLLOUT)
			ev |= ULOOP_WRITE;

		cur->events = ev;
	}

	return nfds;
}

#endif

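/*
 * Check whether an event targets an fd whose callback is already running.
 * For edge-triggered fds the event is buffered in the stack entry and
 * replayed by uloop_run_events() instead of recursing into the callback;
 * events == -1 marks the entry dead (the fd was deleted mid-callback).
 */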
static bool uloop_fd_stack_event(struct uloop_fd *fd, int events)
{
	struct uloop_fd_stack *cur;

	/*
	 * Do not buffer events for level-triggered fds, they will keep firing.
	 * Caller needs to take care of recursion issues.
	 */
	if (!(fd->flags & ULOOP_EDGE_TRIGGER))
		return false;

	for (cur = fd_stack; cur; cur = cur->next) {
		if (cur->fd != fd)
			continue;

		if (events < 0)
			cur->fd = NULL;
		else
			cur->events |= events | ULOOP_EVENT_BUFFERED;

		return true;
	}

	return false;
}

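/*
 * Dispatch pending fd events. At most one fd's callback runs per invocation,
 * so control returns to uloop_run() between callbacks and timeouts stay
 * responsive even under heavy fd load.
 */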
static void uloop_run_events(int timeout)
{
	struct uloop_fd_event *cur;
	struct uloop_fd *fd;

	if (!cur_nfds) {
		cur_fd = 0;
		cur_nfds = uloop_fetch_events(timeout);
		if (cur_nfds < 0)
			cur_nfds = 0;
	}

	while (cur_nfds > 0) {
		struct uloop_fd_stack stack_cur;
		unsigned int events;

		cur = &cur_fds[cur_fd++];
		cur_nfds--;

		fd = cur->fd;
		events = cur->events;
		if (!fd)
			continue;

		if (!fd->cb)
			continue;

		if (uloop_fd_stack_event(fd, cur->events))
			continue;

		stack_cur.next = fd_stack;
		stack_cur.fd = fd;
		fd_stack = &stack_cur;
		do {
			stack_cur.events = 0;
			fd->cb(fd, events);
			/* replay any events buffered by uloop_fd_stack_event() */
			events = stack_cur.events & ULOOP_EVENT_MASK;
		} while (stack_cur.fd && events);
		fd_stack = stack_cur.next;

		return;
	}
}

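/*
 * Register (or re-register) an fd. Unless ULOOP_BLOCKING is set, the fd is
 * switched to O_NONBLOCK on first registration; passing neither ULOOP_READ
 * nor ULOOP_WRITE deletes the fd instead.
 */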
int uloop_fd_add(struct uloop_fd *sock, unsigned int flags)
{
	unsigned int fl;
	int ret;

	if (!(flags & (ULOOP_READ | ULOOP_WRITE)))
		return uloop_fd_delete(sock);

	if (!sock->registered && !(flags & ULOOP_BLOCKING)) {
		fl = fcntl(sock->fd, F_GETFL, 0);
		fl |= O_NONBLOCK;
		fcntl(sock->fd, F_SETFL, fl);
	}

	ret = register_poll(sock, flags);
	if (ret < 0)
		goto out;

	sock->registered = true;
	sock->eof = false;

out:
	return ret;
}

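/*
 * Remove an fd from the loop. Besides unregistering it from the kernel,
 * drop any events for it that were already fetched into cur_fds but not
 * yet dispatched; otherwise uloop_run_events() could still invoke the
 * callback of a deleted (and possibly freed or re-added) uloop_fd.
 */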
int uloop_fd_delete(struct uloop_fd *fd)
{
	int i;

	for (i = 0; i < cur_nfds; i++) {
		if (cur_fds[cur_fd + i].fd != fd)
			continue;

		cur_fds[cur_fd + i].fd = NULL;
	}

	if (!fd->registered)
		return 0;

	fd->registered = false;
	uloop_fd_stack_event(fd, -1);
	return __uloop_fd_delete(fd);
}

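/* Difference t1 - t2 in milliseconds (assumes the result fits in an int) */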
static int tv_diff(struct timeval *t1, struct timeval *t2)
{
	return
		(t1->tv_sec - t2->tv_sec) * 1000 +
		(t1->tv_usec - t2->tv_usec) / 1000;
}

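/*
 * Timeouts are kept sorted by expiry time. Typical usage (illustrative;
 * assumes the uloop_timeout declaration from uloop.h):
 *
 *	static void timer_cb(struct uloop_timeout *t)
 *	{
 *		uloop_timeout_set(t, 1000);	// re-arm: fire again in 1s
 *	}
 *
 *	static struct uloop_timeout timer = { .cb = timer_cb };
 *	uloop_timeout_set(&timer, 1000);
 */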
int uloop_timeout_add(struct uloop_timeout *timeout)
{
	struct uloop_timeout *tmp;
	struct list_head *h = &timeouts;

	if (timeout->pending)
		return -1;

	/* insert before the first entry that expires later */
	list_for_each_entry(tmp, &timeouts, list) {
		if (tv_diff(&tmp->time, &timeout->time) > 0) {
			h = &tmp->list;
			break;
		}
	}

	list_add_tail(&timeout->list, h);
	timeout->pending = true;

	return 0;
}

static void uloop_gettime(struct timeval *tv)
{
	struct timespec ts;

	/* use the monotonic clock so timers are immune to wall-clock changes */
	clock_gettime(CLOCK_MONOTONIC, &ts);
	tv->tv_sec = ts.tv_sec;
	tv->tv_usec = ts.tv_nsec / 1000;
}

int uloop_timeout_set(struct uloop_timeout *timeout, int msecs)
{
	struct timeval *time = &timeout->time;

	if (timeout->pending)
		uloop_timeout_cancel(timeout);

	uloop_gettime(time);

	time->tv_sec += msecs / 1000;
	time->tv_usec += (msecs % 1000) * 1000;

	/* carry at most one second can accumulate here */
	if (time->tv_usec >= 1000000) {
		time->tv_sec++;
		time->tv_usec -= 1000000;
	}

	return uloop_timeout_add(timeout);
}

int uloop_timeout_cancel(struct uloop_timeout *timeout)
{
	if (!timeout->pending)
		return -1;

	list_del(&timeout->list);
	timeout->pending = false;

	return 0;
}

int uloop_timeout_remaining(struct uloop_timeout *timeout)
{
	struct timeval now;

	if (!timeout->pending)
		return -1;

	uloop_gettime(&now);

	return tv_diff(&timeout->time, &now);
}

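/*
 * Child process tracking: entries are kept sorted by pid and the callback
 * receives the raw status from waitpid(). Illustrative usage (assumes a
 * forked child whose pid is in `pid`):
 *
 *	static void child_cb(struct uloop_process *p, int ret)
 *	{
 *		// inspect `ret` with WIFEXITED()/WEXITSTATUS() etc.
 *	}
 *
 *	static struct uloop_process child = { .cb = child_cb };
 *	child.pid = pid;
 *	uloop_process_add(&child);
 */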
int uloop_process_add(struct uloop_process *p)
{
	struct uloop_process *tmp;
	struct list_head *h = &processes;

	if (p->pending)
		return -1;

	/* keep the list sorted by pid */
	list_for_each_entry(tmp, &processes, list) {
		if (tmp->pid > p->pid) {
			h = &tmp->list;
			break;
		}
	}

	list_add_tail(&p->list, h);
	p->pending = true;

	return 0;
}

int uloop_process_delete(struct uloop_process *p)
{
	if (!p->pending)
		return -1;

	list_del(&p->list);
	p->pending = false;

	return 0;
}

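/*
 * Called from the main loop (not from signal context) once the SIGCHLD
 * handler has set do_sigchld: reap every exited child with a non-blocking
 * waitpid() and run the callback of the matching entry in the sorted list.
 */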
static void uloop_handle_processes(void)
{
	struct uloop_process *p, *tmp;
	pid_t pid;
	int ret;

	do_sigchld = false;

	while (1) {
		pid = waitpid(-1, &ret, WNOHANG);
		if (pid <= 0)
			return;

		list_for_each_entry_safe(p, tmp, &processes, list) {
			if (p->pid < pid)
				continue;

			if (p->pid > pid)
				break;

			uloop_process_delete(p);
			p->cb(p, ret);
		}
	}
}

/* signal handlers only set flags; the real work happens in uloop_run() */
static void uloop_handle_sigint(int signo)
{
	uloop_cancelled = true;
}

static void uloop_sigchld(int signo)
{
	do_sigchld = true;
}

static void uloop_setup_signals(void)
{
	struct sigaction s;

	memset(&s, 0, sizeof(struct sigaction));
	s.sa_handler = uloop_handle_sigint;
	s.sa_flags = 0;
	sigaction(SIGINT, &s, NULL);

	if (uloop_handle_sigchld) {
		s.sa_handler = uloop_sigchld;
		sigaction(SIGCHLD, &s, NULL);
	}
}

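/*
 * Milliseconds until the earliest timeout expires: 0 if it is already due,
 * -1 if no timeout is pending (which makes the fd wait block indefinitely).
 */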
static int uloop_get_next_timeout(struct timeval *tv)
{
	struct uloop_timeout *timeout;
	int diff;

	if (list_empty(&timeouts))
		return -1;

	timeout = list_first_entry(&timeouts, struct uloop_timeout, list);
	diff = tv_diff(&timeout->time, tv);
	if (diff < 0)
		return 0;

	return diff;
}

/* run the callbacks of all timeouts that have expired by *tv */
static void uloop_process_timeouts(struct timeval *tv)
{
	struct uloop_timeout *t;

	while (!list_empty(&timeouts)) {
		t = list_first_entry(&timeouts, struct uloop_timeout, list);

		if (tv_diff(&t->time, tv) > 0)
			break;

		uloop_timeout_cancel(t);
		if (t->cb)
			t->cb(t);
	}
}

static void uloop_clear_timeouts(void)
{
	struct uloop_timeout *t, *tmp;

	list_for_each_entry_safe(t, tmp, &timeouts, list)
		uloop_timeout_cancel(t);
}

static void uloop_clear_processes(void)
{
	struct uloop_process *p, *tmp;

	list_for_each_entry_safe(p, tmp, &processes, list)
		uloop_process_delete(p);
}

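/*
 * Main loop: each iteration runs expired timers first, then reaps children
 * if SIGCHLD was seen, and finally waits for fd events no longer than the
 * time until the next timeout is due.
 */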
void uloop_run(void)
{
	struct timeval tv;

	uloop_setup_signals();
	while (!uloop_cancelled)
	{
		uloop_gettime(&tv);
		uloop_process_timeouts(&tv);
		if (uloop_cancelled)
			break;

		if (do_sigchld)
			uloop_handle_processes();
		uloop_gettime(&tv);
		uloop_run_events(uloop_get_next_timeout(&tv));
	}
}

void uloop_done(void)
{
	if (poll_fd < 0)
		return;

	close(poll_fd);
	poll_fd = -1;

	uloop_clear_timeouts();
	uloop_clear_processes();
}