utils: prefer using gcc's builtin swapping functions, which support constant folding
[project/libubox.git] / uloop.c
/*
 * uloop - event loop implementation
 *
 * Copyright (C) 2010-2013 Felix Fietkau <nbd@openwrt.org>
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */
#include <sys/time.h>
#include <sys/types.h>

#include <unistd.h>
#include <stdio.h>
#include <stdlib.h>
#include <errno.h>
#include <poll.h>
#include <string.h>
#include <fcntl.h>
#include <signal.h>
#include <stdbool.h>
#include <time.h>

#include "uloop.h"
#include "utils.h"

#ifdef USE_KQUEUE
#include <sys/event.h>
#endif
#ifdef USE_EPOLL
#include <sys/epoll.h>
#endif
#include <sys/wait.h>

#define ULOOP_MAX_EVENTS 10

static struct list_head timeouts = LIST_HEAD_INIT(timeouts);
static struct list_head processes = LIST_HEAD_INIT(processes);

static int poll_fd = -1;
bool uloop_cancelled = false;
bool uloop_handle_sigchld = true;
static bool do_sigchld = false;
static int cur_fd, cur_nfds;

#ifdef USE_KQUEUE

int uloop_init(void)
{
	struct timespec timeout = { 0, 0 };
	struct kevent ev = {};

	if (poll_fd >= 0)
		return 0;

	poll_fd = kqueue();
	if (poll_fd < 0)
		return -1;

	EV_SET(&ev, SIGCHLD, EVFILT_SIGNAL, EV_ADD, 0, 0, 0);
	kevent(poll_fd, &ev, 1, NULL, 0, &timeout);

	return 0;
}

static uint16_t get_flags(unsigned int flags, unsigned int mask)
{
	uint16_t kflags = 0;

	if (!(flags & mask))
		return EV_DELETE;

	kflags = EV_ADD;
	if (flags & ULOOP_EDGE_TRIGGER)
		kflags |= EV_CLEAR;

	return kflags;
}

static struct kevent events[ULOOP_MAX_EVENTS];

static int register_kevent(struct uloop_fd *fd, unsigned int flags)
{
	struct timespec timeout = { 0, 0 };
	struct kevent ev[2];
	int nev = 0;
	unsigned int changed;
	uint16_t kflags;

	if (flags & ULOOP_EDGE_DEFER)
		flags &= ~ULOOP_EDGE_TRIGGER;

	changed = flags ^ fd->flags;
	if (changed & ULOOP_EDGE_TRIGGER)
		changed |= flags;

	if (changed & ULOOP_READ) {
		kflags = get_flags(flags, ULOOP_READ);
		EV_SET(&ev[nev++], fd->fd, EVFILT_READ, kflags, 0, 0, fd);
	}

	if (changed & ULOOP_WRITE) {
		kflags = get_flags(flags, ULOOP_WRITE);
		EV_SET(&ev[nev++], fd->fd, EVFILT_WRITE, kflags, 0, 0, fd);
	}

	fd->flags = flags;

	/* eventlist is NULL, so the nevents argument must be 0; the old
	 * code passed a flags variable here by mistake. Deletion is
	 * already handled per filter via get_flags() returning EV_DELETE. */
	if (kevent(poll_fd, ev, nev, NULL, 0, &timeout) == -1)
		return -1;

	return 0;
}

static int register_poll(struct uloop_fd *fd, unsigned int flags)
{
	if (flags & ULOOP_EDGE_TRIGGER)
		flags |= ULOOP_EDGE_DEFER;
	else
		flags &= ~ULOOP_EDGE_DEFER;

	return register_kevent(fd, flags);
}

int uloop_fd_delete(struct uloop_fd *sock)
{
	int i;

	for (i = cur_fd + 1; i < cur_nfds; i++) {
		if (events[i].udata != sock)
			continue;

		events[i].udata = NULL;
	}

	sock->registered = false;
	return register_poll(sock, 0);
}

static void uloop_run_events(int timeout)
{
	struct timespec ts;
	int nfds, n;

	if (timeout >= 0) {
		ts.tv_sec = timeout / 1000;
		ts.tv_nsec = (timeout % 1000) * 1000000;
	}

	nfds = kevent(poll_fd, NULL, 0, events, ARRAY_SIZE(events), timeout >= 0 ? &ts : NULL);
	for (n = 0; n < nfds; ++n) {
		struct uloop_fd *u = events[n].udata;
		unsigned int ev = 0;

		if (!u)
			continue;

		if (events[n].flags & EV_ERROR) {
			u->error = true;
			uloop_fd_delete(u);
		}

		if (events[n].filter == EVFILT_READ)
			ev |= ULOOP_READ;
		else if (events[n].filter == EVFILT_WRITE)
			ev |= ULOOP_WRITE;

		if (events[n].flags & EV_EOF)
			u->eof = true;
		else if (!ev)
			continue;

		if (u->cb) {
			cur_fd = n;
			cur_nfds = nfds;
			u->cb(u, ev);
			if (u->flags & ULOOP_EDGE_DEFER) {
				u->flags &= ~ULOOP_EDGE_DEFER;
				register_kevent(u, u->flags);
			}
		}
	}
	cur_nfds = 0;
}

#endif

#ifdef USE_EPOLL

/**
 * FIXME: uClibc < 0.9.30.3 does not define EPOLLRDHUP for Linux >= 2.6.17
 */
#ifndef EPOLLRDHUP
#define EPOLLRDHUP 0x2000
#endif

int uloop_init(void)
{
	if (poll_fd >= 0)
		return 0;

	poll_fd = epoll_create(32);
	if (poll_fd < 0)
		return -1;

	fcntl(poll_fd, F_SETFD, fcntl(poll_fd, F_GETFD) | FD_CLOEXEC);
	return 0;
}

static int register_poll(struct uloop_fd *fd, unsigned int flags)
{
	struct epoll_event ev;
	int op = fd->registered ? EPOLL_CTL_MOD : EPOLL_CTL_ADD;

	memset(&ev, 0, sizeof(struct epoll_event));

	if (flags & ULOOP_READ)
		ev.events |= EPOLLIN | EPOLLRDHUP;

	if (flags & ULOOP_WRITE)
		ev.events |= EPOLLOUT;

	if (flags & ULOOP_EDGE_TRIGGER)
		ev.events |= EPOLLET;

	/* ev.data is a union; storing the pointer is enough, a separate
	 * ev.data.fd assignment would just be overwritten by it. */
	ev.data.ptr = fd;

	return epoll_ctl(poll_fd, op, fd->fd, &ev);
}

static struct epoll_event events[ULOOP_MAX_EVENTS];

int uloop_fd_delete(struct uloop_fd *sock)
{
	int i;

	if (!sock->registered)
		return 0;

	for (i = cur_fd + 1; i < cur_nfds; i++) {
		if (events[i].data.ptr != sock)
			continue;

		events[i].data.ptr = NULL;
	}
	sock->registered = false;
	return epoll_ctl(poll_fd, EPOLL_CTL_DEL, sock->fd, NULL);
}

static void uloop_run_events(int timeout)
{
	int n, nfds;

	nfds = epoll_wait(poll_fd, events, ARRAY_SIZE(events), timeout);
	for (n = 0; n < nfds; ++n) {
		struct uloop_fd *u = events[n].data.ptr;
		unsigned int ev = 0;

		if (!u)
			continue;

		if (events[n].events & (EPOLLERR | EPOLLHUP)) {
			u->error = true;
			uloop_fd_delete(u);
		}

		if (!(events[n].events & (EPOLLRDHUP | EPOLLIN | EPOLLOUT | EPOLLERR | EPOLLHUP)))
			continue;

		if (events[n].events & EPOLLRDHUP)
			u->eof = true;

		if (events[n].events & EPOLLIN)
			ev |= ULOOP_READ;

		if (events[n].events & EPOLLOUT)
			ev |= ULOOP_WRITE;

		if (u->cb) {
			cur_fd = n;
			cur_nfds = nfds;
			u->cb(u, ev);
		}
	}
	cur_nfds = 0;
}

#endif

int uloop_fd_add(struct uloop_fd *sock, unsigned int flags)
{
	unsigned int fl;
	int ret;

	if (!(flags & (ULOOP_READ | ULOOP_WRITE)))
		return uloop_fd_delete(sock);

	if (!sock->registered && !(flags & ULOOP_BLOCKING)) {
		fl = fcntl(sock->fd, F_GETFL, 0);
		fl |= O_NONBLOCK;
		fcntl(sock->fd, F_SETFL, fl);
	}

	ret = register_poll(sock, flags);
	if (ret < 0)
		goto out;

	sock->registered = true;
	sock->eof = false;

out:
	return ret;
}
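
/*
 * Usage sketch (illustrative, not part of the original file): serving a
 * readable descriptor through the loop. echo_cb and the socket setup
 * are hypothetical; only uloop_fd_add(), uloop_fd_delete() and the
 * ULOOP_* flags come from this API.
 *
 *	static void echo_cb(struct uloop_fd *u, unsigned int events)
 *	{
 *		char buf[256];
 *
 *		if ((events & ULOOP_READ) &&
 *		    read(u->fd, buf, sizeof(buf)) <= 0)
 *			uloop_fd_delete(u);	// error or EOF: stop watching
 *	}
 *
 *	static struct uloop_fd echo_fd = { .cb = echo_cb };
 *
 *	echo_fd.fd = sock;			// an already-open socket
 *	uloop_fd_add(&echo_fd, ULOOP_READ);	// fd is made non-blocking here
 */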

static int tv_diff(struct timeval *t1, struct timeval *t2)
{
	return
		(t1->tv_sec - t2->tv_sec) * 1000 +
		(t1->tv_usec - t2->tv_usec) / 1000;
}
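
/*
 * Worked example (added note): tv_diff() returns t1 - t2 in whole
 * milliseconds. For t1 = {2, 500000} and t2 = {1, 250000} it yields
 * (2 - 1) * 1000 + (500000 - 250000) / 1000 = 1250 ms; integer
 * division simply drops any sub-millisecond remainder.
 */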

int uloop_timeout_add(struct uloop_timeout *timeout)
{
	struct uloop_timeout *tmp;
	struct list_head *h = &timeouts;

	if (timeout->pending)
		return -1;

	list_for_each_entry(tmp, &timeouts, list) {
		if (tv_diff(&tmp->time, &timeout->time) > 0) {
			h = &tmp->list;
			break;
		}
	}

	list_add_tail(&timeout->list, h);
	timeout->pending = true;

	return 0;
}

static void uloop_gettime(struct timeval *tv)
{
	struct timespec ts;

	clock_gettime(CLOCK_MONOTONIC, &ts);
	tv->tv_sec = ts.tv_sec;
	tv->tv_usec = ts.tv_nsec / 1000;
}

int uloop_timeout_set(struct uloop_timeout *timeout, int msecs)
{
	struct timeval *time = &timeout->time;

	if (timeout->pending)
		uloop_timeout_cancel(timeout);

	uloop_gettime(&timeout->time);

	time->tv_sec += msecs / 1000;
	time->tv_usec += (msecs % 1000) * 1000;

	if (time->tv_usec >= 1000000) {
		time->tv_sec++;
		time->tv_usec %= 1000000;
	}

	return uloop_timeout_add(timeout);
}
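
/*
 * Usage sketch (illustrative): a self-rearming one-second timer.
 * uloop_timeout_set() cancels a pending timeout first, so re-arming
 * from inside the callback is safe. tick_cb is a made-up name.
 *
 *	static void tick_cb(struct uloop_timeout *t)
 *	{
 *		printf("tick\n");
 *		uloop_timeout_set(t, 1000);	// fire again in 1000 ms
 *	}
 *
 *	static struct uloop_timeout tick = { .cb = tick_cb };
 *
 *	uloop_timeout_set(&tick, 1000);
 */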

int uloop_timeout_cancel(struct uloop_timeout *timeout)
{
	if (!timeout->pending)
		return -1;

	list_del(&timeout->list);
	timeout->pending = false;

	return 0;
}

int uloop_timeout_remaining(struct uloop_timeout *timeout)
{
	struct timeval now;

	if (!timeout->pending)
		return -1;

	uloop_gettime(&now);

	return tv_diff(&timeout->time, &now);
}

int uloop_process_add(struct uloop_process *p)
{
	struct uloop_process *tmp;
	struct list_head *h = &processes;

	if (p->pending)
		return -1;

	list_for_each_entry(tmp, &processes, list) {
		if (tmp->pid > p->pid) {
			h = &tmp->list;
			break;
		}
	}

	list_add_tail(&p->list, h);
	p->pending = true;

	return 0;
}
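
/*
 * Usage sketch (illustrative): reaping a forked child through the loop
 * instead of blocking in waitpid(). child_cb is a made-up name; the
 * status word is what waitpid() reports in uloop_handle_processes()
 * below.
 *
 *	static void child_cb(struct uloop_process *p, int ret)
 *	{
 *		if (WIFEXITED(ret))
 *			printf("pid %d exited: %d\n", (int) p->pid, WEXITSTATUS(ret));
 *	}
 *
 *	static struct uloop_process child = { .cb = child_cb };
 *
 *	child.pid = fork();
 *	if (child.pid == 0) {
 *		execl("/bin/true", "true", (char *) NULL);
 *		_exit(127);
 *	}
 *	if (child.pid > 0)
 *		uloop_process_add(&child);
 */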

int uloop_process_delete(struct uloop_process *p)
{
	if (!p->pending)
		return -1;

	list_del(&p->list);
	p->pending = false;

	return 0;
}

static void uloop_handle_processes(void)
{
	struct uloop_process *p, *tmp;
	pid_t pid;
	int ret;

	do_sigchld = false;

	while (1) {
		pid = waitpid(-1, &ret, WNOHANG);
		if (pid <= 0)
			return;

		list_for_each_entry_safe(p, tmp, &processes, list) {
			if (p->pid < pid)
				continue;

			if (p->pid > pid)
				break;

			uloop_process_delete(p);
			p->cb(p, ret);
		}
	}
}

static void uloop_handle_sigint(int signo)
{
	uloop_cancelled = true;
}

static void uloop_sigchld(int signo)
{
	do_sigchld = true;
}

static void uloop_setup_signals(void)
{
	struct sigaction s;

	memset(&s, 0, sizeof(struct sigaction));
	s.sa_handler = uloop_handle_sigint;
	s.sa_flags = 0;
	sigaction(SIGINT, &s, NULL);

	if (uloop_handle_sigchld) {
		s.sa_handler = uloop_sigchld;
		sigaction(SIGCHLD, &s, NULL);
	}
}

static int uloop_get_next_timeout(struct timeval *tv)
{
	struct uloop_timeout *timeout;
	int diff;

	if (list_empty(&timeouts))
		return -1;

	timeout = list_first_entry(&timeouts, struct uloop_timeout, list);
	diff = tv_diff(&timeout->time, tv);
	if (diff < 0)
		return 0;

	return diff;
}

static void uloop_process_timeouts(struct timeval *tv)
{
	struct uloop_timeout *t;

	while (!list_empty(&timeouts)) {
		t = list_first_entry(&timeouts, struct uloop_timeout, list);

		if (tv_diff(&t->time, tv) > 0)
			break;

		uloop_timeout_cancel(t);
		if (t->cb)
			t->cb(t);
	}
}

static void uloop_clear_timeouts(void)
{
	struct uloop_timeout *t, *tmp;

	list_for_each_entry_safe(t, tmp, &timeouts, list)
		uloop_timeout_cancel(t);
}

static void uloop_clear_processes(void)
{
	struct uloop_process *p, *tmp;

	list_for_each_entry_safe(p, tmp, &processes, list)
		uloop_process_delete(p);
}

void uloop_run(void)
{
	struct timeval tv;

	uloop_setup_signals();
	while (!uloop_cancelled) {
		uloop_gettime(&tv);
		uloop_process_timeouts(&tv);
		if (uloop_cancelled)
			break;

		if (do_sigchld)
			uloop_handle_processes();
		uloop_run_events(uloop_get_next_timeout(&tv));
	}
}

void uloop_done(void)
{
	if (poll_fd < 0)
		return;

	close(poll_fd);
	poll_fd = -1;

	uloop_clear_timeouts();
	uloop_clear_processes();
}
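
/*
 * Putting it together, a minimal sketch of a program built on this
 * loop (the tick timer is the hypothetical one from the comment next
 * to uloop_timeout_set() above):
 *
 *	int main(void)
 *	{
 *		if (uloop_init() < 0)
 *			return 1;
 *
 *		uloop_timeout_set(&tick, 1000);
 *		uloop_run();	// blocks until SIGINT flips uloop_cancelled
 *		uloop_done();	// closes poll_fd, clears timers and processes
 *		return 0;
 *	}
 */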