uloop: fix a segfault on deleting the first active timer from within another timer
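To see the failure mode this change addresses, here is a small standalone sketch (not part of this commit): one timer's callback cancels another timer that is still pending, i.e. it deletes the then-first active timer from inside another timer's callback. It assumes a build against this libubox revision with "uloop.h" on the include path, and it stops the loop by setting the public uloop_cancelled flag, which matches this version of the API (later revisions added uloop_end() for this).

#include <stdio.h>
#include <stdbool.h>

#include "uloop.h"

static void second_cb(struct uloop_timeout *t)
{
	/* Never reached: the first timer cancels this one. */
	printf("second timer fired unexpectedly\n");
}

static struct uloop_timeout second = { .cb = second_cb };

static void first_cb(struct uloop_timeout *t)
{
	/* Cancel the timer that is now at the head of the timeout list
	 * from within another timer's callback - the case named in the
	 * commit subject. */
	uloop_timeout_cancel(&second);
	printf("first timer fired, second timer cancelled\n");

	uloop_cancelled = true;	/* stop uloop_run() */
}

static struct uloop_timeout first = { .cb = first_cb };

int main(void)
{
	uloop_init();

	uloop_timeout_set(&first, 100);		/* fires first */
	uloop_timeout_set(&second, 200);	/* cancelled before it can fire */

	uloop_run();
	uloop_done();

	return 0;
}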
[project/libubox.git] / uloop.c
/*
 * Copyright (C) 2010 Felix Fietkau <nbd@openwrt.org>
 * Copyright (C) 2010 John Crispin <blogic@openwrt.org>
 * Copyright (C) 2010 Steven Barth <steven@midlink.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
 *
 */

#include <sys/time.h>
#include <sys/types.h>

#include <unistd.h>
#include <stdio.h>
#include <stdlib.h>
#include <errno.h>
#include <poll.h>
#include <string.h>
#include <fcntl.h>
#include <stdbool.h>
#include <signal.h>

#include "uloop.h"

#ifdef USE_KQUEUE
#include <sys/event.h>
#endif
#ifdef USE_EPOLL
#include <sys/epoll.h>
#endif
#include <sys/wait.h>


#ifndef ARRAY_SIZE
#define ARRAY_SIZE(arr) (sizeof(arr) / sizeof((arr)[0]))
#endif
#define ULOOP_MAX_EVENTS 10

static struct list_head timeouts = LIST_HEAD_INIT(timeouts);
static struct list_head processes = LIST_HEAD_INIT(processes);

static int poll_fd = -1;
bool uloop_cancelled = false;
bool uloop_handle_sigchld = true;
static bool do_sigchld = false;
static int cur_fd, cur_nfds;

#ifdef USE_KQUEUE

int uloop_init(void)
{
	if (poll_fd >= 0)
		return 0;

	poll_fd = kqueue();
	if (poll_fd < 0)
		return -1;

	return 0;
}


static uint16_t get_flags(unsigned int flags, unsigned int mask)
{
	uint16_t kflags = 0;

	if (!(flags & mask))
		return EV_DELETE;

	kflags = EV_ADD;
	if (flags & ULOOP_EDGE_TRIGGER)
		kflags |= EV_CLEAR;

	return kflags;
}

static struct kevent events[ULOOP_MAX_EVENTS];

static int register_poll(struct uloop_fd *fd, unsigned int flags)
{
	struct timespec timeout = { 0, 0 };
	struct kevent ev[2];
	unsigned int changed;
	int nev = 0;

	changed = fd->kqflags ^ flags;
	if (changed & ULOOP_EDGE_TRIGGER)
		changed |= flags;

	if (changed & ULOOP_READ) {
		uint16_t kflags = get_flags(flags, ULOOP_READ);
		EV_SET(&ev[nev++], fd->fd, EVFILT_READ, kflags, 0, 0, fd);
	}

	if (changed & ULOOP_WRITE) {
		uint16_t kflags = get_flags(flags, ULOOP_WRITE);
		EV_SET(&ev[nev++], fd->fd, EVFILT_WRITE, kflags, 0, 0, fd);
	}

	/* get_flags() already emits EV_DELETE per filter when the flags are
	 * cleared; the fifth kevent() argument is the size of the output
	 * event list, which is unused here and must be 0. */
	if (nev && (kevent(poll_fd, ev, nev, NULL, 0, &timeout) == -1))
		return -1;

	fd->kqflags = flags;
	return 0;
}

int uloop_fd_delete(struct uloop_fd *sock)
{
	int i;

	for (i = cur_fd + 1; i < cur_nfds; i++) {
		if (events[i].udata != sock)
			continue;

		events[i].udata = NULL;
	}

	sock->registered = false;
	return register_poll(sock, 0);
}

static void uloop_run_events(int timeout)
{
	struct timespec ts;
	int nfds, n;

	/* timeout == 0 means a timer is already due, so do not block */
	if (timeout >= 0) {
		ts.tv_sec = timeout / 1000;
		ts.tv_nsec = (timeout % 1000) * 1000000;
	}

	nfds = kevent(poll_fd, NULL, 0, events, ARRAY_SIZE(events), timeout >= 0 ? &ts : NULL);
	for(n = 0; n < nfds; ++n)
	{
		struct uloop_fd *u = events[n].udata;
		unsigned int ev = 0;

		if (!u)
			continue;

		if (events[n].flags & EV_ERROR) {
			u->error = true;
			uloop_fd_delete(u);
		}

		if(events[n].filter == EVFILT_READ)
			ev |= ULOOP_READ;
		else if (events[n].filter == EVFILT_WRITE)
			ev |= ULOOP_WRITE;

		if (events[n].flags & EV_EOF)
			u->eof = true;
		else if (!ev)
			continue;

		if (u->cb) {
			cur_fd = n;
			cur_nfds = nfds;
			u->cb(u, ev);
		}
	}
	cur_nfds = 0;
}

#endif

#ifdef USE_EPOLL

/**
 * FIXME: uClibc < 0.9.30.3 does not define EPOLLRDHUP for Linux >= 2.6.17
 */
#ifndef EPOLLRDHUP
#define EPOLLRDHUP 0x2000
#endif

int uloop_init(void)
{
	if (poll_fd >= 0)
		return 0;

	poll_fd = epoll_create(32);
	if (poll_fd < 0)
		return -1;

	fcntl(poll_fd, F_SETFD, fcntl(poll_fd, F_GETFD) | FD_CLOEXEC);
	return 0;
}

static int register_poll(struct uloop_fd *fd, unsigned int flags)
{
	struct epoll_event ev;
	int op = fd->registered ? EPOLL_CTL_MOD : EPOLL_CTL_ADD;

	memset(&ev, 0, sizeof(struct epoll_event));

	if (flags & ULOOP_READ)
		ev.events |= EPOLLIN | EPOLLRDHUP;

	if (flags & ULOOP_WRITE)
		ev.events |= EPOLLOUT;

	if (flags & ULOOP_EDGE_TRIGGER)
		ev.events |= EPOLLET;

	ev.data.fd = fd->fd;
	ev.data.ptr = fd;

	return epoll_ctl(poll_fd, op, fd->fd, &ev);
}

static struct epoll_event events[ULOOP_MAX_EVENTS];

int uloop_fd_delete(struct uloop_fd *sock)
{
	int i;

	for (i = cur_fd + 1; i < cur_nfds; i++) {
		if (events[i].data.ptr != sock)
			continue;

		events[i].data.ptr = NULL;
	}
	sock->registered = false;
	return epoll_ctl(poll_fd, EPOLL_CTL_DEL, sock->fd, 0);
}

static void uloop_run_events(int timeout)
{
	int n, nfds;

	nfds = epoll_wait(poll_fd, events, ARRAY_SIZE(events), timeout);
	for(n = 0; n < nfds; ++n)
	{
		struct uloop_fd *u = events[n].data.ptr;
		unsigned int ev = 0;

		if (!u)
			continue;

		if(events[n].events & (EPOLLERR|EPOLLHUP)) {
			u->error = true;
			uloop_fd_delete(u);
		}

		if(!(events[n].events & (EPOLLRDHUP|EPOLLIN|EPOLLOUT|EPOLLERR|EPOLLHUP)))
			continue;

		if(events[n].events & EPOLLRDHUP)
			u->eof = true;

		if(events[n].events & EPOLLIN)
			ev |= ULOOP_READ;

		if(events[n].events & EPOLLOUT)
			ev |= ULOOP_WRITE;

		if(u->cb) {
			cur_fd = n;
			cur_nfds = nfds;
			u->cb(u, ev);
		}
	}
	cur_nfds = 0;
}

#endif

int uloop_fd_add(struct uloop_fd *sock, unsigned int flags)
{
	unsigned int fl;
	int ret;

	if (!sock->registered && !(flags & ULOOP_BLOCKING)) {
		fl = fcntl(sock->fd, F_GETFL, 0);
		fl |= O_NONBLOCK;
		fcntl(sock->fd, F_SETFL, fl);
	}

	ret = register_poll(sock, flags);
	if (ret < 0)
		goto out;

	sock->registered = true;
	sock->eof = false;

out:
	return ret;
}

static int tv_diff(struct timeval *t1, struct timeval *t2)
{
	if (t1->tv_sec != t2->tv_sec)
		return (t1->tv_sec - t2->tv_sec) * 1000;
	else
		return (t1->tv_usec - t2->tv_usec) / 1000;
}

int uloop_timeout_add(struct uloop_timeout *timeout)
{
	struct uloop_timeout *tmp;
	struct list_head *h = &timeouts;

	if (timeout->pending)
		return -1;

	list_for_each_entry(tmp, &timeouts, list) {
		if (tv_diff(&tmp->time, &timeout->time) > 0) {
			h = &tmp->list;
			break;
		}
	}

	list_add_tail(&timeout->list, h);
	timeout->pending = true;

	return 0;
}

int uloop_timeout_set(struct uloop_timeout *timeout, int msecs)
{
	struct timeval *time = &timeout->time;

	if (timeout->pending)
		uloop_timeout_cancel(timeout);

	gettimeofday(&timeout->time, NULL);

	/* convert the sub-second part to microseconds and normalize */
	time->tv_sec += msecs / 1000;
	time->tv_usec += (msecs % 1000) * 1000;

	if (time->tv_usec >= 1000000) {
		time->tv_sec++;
		time->tv_usec -= 1000000;
	}

	return uloop_timeout_add(timeout);
}

int uloop_timeout_cancel(struct uloop_timeout *timeout)
{
	if (!timeout->pending)
		return -1;

	list_del(&timeout->list);
	timeout->pending = false;

	return 0;
}

int uloop_process_add(struct uloop_process *p)
{
	struct uloop_process *tmp;
	struct list_head *h = &processes;

	if (p->pending)
		return -1;

	list_for_each_entry(tmp, &processes, list) {
		if (tmp->pid > p->pid) {
			h = &tmp->list;
			break;
		}
	}

	list_add_tail(&p->list, h);
	p->pending = true;

	return 0;
}

int uloop_process_delete(struct uloop_process *p)
{
	if (!p->pending)
		return -1;

	list_del(&p->list);
	p->pending = false;

	return 0;
}

static void uloop_handle_processes(void)
{
	struct uloop_process *p, *tmp;
	pid_t pid;
	int ret;

	do_sigchld = false;

	while (1) {
		pid = waitpid(-1, &ret, WNOHANG);
		if (pid <= 0)
			return;

		list_for_each_entry_safe(p, tmp, &processes, list) {
			if (p->pid < pid)
				continue;

			if (p->pid > pid)
				break;

			uloop_process_delete(p);
			p->cb(p, ret);
		}
	}

}

static void uloop_handle_sigint(int signo)
{
	uloop_cancelled = true;
}

static void uloop_sigchld(int signo)
{
	do_sigchld = true;
}

static void uloop_setup_signals(void)
{
	struct sigaction s;

	memset(&s, 0, sizeof(struct sigaction));
	s.sa_handler = uloop_handle_sigint;
	s.sa_flags = 0;
	sigaction(SIGINT, &s, NULL);

	if (uloop_handle_sigchld) {
		s.sa_handler = uloop_sigchld;
		sigaction(SIGCHLD, &s, NULL);
	}
}

static int uloop_get_next_timeout(struct timeval *tv)
{
	struct uloop_timeout *timeout;
	int diff;

	if (list_empty(&timeouts))
		return -1;

	timeout = list_first_entry(&timeouts, struct uloop_timeout, list);
	diff = tv_diff(&timeout->time, tv);
	if (diff < 0)
		return 0;

	return diff;
}

static void uloop_process_timeouts(struct timeval *tv)
{
	struct uloop_timeout *t;

	/* Re-read the list head on every iteration: a callback may cancel
	 * any pending timer (including the current head of the list), so
	 * no cursor is kept across the callback invocation. */
	while (!list_empty(&timeouts)) {
		t = list_first_entry(&timeouts, struct uloop_timeout, list);

		if (tv_diff(&t->time, tv) > 0)
			break;

		uloop_timeout_cancel(t);
		if (t->cb)
			t->cb(t);
	}
}

static void uloop_clear_timeouts(void)
{
	struct uloop_timeout *t, *tmp;

	list_for_each_entry_safe(t, tmp, &timeouts, list)
		uloop_timeout_cancel(t);
}

static void uloop_clear_processes(void)
{
	struct uloop_process *p, *tmp;

	list_for_each_entry_safe(p, tmp, &processes, list)
		uloop_process_delete(p);
}

void uloop_run(void)
{
	struct timeval tv;

	uloop_setup_signals();
	while(!uloop_cancelled)
	{
		gettimeofday(&tv, NULL);
		uloop_process_timeouts(&tv);
		if (uloop_cancelled)
			break;

		if (do_sigchld)
			uloop_handle_processes();
		uloop_run_events(uloop_get_next_timeout(&tv));
	}
}

void uloop_done(void)
{
	if (poll_fd < 0)
		return;

	close(poll_fd);
	poll_fd = -1;

	uloop_clear_timeouts();
	uloop_clear_processes();
}