 * Copyright (C) 2010 Felix Fietkau <nbd@openwrt.org>
 * Copyright (C) 2010 John Crispin <blogic@openwrt.org>
 * Copyright (C) 2010 Steven Barth <steven@midlink.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
23 #include <sys/types.h>
38 #include <sys/event.h>
41 #include <sys/epoll.h>
45 #define ULOOP_MAX_EVENTS 10
47 static struct list_head timeouts
= LIST_HEAD_INIT(timeouts
);
48 static struct list_head processes
= LIST_HEAD_INIT(processes
);
50 static int poll_fd
= -1;
51 bool uloop_cancelled
= false;
52 bool uloop_handle_sigchld
= true;
53 static bool do_sigchld
= false;
54 static int cur_fd
, cur_nfds
;
60 struct timespec timeout
= { 0, 0 };
61 struct kevent ev
= {};
70 EV_SET(&ev
, SIGCHLD
, EVFILT_SIGNAL
, EV_ADD
, 0, 0, 0);
71 kevent(poll_fd
, &ev
, 1, NULL
, 0, &timeout
);
/*
 * Translate uloop flag bits into kevent action flags for a single filter
 * (mask is ULOOP_READ or ULOOP_WRITE).
 * NOTE(review): interior lines of this function were lost in this chunk;
 * the EV_DELETE/EV_ADD/EV_CLEAR logic below is reconstructed — verify
 * against upstream libubox before relying on it.
 */
static uint16_t get_flags(unsigned int flags, unsigned int mask)
{
	uint16_t kflags = 0;

	if (!(flags & mask))
		return EV_DELETE;

	kflags = EV_ADD;
	if (flags & ULOOP_EDGE_TRIGGER)
		kflags |= EV_CLEAR;

	return kflags;
}
/* Scratch buffer filled by kevent() in uloop_run_events(). */
static struct kevent events[ULOOP_MAX_EVENTS];
/*
 * Apply the requested uloop flags to the kqueue: one change record per
 * filter (read and write), submitted with a zero timeout so the call
 * never blocks.  Returns 0 on success, -1 on kevent() failure.
 */
static int register_kevent(struct uloop_fd *fd, unsigned int flags)
{
	struct timespec timeout = { 0, 0 };
	struct kevent ev[2];
	int nev = 0;
	unsigned int fl = 0;
	uint16_t kflags;

	/* Deferred edge-trigger: register level-triggered first; the real
	 * edge-triggered registration happens after the first event fires. */
	if (flags & ULOOP_EDGE_DEFER)
		flags &= ~ULOOP_EDGE_TRIGGER;

	kflags = get_flags(flags, ULOOP_READ);
	EV_SET(&ev[nev++], fd->fd, EVFILT_READ, kflags, 0, 0, fd);

	kflags = get_flags(flags, ULOOP_WRITE);
	EV_SET(&ev[nev++], fd->fd, EVFILT_WRITE, kflags, 0, 0, fd);

	/* NOTE(review): the lines computing 'fl' between the EV_SET calls and
	 * the kevent() call were lost in this chunk — confirm against upstream
	 * libubox; 'fl' is passed through unchanged here. */
	if (kevent(poll_fd, ev, nev, NULL, fl, &timeout) == -1)
		return -1;

	fd->flags = flags;
	return 0;
}
119 static int register_poll(struct uloop_fd
*fd
, unsigned int flags
)
121 if (flags
& ULOOP_EDGE_TRIGGER
)
122 flags
|= ULOOP_EDGE_DEFER
;
124 flags
&= ~ULOOP_EDGE_DEFER
;
127 return register_kevent(fd
, flags
);
/*
 * Unregister an fd from the loop.  Any reference to it still queued in the
 * current event batch is cleared so uloop_run_events() will not dispatch
 * to a descriptor the caller may be about to free.
 */
int uloop_fd_delete(struct uloop_fd *sock)
{
	int i;

	for (i = cur_fd + 1; i < cur_nfds; i++) {
		if (events[i].udata != sock)
			continue;

		events[i].udata = NULL;
	}

	sock->registered = false;
	/* flags == 0 makes get_flags() emit EV_DELETE for both filters */
	return register_poll(sock, 0);
}
/*
 * Wait up to 'timeout' ms (negative = block forever) for kqueue events and
 * dispatch each to its uloop_fd callback.
 * NOTE(review): many interior lines of this function were lost in this
 * chunk; the error/EOF handling and callback dispatch below are
 * reconstructed — verify against upstream libubox.
 */
static void uloop_run_events(int timeout)
{
	struct timespec ts;
	int nfds, n;

	if (timeout >= 0) {
		ts.tv_sec = timeout / 1000;
		ts.tv_nsec = (timeout % 1000) * 1000000;
	}

	nfds = kevent(poll_fd, NULL, 0, events, ARRAY_SIZE(events),
		      timeout >= 0 ? &ts : NULL);
	for (n = 0; n < nfds; ++n) {
		struct uloop_fd *u = events[n].udata;
		unsigned int ev = 0;

		if (!u)
			continue;	/* cleared by uloop_fd_delete() mid-batch */

		if (events[n].flags & EV_ERROR) {
			u->error = true;
			uloop_fd_delete(u);
		}

		if (events[n].filter == EVFILT_READ)
			ev |= ULOOP_READ;
		else if (events[n].filter == EVFILT_WRITE)
			ev |= ULOOP_WRITE;

		if (events[n].flags & EV_EOF)
			u->eof = true;
		else if (!ev)
			continue;

		if (u->cb) {
			/* remember position so deletes can scrub the rest */
			cur_fd = n;
			cur_nfds = nfds;
			u->cb(u, ev);
		}

		/* Upgrade a deferred registration to real edge-trigger now
		 * that the first (level-triggered) event has been consumed. */
		if (u->flags & ULOOP_EDGE_DEFER) {
			u->flags &= ~ULOOP_EDGE_DEFER;
			register_kevent(u, u->flags);
		}
	}
	cur_nfds = 0;
}
197 * FIXME: uClibc < 0.9.30.3 does not define EPOLLRDHUP for Linux >= 2.6.17
200 #define EPOLLRDHUP 0x2000
208 poll_fd
= epoll_create(32);
212 fcntl(poll_fd
, F_SETFD
, fcntl(poll_fd
, F_GETFD
) | FD_CLOEXEC
);
216 static int register_poll(struct uloop_fd
*fd
, unsigned int flags
)
218 struct epoll_event ev
;
219 int op
= fd
->registered
? EPOLL_CTL_MOD
: EPOLL_CTL_ADD
;
221 memset(&ev
, 0, sizeof(struct epoll_event
));
223 if (flags
& ULOOP_READ
)
224 ev
.events
|= EPOLLIN
| EPOLLRDHUP
;
226 if (flags
& ULOOP_WRITE
)
227 ev
.events
|= EPOLLOUT
;
229 if (flags
& ULOOP_EDGE_TRIGGER
)
230 ev
.events
|= EPOLLET
;
235 return epoll_ctl(poll_fd
, op
, fd
->fd
, &ev
);
238 static struct epoll_event events
[ULOOP_MAX_EVENTS
];
240 int uloop_fd_delete(struct uloop_fd
*sock
)
244 for (i
= cur_fd
+ 1; i
< cur_nfds
; i
++) {
245 if (events
[i
].data
.ptr
!= sock
)
248 events
[i
].data
.ptr
= NULL
;
250 sock
->registered
= false;
251 return epoll_ctl(poll_fd
, EPOLL_CTL_DEL
, sock
->fd
, 0);
254 static void uloop_run_events(int timeout
)
258 nfds
= epoll_wait(poll_fd
, events
, ARRAY_SIZE(events
), timeout
);
259 for(n
= 0; n
< nfds
; ++n
)
261 struct uloop_fd
*u
= events
[n
].data
.ptr
;
267 if(events
[n
].events
& (EPOLLERR
|EPOLLHUP
)) {
272 if(!(events
[n
].events
& (EPOLLRDHUP
|EPOLLIN
|EPOLLOUT
|EPOLLERR
|EPOLLHUP
)))
275 if(events
[n
].events
& EPOLLRDHUP
)
278 if(events
[n
].events
& EPOLLIN
)
281 if(events
[n
].events
& EPOLLOUT
)
295 int uloop_fd_add(struct uloop_fd
*sock
, unsigned int flags
)
300 if (!sock
->registered
&& !(flags
& ULOOP_BLOCKING
)) {
301 fl
= fcntl(sock
->fd
, F_GETFL
, 0);
303 fcntl(sock
->fd
, F_SETFL
, fl
);
306 ret
= register_poll(sock
, flags
);
310 sock
->registered
= true;
/* Difference t1 - t2 in milliseconds (positive when t1 is later). */
static int tv_diff(struct timeval *t1, struct timeval *t2)
{
	int msec = (t1->tv_sec - t2->tv_sec) * 1000;

	msec += (t1->tv_usec - t2->tv_usec) / 1000;
	return msec;
}
324 int uloop_timeout_add(struct uloop_timeout
*timeout
)
326 struct uloop_timeout
*tmp
;
327 struct list_head
*h
= &timeouts
;
329 if (timeout
->pending
)
332 list_for_each_entry(tmp
, &timeouts
, list
) {
333 if (tv_diff(&tmp
->time
, &timeout
->time
) > 0) {
339 list_add_tail(&timeout
->list
, h
);
340 timeout
->pending
= true;
345 int uloop_timeout_set(struct uloop_timeout
*timeout
, int msecs
)
347 struct timeval
*time
= &timeout
->time
;
349 if (timeout
->pending
)
350 uloop_timeout_cancel(timeout
);
352 gettimeofday(&timeout
->time
, NULL
);
354 time
->tv_sec
+= msecs
/ 1000;
355 time
->tv_usec
+= (msecs
% 1000) * 1000;
357 if (time
->tv_usec
> 1000000) {
359 time
->tv_usec
%= 1000000;
362 return uloop_timeout_add(timeout
);
365 int uloop_timeout_cancel(struct uloop_timeout
*timeout
)
367 if (!timeout
->pending
)
370 list_del(&timeout
->list
);
371 timeout
->pending
= false;
376 int uloop_process_add(struct uloop_process
*p
)
378 struct uloop_process
*tmp
;
379 struct list_head
*h
= &processes
;
384 list_for_each_entry(tmp
, &processes
, list
) {
385 if (tmp
->pid
> p
->pid
) {
391 list_add_tail(&p
->list
, h
);
397 int uloop_process_delete(struct uloop_process
*p
)
408 static void uloop_handle_processes(void)
410 struct uloop_process
*p
, *tmp
;
417 pid
= waitpid(-1, &ret
, WNOHANG
);
421 list_for_each_entry_safe(p
, tmp
, &processes
, list
) {
428 uloop_process_delete(p
);
435 static void uloop_handle_sigint(int signo
)
437 uloop_cancelled
= true;
440 static void uloop_sigchld(int signo
)
445 static void uloop_setup_signals(void)
449 memset(&s
, 0, sizeof(struct sigaction
));
450 s
.sa_handler
= uloop_handle_sigint
;
452 sigaction(SIGINT
, &s
, NULL
);
454 if (uloop_handle_sigchld
) {
455 s
.sa_handler
= uloop_sigchld
;
456 sigaction(SIGCHLD
, &s
, NULL
);
460 static int uloop_get_next_timeout(struct timeval
*tv
)
462 struct uloop_timeout
*timeout
;
465 if (list_empty(&timeouts
))
468 timeout
= list_first_entry(&timeouts
, struct uloop_timeout
, list
);
469 diff
= tv_diff(&timeout
->time
, tv
);
476 static void uloop_process_timeouts(struct timeval
*tv
)
478 struct uloop_timeout
*t
;
480 while (!list_empty(&timeouts
)) {
481 t
= list_first_entry(&timeouts
, struct uloop_timeout
, list
);
483 if (tv_diff(&t
->time
, tv
) > 0)
486 uloop_timeout_cancel(t
);
492 static void uloop_clear_timeouts(void)
494 struct uloop_timeout
*t
, *tmp
;
496 list_for_each_entry_safe(t
, tmp
, &timeouts
, list
)
497 uloop_timeout_cancel(t
);
500 static void uloop_clear_processes(void)
502 struct uloop_process
*p
, *tmp
;
504 list_for_each_entry_safe(p
, tmp
, &processes
, list
)
505 uloop_process_delete(p
);
512 uloop_setup_signals();
513 while(!uloop_cancelled
)
515 gettimeofday(&tv
, NULL
);
516 uloop_process_timeouts(&tv
);
521 uloop_handle_processes();
522 uloop_run_events(uloop_get_next_timeout(&tv
));
526 void uloop_done(void)
534 uloop_clear_timeouts();
535 uloop_clear_processes();