unsigned int events;
};
+/*
+ * Stack entry tracking an fd whose callback is currently executing.
+ * Re-delivered edge-triggered events for such an fd are buffered here
+ * instead of re-entering its callback recursively.
+ */
+struct uloop_fd_stack {
+ struct uloop_fd_stack *next;
+ struct uloop_fd *fd;
+ unsigned int events;
+};
+
+/* Head of the stack of fd callbacks currently being dispatched. */
+static struct uloop_fd_stack *fd_stack = NULL;
+
#define ULOOP_MAX_EVENTS 10
static struct list_head timeouts = LIST_HEAD_INIT(timeouts);
else if (!ev)
cur->fd = NULL;
+ cur->events = ev;
if (u->flags & ULOOP_EDGE_DEFER) {
u->flags &= ~ULOOP_EDGE_DEFER;
+ u->flags |= ULOOP_EDGE_TRIGGER;
register_kevent(u, u->flags);
}
}
ev.data.fd = fd->fd;
ev.data.ptr = fd;
+ fd->flags = flags;
return epoll_ctl(poll_fd, op, fd->fd, &ev);
}
static int __uloop_fd_delete(struct uloop_fd *sock)
{
+ sock->flags = 0;
return epoll_ctl(poll_fd, EPOLL_CTL_DEL, sock->fd, 0);
}
#endif
+/*
+ * If @fd is already being dispatched (i.e. it is on fd_stack), fold the
+ * new @events into the pending stack entry instead of re-entering its
+ * callback. A negative @events instead marks the stacked entry's fd as
+ * deleted so the dispatch loop stops redelivering to it.
+ *
+ * Returns true when the event was absorbed by a stacked entry; false
+ * means the caller should dispatch the event normally.
+ */
+static bool uloop_fd_stack_event(struct uloop_fd *fd, int events)
+{
+ struct uloop_fd_stack *cur;
+
+ /*
+ * Do not buffer events for level-triggered fds, they will keep firing.
+ * Caller needs to take care of recursion issues.
+ */
+ if (!(fd->flags & ULOOP_EDGE_TRIGGER))
+ return false;
+
+ for (cur = fd_stack; cur; cur = cur->next) {
+ if (cur->fd != fd)
+ continue;
+
+ if (events < 0)
+ cur->fd = NULL;
+ else
+ cur->events |= events | ULOOP_EVENT_BUFFERED;
+
+ return true;
+ }
+
+ return false;
+}
+
static void uloop_run_events(int timeout)
{
struct uloop_fd_event *cur;
}
while (cur_nfds > 0) {
+ struct uloop_fd_stack stack_cur;
+ unsigned int events;
+
cur = &cur_fds[cur_fd++];
cur_nfds--;
fd = cur->fd;
+ events = cur->events;
if (!fd)
continue;
if (!fd->cb)
continue;
- fd->cb(fd, cur->events);
+ if (uloop_fd_stack_event(fd, cur->events))
+ continue;
+
+ stack_cur.next = fd_stack;
+ stack_cur.fd = fd;
+ fd_stack = &stack_cur;
+ do {
+ stack_cur.events = 0;
+ fd->cb(fd, events);
+ events = stack_cur.events & ULOOP_EVENT_MASK;
+ } while (stack_cur.fd && events);
+ fd_stack = stack_cur.next;
+
return;
}
}
{
int i;
- if (!fd->registered)
- return 0;
-
for (i = 0; i < cur_nfds; i++) {
if (cur_fds[cur_fd + i].fd != fd)
continue;
cur_fds[cur_fd + i].fd = NULL;
}
+
+ if (!fd->registered)
+ return 0;
+
fd->registered = false;
+ uloop_fd_stack_event(fd, -1);
return __uloop_fd_delete(fd);
}
do_sigchld = true;
}
-static void uloop_setup_signals(void)
+/*
+ * Install (add == true) or restore (add == false) the SIGINT and SIGCHLD
+ * handlers. The previously installed actions are saved (via sigaction's
+ * third argument) so the outermost uloop_run() can put them back on exit.
+ */
+static void uloop_setup_signals(bool add)
{
+ static struct sigaction old_sigint, old_sigchld;
struct sigaction s;
memset(&s, 0, sizeof(struct sigaction));
- s.sa_handler = uloop_handle_sigint;
- s.sa_flags = 0;
- sigaction(SIGINT, &s, NULL);
- if (uloop_handle_sigchld) {
- s.sa_handler = uloop_sigchld;
- sigaction(SIGCHLD, &s, NULL);
+ if (add) {
+ s.sa_handler = uloop_handle_sigint;
+ s.sa_flags = 0;
+ } else {
+ s = old_sigint;
}
+
+ /* third argument captures the previous action for a later restore */
+ sigaction(SIGINT, &s, &old_sigint);
+
+ if (!uloop_handle_sigchld)
+ return;
+
+ if (add)
+ s.sa_handler = uloop_sigchld;
+ else
+ s = old_sigchld;
+
+ sigaction(SIGCHLD, &s, &old_sigchld);
}
static int uloop_get_next_timeout(struct timeval *tv)
void uloop_run(void)
{
+ /* Nesting depth of uloop_run() calls; signal handlers are swapped
+ * only at the outermost level (install on entry, restore on exit). */
+ static int recursive_calls = 0;
struct timeval tv;
- uloop_setup_signals();
+ /*
+ * Handlers are only updated for the first call to uloop_run() (and restored
+ * when this call is done).
+ */
+ if (!recursive_calls++)
+ uloop_setup_signals(true);
+
while(!uloop_cancelled)
{
uloop_gettime(&tv);
if (do_sigchld)
uloop_handle_processes();
+ /* re-read the clock: uloop_handle_processes() may have taken time,
+ * keeping the timeout computation below accurate */
+ uloop_gettime(&tv);
uloop_run_events(uloop_get_next_timeout(&tv));
}
+
+ if (!--recursive_calls)
+ uloop_setup_signals(false);
}
void uloop_done(void)