/*
- * Copyright (C) 2010 Felix Fietkau <nbd@openwrt.org>
- * Copyright (C) 2010 John Crispin <blogic@openwrt.org>
- * Copyright (C) 2010 Steven Barth <steven@midlink.org>
+ * uloop - event loop implementation
*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
+ * Copyright (C) 2010-2013 Felix Fietkau <nbd@openwrt.org>
*
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
*
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
-
#include <sys/time.h>
#include <sys/types.h>
-#include <sys/epoll.h>
#include <unistd.h>
#include <stdio.h>
#include <poll.h>
#include <string.h>
#include <fcntl.h>
-#include <signal.h>
#include <stdbool.h>
#include "uloop.h"
+#include "utils.h"
+
+#ifdef USE_KQUEUE
+#include <sys/event.h>
+#endif
+#ifdef USE_EPOLL
+#include <sys/epoll.h>
+#endif
+#include <sys/wait.h>
+
+struct uloop_fd_event {
+ struct uloop_fd *fd;
+ unsigned int events;
+};
+
+struct uloop_fd_stack {
+ struct uloop_fd_stack *next;
+ struct uloop_fd *fd;
+ unsigned int events;
+};
+
+static struct uloop_fd_stack *fd_stack = NULL;
+
+#define ULOOP_MAX_EVENTS 10
+
+static struct list_head timeouts = LIST_HEAD_INIT(timeouts);
+static struct list_head processes = LIST_HEAD_INIT(processes);
+
+static int poll_fd = -1;
+bool uloop_cancelled = false;
+bool uloop_handle_sigchld = true;
+static bool do_sigchld = false;
+
+static struct uloop_fd_event cur_fds[ULOOP_MAX_EVENTS];
+static int cur_fd, cur_nfds;
+
+#ifdef USE_KQUEUE
+
+int uloop_init(void)
+{
+ struct timespec timeout = { 0, 0 };
+ struct kevent ev = {};
+
+ if (poll_fd >= 0)
+ return 0;
+
+ poll_fd = kqueue();
+ if (poll_fd < 0)
+ return -1;
+
+ EV_SET(&ev, SIGCHLD, EVFILT_SIGNAL, EV_ADD, 0, 0, 0);
+ kevent(poll_fd, &ev, 1, NULL, 0, &timeout);
+
+ return 0;
+}
+
+
+static uint16_t get_flags(unsigned int flags, unsigned int mask)
+{
+ uint16_t kflags = 0;
+
+ if (!(flags & mask))
+ return EV_DELETE;
+
+ kflags = EV_ADD;
+ if (flags & ULOOP_EDGE_TRIGGER)
+ kflags |= EV_CLEAR;
+
+ return kflags;
+}
+
+static struct kevent events[ULOOP_MAX_EVENTS];
+
+static int register_kevent(struct uloop_fd *fd, unsigned int flags)
+{
+ struct timespec timeout = { 0, 0 };
+ struct kevent ev[2];
+ int nev = 0;
+ unsigned int fl = 0;
+ unsigned int changed;
+ uint16_t kflags;
+
+ if (flags & ULOOP_EDGE_DEFER)
+ flags &= ~ULOOP_EDGE_TRIGGER;
+
+ changed = flags ^ fd->flags;
+ if (changed & ULOOP_EDGE_TRIGGER)
+ changed |= flags;
+
+ if (changed & ULOOP_READ) {
+ kflags = get_flags(flags, ULOOP_READ);
+ EV_SET(&ev[nev++], fd->fd, EVFILT_READ, kflags, 0, 0, fd);
+ }
+
+ if (changed & ULOOP_WRITE) {
+ kflags = get_flags(flags, ULOOP_WRITE);
+ EV_SET(&ev[nev++], fd->fd, EVFILT_WRITE, kflags, 0, 0, fd);
+ }
+
+ if (!flags)
+ fl |= EV_DELETE;
+
+ fd->flags = flags;
+ if (kevent(poll_fd, ev, nev, NULL, fl, &timeout) == -1)
+ return -1;
+
+ return 0;
+}
+
+static int register_poll(struct uloop_fd *fd, unsigned int flags)
+{
+ if (flags & ULOOP_EDGE_TRIGGER)
+ flags |= ULOOP_EDGE_DEFER;
+ else
+ flags &= ~ULOOP_EDGE_DEFER;
+
+ return register_kevent(fd, flags);
+}
+
+static int __uloop_fd_delete(struct uloop_fd *fd)
+{
+ return register_poll(fd, 0);
+}
+
+static int uloop_fetch_events(int timeout)
+{
+ struct timespec ts;
+ int nfds, n;
+
+ if (timeout >= 0) {
+ ts.tv_sec = timeout / 1000;
+ ts.tv_nsec = (timeout % 1000) * 1000000;
+ }
+
+ nfds = kevent(poll_fd, NULL, 0, events, ARRAY_SIZE(events), timeout >= 0 ? &ts : NULL);
+ for (n = 0; n < nfds; n++) {
+ struct uloop_fd_event *cur = &cur_fds[n];
+ struct uloop_fd *u = events[n].udata;
+ unsigned int ev = 0;
+
+ cur->fd = u;
+ if (!u)
+ continue;
+
+ if (events[n].flags & EV_ERROR) {
+ u->error = true;
+ uloop_fd_delete(u);
+ }
+
+ if(events[n].filter == EVFILT_READ)
+ ev |= ULOOP_READ;
+ else if (events[n].filter == EVFILT_WRITE)
+ ev |= ULOOP_WRITE;
+
+ if (events[n].flags & EV_EOF)
+ u->eof = true;
+ else if (!ev)
+ cur->fd = NULL;
+
+ cur->events = ev;
+ if (u->flags & ULOOP_EDGE_DEFER) {
+ u->flags &= ~ULOOP_EDGE_DEFER;
+ u->flags |= ULOOP_EDGE_TRIGGER;
+ register_kevent(u, u->flags);
+ }
+ }
+ return nfds;
+}
+
+#endif
+
+#ifdef USE_EPOLL
/**
* FIXME: uClibc < 0.9.30.3 does not define EPOLLRDHUP for Linux >= 2.6.17
#define EPOLLRDHUP 0x2000
#endif
-#ifndef ARRAY_SIZE
-#define ARRAY_SIZE(arr) (sizeof(arr) / sizeof((arr)[0]))
-#endif
+int uloop_init(void)
+{
+ if (poll_fd >= 0)
+ return 0;
-struct uloop_timeout *first_timeout;
-static int epoll_fd;
-static bool cancel;
+ poll_fd = epoll_create(32);
+ if (poll_fd < 0)
+ return -1;
-int uloop_fd_add(struct uloop_fd *sock, unsigned int flags)
+ fcntl(poll_fd, F_SETFD, fcntl(poll_fd, F_GETFD) | FD_CLOEXEC);
+ return 0;
+}
+
+static int register_poll(struct uloop_fd *fd, unsigned int flags)
{
struct epoll_event ev;
- int op = sock->registered ? EPOLL_CTL_MOD : EPOLL_CTL_ADD;
- unsigned int fl;
- int ret;
-
- if (!sock->registered) {
- fl = fcntl(sock->fd, F_GETFL, 0);
- fl |= O_NONBLOCK;
- fcntl(sock->fd, F_SETFL, fl);
- }
+ int op = fd->registered ? EPOLL_CTL_MOD : EPOLL_CTL_ADD;
memset(&ev, 0, sizeof(struct epoll_event));
if (flags & ULOOP_EDGE_TRIGGER)
ev.events |= EPOLLET;
- ev.data.fd = sock->fd;
- ev.data.ptr = sock;
+ ev.data.fd = fd->fd;
+ ev.data.ptr = fd;
+ fd->flags = flags;
+
+ return epoll_ctl(poll_fd, op, fd->fd, &ev);
+}
+
+static struct epoll_event events[ULOOP_MAX_EVENTS];
+
+static int __uloop_fd_delete(struct uloop_fd *sock)
+{
+ sock->flags = 0;
+ return epoll_ctl(poll_fd, EPOLL_CTL_DEL, sock->fd, 0);
+}
+
+static int uloop_fetch_events(int timeout)
+{
+ int n, nfds;
+
+ nfds = epoll_wait(poll_fd, events, ARRAY_SIZE(events), timeout);
+ for (n = 0; n < nfds; ++n) {
+ struct uloop_fd_event *cur = &cur_fds[n];
+ struct uloop_fd *u = events[n].data.ptr;
+ unsigned int ev = 0;
+
+ cur->fd = u;
+ if (!u)
+ continue;
+
+ if (events[n].events & (EPOLLERR|EPOLLHUP)) {
+ u->error = true;
+ uloop_fd_delete(u);
+ }
+
+ if(!(events[n].events & (EPOLLRDHUP|EPOLLIN|EPOLLOUT|EPOLLERR|EPOLLHUP))) {
+ cur->fd = NULL;
+ continue;
+ }
+
+ if(events[n].events & EPOLLRDHUP)
+ u->eof = true;
+
+ if(events[n].events & EPOLLIN)
+ ev |= ULOOP_READ;
+
+ if(events[n].events & EPOLLOUT)
+ ev |= ULOOP_WRITE;
+
+ cur->events = ev;
+ }
+
+ return nfds;
+}
+
+#endif
+
+static bool uloop_fd_stack_event(struct uloop_fd *fd, int events)
+{
+ struct uloop_fd_stack *cur;
+
+	/*
+	 * Do not buffer events for level-triggered fds; they will keep firing.
+	 * The caller needs to take care of recursion issues.
+	 */
+ if (!(fd->flags & ULOOP_EDGE_TRIGGER))
+ return false;
- ret = epoll_ctl(epoll_fd, op, sock->fd, &ev);
+ for (cur = fd_stack; cur; cur = cur->next) {
+ if (cur->fd != fd)
+ continue;
+
+ if (events < 0)
+ cur->fd = NULL;
+ else
+ cur->events |= events | ULOOP_EVENT_BUFFERED;
+
+ return true;
+ }
+
+ return false;
+}
+
+static void uloop_run_events(int timeout)
+{
+ struct uloop_fd_event *cur;
+ struct uloop_fd *fd;
+
+ if (!cur_nfds) {
+ cur_fd = 0;
+ cur_nfds = uloop_fetch_events(timeout);
+ if (cur_nfds < 0)
+ cur_nfds = 0;
+ }
+
+ while (cur_nfds > 0) {
+ struct uloop_fd_stack stack_cur;
+ unsigned int events;
+
+ cur = &cur_fds[cur_fd++];
+ cur_nfds--;
+
+ fd = cur->fd;
+ events = cur->events;
+ if (!fd)
+ continue;
+
+ if (!fd->cb)
+ continue;
+
+ if (uloop_fd_stack_event(fd, cur->events))
+ continue;
+
+ stack_cur.next = fd_stack;
+ stack_cur.fd = fd;
+ fd_stack = &stack_cur;
+ do {
+ stack_cur.events = 0;
+ fd->cb(fd, events);
+ events = stack_cur.events & ULOOP_EVENT_MASK;
+ } while (stack_cur.fd && events);
+ fd_stack = stack_cur.next;
+
+ return;
+ }
+}
+
+int uloop_fd_add(struct uloop_fd *sock, unsigned int flags)
+{
+ unsigned int fl;
+ int ret;
+
+ if (!(flags & (ULOOP_READ | ULOOP_WRITE)))
+ return uloop_fd_delete(sock);
+
+ if (!sock->registered && !(flags & ULOOP_BLOCKING)) {
+ fl = fcntl(sock->fd, F_GETFL, 0);
+ fl |= O_NONBLOCK;
+ fcntl(sock->fd, F_SETFL, fl);
+ }
+
+ ret = register_poll(sock, flags);
if (ret < 0)
goto out;
return ret;
}
-int uloop_fd_delete(struct uloop_fd *sock)
+int uloop_fd_delete(struct uloop_fd *fd)
{
- sock->registered = false;
- return epoll_ctl(epoll_fd, EPOLL_CTL_DEL, sock->fd, 0);
+ int i;
+
+ for (i = 0; i < cur_nfds; i++) {
+ if (cur_fds[cur_fd + i].fd != fd)
+ continue;
+
+ cur_fds[cur_fd + i].fd = NULL;
+ }
+
+ if (!fd->registered)
+ return 0;
+
+ fd->registered = false;
+ uloop_fd_stack_event(fd, -1);
+ return __uloop_fd_delete(fd);
}
static int tv_diff(struct timeval *t1, struct timeval *t2)
{
- if (t1->tv_sec != t2->tv_sec)
- return (t1->tv_sec - t2->tv_sec) * 1000;
- else
- return (t1->tv_usec - t2->tv_usec) / 1000;
+ return
+ (t1->tv_sec - t2->tv_sec) * 1000 +
+ (t1->tv_usec - t2->tv_usec) / 1000;
}
int uloop_timeout_add(struct uloop_timeout *timeout)
{
- struct uloop_timeout **head = &first_timeout;
- struct uloop_timeout *prev = NULL;
+ struct uloop_timeout *tmp;
+ struct list_head *h = &timeouts;
if (timeout->pending)
return -1;
- while (*head) {
- if (tv_diff(&(*head)->time, &timeout->time) > 0)
+ list_for_each_entry(tmp, &timeouts, list) {
+ if (tv_diff(&tmp->time, &timeout->time) > 0) {
+ h = &tmp->list;
break;
-
- prev = *head;
- head = &(*head)->next;
+ }
}
- timeout->prev = prev;
- timeout->next = *head;
- if (timeout->next)
- timeout->next->prev = timeout;
- *head = timeout;
+ list_add_tail(&timeout->list, h);
timeout->pending = true;
return 0;
}
+static void uloop_gettime(struct timeval *tv)
+{
+ struct timespec ts;
+
+ clock_gettime(CLOCK_MONOTONIC, &ts);
+ tv->tv_sec = ts.tv_sec;
+ tv->tv_usec = ts.tv_nsec / 1000;
+}
+
int uloop_timeout_set(struct uloop_timeout *timeout, int msecs)
{
struct timeval *time = &timeout->time;
if (timeout->pending)
uloop_timeout_cancel(timeout);
- gettimeofday(&timeout->time, NULL);
+ uloop_gettime(&timeout->time);
time->tv_sec += msecs / 1000;
- time->tv_usec += msecs % 1000;
+ time->tv_usec += (msecs % 1000) * 1000;
if (time->tv_usec > 1000000) {
time->tv_sec++;
- time->tv_usec %= 100000;
+ time->tv_usec %= 1000000;
}
return uloop_timeout_add(timeout);
if (!timeout->pending)
return -1;
- if (timeout->prev)
- timeout->prev->next = timeout->next;
- else
- first_timeout = timeout->next;
+ list_del(&timeout->list);
+ timeout->pending = false;
- if (timeout->next)
- timeout->next->prev = timeout->prev;
+ return 0;
+}
- timeout->pending = false;
+int uloop_timeout_remaining(struct uloop_timeout *timeout)
+{
+ struct timeval now;
+
+ if (!timeout->pending)
+ return -1;
+
+ uloop_gettime(&now);
+
+ return tv_diff(&timeout->time, &now);
+}
+
+int uloop_process_add(struct uloop_process *p)
+{
+ struct uloop_process *tmp;
+ struct list_head *h = &processes;
+
+ if (p->pending)
+ return -1;
+
+ list_for_each_entry(tmp, &processes, list) {
+ if (tmp->pid > p->pid) {
+ h = &tmp->list;
+ break;
+ }
+ }
+
+ list_add_tail(&p->list, h);
+ p->pending = true;
+
+ return 0;
+}
+
+int uloop_process_delete(struct uloop_process *p)
+{
+ if (!p->pending)
+ return -1;
+
+ list_del(&p->list);
+ p->pending = false;
return 0;
}
+static void uloop_handle_processes(void)
+{
+ struct uloop_process *p, *tmp;
+ pid_t pid;
+ int ret;
+
+ do_sigchld = false;
+
+ while (1) {
+ pid = waitpid(-1, &ret, WNOHANG);
+ if (pid <= 0)
+ return;
+
+ list_for_each_entry_safe(p, tmp, &processes, list) {
+ if (p->pid < pid)
+ continue;
+
+ if (p->pid > pid)
+ break;
+
+ uloop_process_delete(p);
+ p->cb(p, ret);
+ }
+ }
+
+}
+
static void uloop_handle_sigint(int signo)
{
- cancel = true;
+ uloop_cancelled = true;
+}
+
+static void uloop_sigchld(int signo)
+{
+ do_sigchld = true;
}
-static void uloop_setup_signals(void)
+static void uloop_setup_signals(bool add)
{
+ static struct sigaction old_sigint, old_sigchld;
struct sigaction s;
+
memset(&s, 0, sizeof(struct sigaction));
- s.sa_handler = uloop_handle_sigint;
- s.sa_flags = 0;
- sigaction(SIGINT, &s, NULL);
+
+ if (add) {
+ s.sa_handler = uloop_handle_sigint;
+ s.sa_flags = 0;
+ } else {
+ s = old_sigint;
+ }
+
+ sigaction(SIGINT, &s, &old_sigint);
+
+ if (!uloop_handle_sigchld)
+ return;
+
+ if (add)
+ s.sa_handler = uloop_sigchld;
+ else
+ s = old_sigchld;
+
+ sigaction(SIGCHLD, &s, &old_sigchld);
}
static int uloop_get_next_timeout(struct timeval *tv)
{
+ struct uloop_timeout *timeout;
int diff;
- if (!first_timeout)
+ if (list_empty(&timeouts))
return -1;
- diff = tv_diff(&first_timeout->time, tv);
+ timeout = list_first_entry(&timeouts, struct uloop_timeout, list);
+ diff = tv_diff(&timeout->time, tv);
if (diff < 0)
return 0;
static void uloop_process_timeouts(struct timeval *tv)
{
- struct uloop_timeout *timeout;
+ struct uloop_timeout *t;
- while (first_timeout) {
- if (tv_diff(&first_timeout->time, tv) > 0)
+ while (!list_empty(&timeouts)) {
+ t = list_first_entry(&timeouts, struct uloop_timeout, list);
+
+ if (tv_diff(&t->time, tv) > 0)
break;
- timeout = first_timeout;
- uloop_timeout_cancel(timeout);
- if (timeout->cb)
- timeout->cb(timeout);
+ uloop_timeout_cancel(t);
+ if (t->cb)
+ t->cb(t);
}
}
-void uloop_end(void)
+static void uloop_clear_timeouts(void)
{
- cancel = true;
+ struct uloop_timeout *t, *tmp;
+
+ list_for_each_entry_safe(t, tmp, &timeouts, list)
+ uloop_timeout_cancel(t);
}
-int uloop_init(void)
+static void uloop_clear_processes(void)
{
- epoll_fd = epoll_create(32);
- if (epoll_fd < 0)
- return -1;
+ struct uloop_process *p, *tmp;
- fcntl(epoll_fd, F_SETFD, fcntl(epoll_fd, F_GETFD) | FD_CLOEXEC);
- return 0;
+ list_for_each_entry_safe(p, tmp, &processes, list)
+ uloop_process_delete(p);
}
void uloop_run(void)
{
- struct epoll_event events[10];
+ static int recursive_calls = 0;
struct timeval tv;
- int timeout;
- int nfds, n;
- uloop_setup_signals();
- while(!cancel)
+ /*
+ * Handlers are only updated for the first call to uloop_run() (and restored
+ * when this call is done).
+ */
+ if (!recursive_calls++)
+ uloop_setup_signals(true);
+
+ while(!uloop_cancelled)
{
- gettimeofday(&tv, NULL);
+ uloop_gettime(&tv);
uloop_process_timeouts(&tv);
- timeout = uloop_get_next_timeout(&tv);
- nfds = epoll_wait(epoll_fd, events, ARRAY_SIZE(events), timeout);
- for(n = 0; n < nfds; ++n)
- {
- struct uloop_fd *u = events[n].data.ptr;
- unsigned int ev = 0;
-
- if(events[n].events & EPOLLERR) {
- u->error = true;
- uloop_fd_delete(u);
- }
-
- if(!(events[n].events & (EPOLLRDHUP|EPOLLIN|EPOLLOUT|EPOLLERR)))
- continue;
-
- if(events[n].events & EPOLLRDHUP)
- u->eof = true;
-
- if(events[n].events & EPOLLIN)
- ev |= ULOOP_READ;
-
- if(events[n].events & EPOLLOUT)
- ev |= ULOOP_WRITE;
+ if (uloop_cancelled)
+ break;
- if(u->cb)
- u->cb(u, ev);
- }
+ if (do_sigchld)
+ uloop_handle_processes();
+ uloop_gettime(&tv);
+ uloop_run_events(uloop_get_next_timeout(&tv));
}
+
+ if (!--recursive_calls)
+ uloop_setup_signals(false);
}
void uloop_done(void)
{
- close(epoll_fd);
+ if (poll_fd < 0)
+ return;
+
+ close(poll_fd);
+ poll_fd = -1;
+
+ uloop_clear_timeouts();
+ uloop_clear_processes();
}