--- libev/ev_epoll.c	2011/01/05 04:25:12	1.56
+++ libev/ev_epoll.c	2019/07/11 05:41:39	1.80
@@ -1,19 +1,19 @@
 /*
  * libev epoll fd activity backend
  *
- * Copyright (c) 2007,2008,2009,2010,2011 Marc Alexander Lehmann
+ * Copyright (c) 2007,2008,2009,2010,2011,2016,2017,2019 Marc Alexander Lehmann
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without modifica-
  * tion, are permitted provided that the following conditions are met:
- * 
+ *
  * 1. Redistributions of source code must retain the above copyright notice,
  *    this list of conditions and the following disclaimer.
- * 
+ *
  * 2. Redistributions in binary form must reproduce the above copyright
  *    notice, this list of conditions and the following disclaimer in the
  *    documentation and/or other materials provided with the distribution.
- * 
+ *
  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
  * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MER-
  * CHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
@@ -93,10 +93,10 @@
   ev.events = (nev & EV_READ  ? EPOLLIN  : 0)
             | (nev & EV_WRITE ? EPOLLOUT : 0);
 
-  if (expect_true (!epoll_ctl (backend_fd, oev && oldmask != nev ? EPOLL_CTL_MOD : EPOLL_CTL_ADD, fd, &ev)))
+  if (ecb_expect_true (!epoll_ctl (backend_fd, oev && oldmask != nev ? EPOLL_CTL_MOD : EPOLL_CTL_ADD, fd, &ev)))
     return;
 
-  if (expect_true (errno == ENOENT))
+  if (ecb_expect_true (errno == ENOENT))
     {
       /* if ENOENT then the fd went away, so try to do the right thing */
       if (!nev)
@@ -105,7 +105,7 @@
       if (!epoll_ctl (backend_fd, EPOLL_CTL_ADD, fd, &ev))
         return;
     }
-  else if (expect_true (errno == EEXIST))
+  else if (ecb_expect_true (errno == EEXIST))
     {
       /* EEXIST means we ignored a previous DEL, but the fd is still active */
       /* if the kernel mask is the same as the new mask, we assume it hasn't changed */
@@ -115,19 +115,23 @@
       if (!epoll_ctl (backend_fd, EPOLL_CTL_MOD, fd, &ev))
         return;
     }
-  else if (expect_true (errno == EPERM))
+  else if (ecb_expect_true (errno == EPERM))
     {
+      /* EPERM means the fd is always ready, but epoll is too snobbish */
+      /* to handle it, unlike select or poll. */
       anfds [fd].emask = EV_EMASK_EPERM;
 
       /* add fd to epoll_eperms, if not already inside */
       if (!(oldmask & EV_EMASK_EPERM))
         {
-          array_needsize (int, epoll_eperms, epoll_epermmax, epoll_epermcnt + 1, EMPTY2);
+          array_needsize (int, epoll_eperms, epoll_epermmax, epoll_epermcnt + 1, array_needsize_noinit);
          epoll_eperms [epoll_epermcnt++] = fd;
         }
 
       return;
     }
+  else
+    assert (("libev: I/O watcher with invalid fd found in epoll_ctl", errno != EBADF && errno != ELOOP && errno != EINVAL));
 
   fd_kill (EV_A_ fd);
@@ -141,14 +145,17 @@
 {
   int i;
   int eventcnt;
- 
+
+  if (ecb_expect_false (epoll_epermcnt))
+    timeout = 0.;
+
   /* epoll wait times cannot be larger than (LONG_MAX - 999UL) / HZ msecs, which is below */
   /* the default libev max wait time, however. */
   EV_RELEASE_CB;
-  eventcnt = epoll_wait (backend_fd, epoll_events, epoll_eventmax, (int)ceil (timeout * 1000.));
+  eventcnt = epoll_wait (backend_fd, epoll_events, epoll_eventmax, EV_TS_TO_MS (timeout));
   EV_ACQUIRE_CB;
 
-  if (expect_false (eventcnt < 0))
+  if (ecb_expect_false (eventcnt < 0))
     {
       if (errno != EINTR)
         ev_syserr ("(libev) epoll_wait");
@@ -165,21 +172,34 @@
       int got  = (ev->events & (EPOLLOUT | EPOLLERR | EPOLLHUP) ? EV_WRITE : 0)
                | (ev->events & (EPOLLIN  | EPOLLERR | EPOLLHUP) ? EV_READ  : 0);
-      /* check for spurious notification */
-      /* we assume that fd is always in range, as we never shrink the anfds array */
-      if (expect_false ((uint32_t)anfds [fd].egen != (uint32_t)(ev->data.u64 >> 32)))
+      /*
+       * check for spurious notification.
+       * this only finds spurious notifications on egen updates
+       * other spurious notifications will be found by epoll_ctl, below
+       * we assume that fd is always in range, as we never shrink the anfds array
+       */
+      if (ecb_expect_false ((uint32_t)anfds [fd].egen != (uint32_t)(ev->data.u64 >> 32)))
         {
           /* recreate kernel state */
-          postfork = 1;
+          postfork |= 2;
           continue;
         }
 
-      if (expect_false (got & ~want))
+      if (ecb_expect_false (got & ~want))
         {
           anfds [fd].emask = want;
 
-          /* we received an event but are not interested in it, try mod or del */
-          /* I don't think we ever need MOD, but let's handle it anyways */
+          /*
+           * we received an event but are not interested in it, try mod or del
+           * this often happens because we optimistically do not unregister fds
+           * when we are no longer interested in them, but also when we get spurious
+           * notifications for fds from another process. this is partially handled
+           * above with the gencounter check (== our fd is not the event fd), and
+           * partially here, when epoll_ctl returns an error (== a child has the fd
+           * but we closed it).
+           * note: for events such as POLLHUP, where we can't know whether it refers
+           * to EV_READ or EV_WRITE, we might issue redundant EPOLL_CTL_MOD calls.
+           */
           ev->events = (want & EV_READ  ? EPOLLIN  : 0)
                      | (want & EV_WRITE ? EPOLLOUT : 0);
@@ -187,7 +207,7 @@
           /* which is fortunately easy to do for us. */
           if (epoll_ctl (backend_fd, want ? EPOLL_CTL_MOD : EPOLL_CTL_DEL, fd, ev))
             {
-              postfork = 1; /* an error occurred, recreate kernel state */
+              postfork |= 2; /* an error occurred, recreate kernel state */
               continue;
             }
         }
@@ -196,7 +216,7 @@
     }
 
   /* if the receive array was full, increase its size */
-  if (expect_false (eventcnt == epoll_eventmax))
+  if (ecb_expect_false (eventcnt == epoll_eventmax))
     {
       ev_free (epoll_events);
       epoll_eventmax = array_nextsize (sizeof (struct epoll_event), epoll_eventmax, epoll_eventmax + 1);
@@ -212,28 +232,43 @@
       if (anfds [fd].emask & EV_EMASK_EPERM && events)
         fd_event (EV_A_ fd, events);
       else
-        epoll_eperms [i] = epoll_eperms [--epoll_epermcnt];
+        {
+          epoll_eperms [i] = epoll_eperms [--epoll_epermcnt];
+          anfds [fd].emask = 0;
+        }
     }
 }
 
-int inline_size
-epoll_init (EV_P_ int flags)
+static int
+epoll_epoll_create (void)
 {
-#ifdef EPOLL_CLOEXEC
-  backend_fd = epoll_create1 (EPOLL_CLOEXEC);
+  int fd;
 
-  if (backend_fd <= 0)
+#if defined EPOLL_CLOEXEC && !defined __ANDROID__
+  fd = epoll_create1 (EPOLL_CLOEXEC);
+
+  if (fd < 0 && (errno == EINVAL || errno == ENOSYS))
 #endif
-    backend_fd = epoll_create (256);
+    {
+      fd = epoll_create (256);
 
-  if (backend_fd < 0)
-    return 0;
+      if (fd >= 0)
+        fcntl (fd, F_SETFD, FD_CLOEXEC);
+    }
 
-  fcntl (backend_fd, F_SETFD, FD_CLOEXEC);
+  return fd;
+}
 
-  backend_fudge  = 0.; /* kernel sources seem to indicate this to be zero */
-  backend_modify = epoll_modify;
-  backend_poll   = epoll_poll;
+inline_size
+int
+epoll_init (EV_P_ int flags)
+{
+  if ((backend_fd = epoll_epoll_create ()) < 0)
+    return 0;
+
+  backend_mintime = 1e-3; /* epoll does sometimes return early, this is just to avoid the worst */
+  backend_modify  = epoll_modify;
+  backend_poll    = epoll_poll;
 
   epoll_eventmax = 64; /* initial number of events receivable per poll */
   epoll_events = (struct epoll_event *)ev_malloc (sizeof (struct epoll_event) * epoll_eventmax);
@@ -241,23 +276,23 @@
   return EVBACKEND_EPOLL;
 }
 
-void inline_size
+inline_size
+void
 epoll_destroy (EV_P)
 {
   ev_free (epoll_events);
   array_free (epoll_eperm, EMPTY);
 }
 
-void inline_size
+ecb_cold
+static void
 epoll_fork (EV_P)
 {
   close (backend_fd);
 
-  while ((backend_fd = epoll_create (256)) < 0)
+  while ((backend_fd = epoll_epoll_create ()) < 0)
     ev_syserr ("(libev) epoll_create");
 
-  fcntl (backend_fd, F_SETFD, FD_CLOEXEC);
-
   fd_rearm_all (EV_A);
 }
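
Background on the new EPERM comment and the epoll_eperms handling above: epoll_ctl refuses fds that do not support epoll, most notably regular files, and fails with EPERM, whereas select and poll simply report such fds as always ready. The following stand-alone program only demonstrates that failure mode; the file path is illustrative and not part of libev.

#include <errno.h>
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <sys/epoll.h>
#include <unistd.h>

int
main (void)
{
  int epfd = epoll_create1 (0);
  int fd   = open ("/etc/hostname", O_RDONLY);   /* any regular file will do */
  struct epoll_event ev = { .events = EPOLLIN, .data.fd = fd };

  if (epoll_ctl (epfd, EPOLL_CTL_ADD, fd, &ev) < 0)
    /* on Linux this prints "Operation not permitted" (EPERM): the fd is
       always readable, but epoll cannot watch it, which is why libev
       tracks such fds in epoll_eperms and polls with a zero timeout
       while any of them are active */
    printf ("epoll_ctl: %s\n", strerror (errno));

  close (fd);
  close (epfd);
  return 0;
}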
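The gencounter check referenced in the expanded comment works because, when a watcher is registered, the backend stores a per-fd generation counter in the high 32 bits of epoll_event.data.u64 (the registration side is not part of this diff). A stale kernel entry, e.g. from an fd that was closed and reused, then comes back from epoll_wait with an old generation and can be discarded. The sketch below only illustrates that encoding idea under that assumption; pack_fd_gen, gen_matches and the egen array are hypothetical names, not libev API.

#include <stdint.h>
#include <stdio.h>

/* hypothetical per-fd generation counters (libev keeps these in anfds [fd].egen) */
static uint32_t egen [1024];

/* encode an fd together with its current generation:
   low 32 bits = fd, high 32 bits = generation counter */
static uint64_t
pack_fd_gen (int fd)
{
  return (uint64_t)(uint32_t)fd | ((uint64_t)egen [fd] << 32);
}

/* on receipt, an event is spurious if its stored generation
   no longer matches the current one for that fd */
static int
gen_matches (uint64_t u64)
{
  int fd = (int)(uint32_t)u64;
  return (uint32_t)(u64 >> 32) == egen [fd];
}

int
main (void)
{
  int fd = 7;
  uint64_t data = pack_fd_gen (fd);   /* what would be stored at EPOLL_CTL_ADD time */

  printf ("fresh event valid: %d\n", gen_matches (data));  /* prints 1 */

  ++egen [fd];                        /* fd was closed and re-registered: bump generation */
  printf ("stale event valid: %d\n", gen_matches (data));  /* prints 0 -> treat as spurious */

  return 0;
}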