… | |
… | |
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
31 | |
31 | |

/*
 * general notes about epoll:
 *
 * a) epoll silently removes fds from the fd set. as nothing tells us
 *    that an fd has been removed otherwise, we have to continually
 *    "rearm" fds that we suspect *might* have changed (same
 *    problem with kqueue, but much less costly there).
 * b) the fact that ADD != MOD creates a lot of extra syscalls due to a)
 *    and seems not to have any advantage.
 * c) the inability to handle fork or file descriptors (think dup)
 *    limits the applicability over poll, so this is not a generic
 *    poll replacement.
 *
 * lots of "weird code" and complication handling in this file is due
 * to these design problems with epoll, as we try very hard to avoid
 * epoll_ctl syscalls for common usage patterns.
 */

32 | #include <sys/epoll.h> |
50 | #include <sys/epoll.h> |
33 | |
51 | |
34 | static void |
52 | static void |
35 | epoll_modify (EV_P_ int fd, int oev, int nev) |
53 | epoll_modify (EV_P_ int fd, int oev, int nev) |
36 | { |
54 | { |
37 | int mode = nev ? oev ? EPOLL_CTL_MOD : EPOLL_CTL_ADD : EPOLL_CTL_DEL; |
55 | struct epoll_event ev; |
38 | |
56 | |
39 | struct epoll_event ev; |
57 | /* |
|
|
58 | * we handle EPOLL_CTL_DEL by ignoring it here |
|
|
59 | * on the assumption that the fd is gone anyways |
|
|
60 | * if that is wrong, we have to handle the spurious |
|
|
61 | * event in epoll_poll. |
|
|
62 | */ |
|
|
63 | if (!nev) |
|
|
64 | return; |
|
|
65 | |
40 | ev.data.u64 = fd; /* use u64 to fully initialise the struct, for nicer strace etc. */ |
66 | ev.data.u64 = fd; /* use u64 to fully initialise the struct, for nicer strace etc. */ |
41 | ev.events = |
|
|
42 | (nev & EV_READ ? EPOLLIN : 0) |
67 | ev.events = (nev & EV_READ ? EPOLLIN : 0) |
43 | | (nev & EV_WRITE ? EPOLLOUT : 0); |
68 | | (nev & EV_WRITE ? EPOLLOUT : 0); |
44 | |
69 | |
45 | if (expect_false (epoll_ctl (backend_fd, mode, fd, &ev))) |
70 | if (expect_true (!epoll_ctl (backend_fd, oev ? EPOLL_CTL_MOD : EPOLL_CTL_ADD, fd, &ev))) |
|
|
71 | return; |
|
|
72 | |
|
|
73 | if (expect_true (errno == ENOENT)) |
|
|
74 | { |
46 | if (errno != ENOENT /* on ENOENT the fd went away, so try to do the right thing */ |
75 | /* on ENOENT the fd went away, so try to do the right thing */ |
|
|
76 | if (!nev) |
|
|
77 | return; |
|
|
78 | |
47 | || (nev && epoll_ctl (backend_fd, EPOLL_CTL_ADD, fd, &ev))) |
79 | if (!epoll_ctl (backend_fd, EPOLL_CTL_ADD, fd, &ev)) |
|
|
80 | return; |
|
|
81 | } |
|
|
82 | else if (expect_true (errno == EEXIST)) |
|
|
83 | { |
|
|
84 | /* on EEXIST we ignored a previous DEL */ |
|
|
85 | if (!epoll_ctl (backend_fd, EPOLL_CTL_MOD, fd, &ev)) |
|
|
86 | return; |
|
|
87 | } |
|
|
88 | |
48 | fd_kill (EV_A_ fd); |
89 | fd_kill (EV_A_ fd); |
49 | } |
90 | } |
50 | |
91 | |
51 | static void |
92 | static void |
52 | epoll_poll (EV_P_ ev_tstamp timeout) |
93 | epoll_poll (EV_P_ ev_tstamp timeout) |
53 | { |
94 | { |
… | |
… | |
61 | |
102 | |
62 | return; |
103 | return; |
63 | } |
104 | } |
64 | |
105 | |
65 | for (i = 0; i < eventcnt; ++i) |
106 | for (i = 0; i < eventcnt; ++i) |
66 | fd_event ( |
107 | { |
67 | EV_A_ |
108 | struct epoll_event *ev = epoll_events + i; |
68 | epoll_events [i].data.u64, |
109 | |
|
|
110 | int fd = ev->data.u64; |
69 | (epoll_events [i].events & (EPOLLOUT | EPOLLERR | EPOLLHUP) ? EV_WRITE : 0) |
111 | int got = (ev->events & (EPOLLOUT | EPOLLERR | EPOLLHUP) ? EV_WRITE : 0) |
70 | | (epoll_events [i].events & (EPOLLIN | EPOLLERR | EPOLLHUP) ? EV_READ : 0) |
112 | | (ev->events & (EPOLLIN | EPOLLERR | EPOLLHUP) ? EV_READ : 0); |
71 | ); |
113 | int want = anfds [fd].events; |
|
|
114 | |
|
|
115 | if (expect_false (got & ~want)) |
|
|
116 | { |
|
|
117 | /* we received an event but are not interested in it, try mod or del */ |
|
|
118 | ev->events = (want & EV_READ ? EPOLLIN : 0) |
|
|
119 | | (want & EV_WRITE ? EPOLLOUT : 0); |
|
|
120 | |
|
|
121 | epoll_ctl (backend_fd, want ? EPOLL_CTL_MOD : EPOLL_CTL_DEL, fd, ev); |
|
|
122 | } |
|
|
123 | |
|
|
124 | fd_event (EV_A_ fd, got); |
|
|
125 | } |
72 | |
126 | |
73 | /* if the receive array was full, increase its size */ |
127 | /* if the receive array was full, increase its size */ |
74 | if (expect_false (eventcnt == epoll_eventmax)) |
128 | if (expect_false (eventcnt == epoll_eventmax)) |
75 | { |
129 | { |
76 | ev_free (epoll_events); |
130 | ev_free (epoll_events); |
… | |
… | |
87 | if (backend_fd < 0) |
141 | if (backend_fd < 0) |
88 | return 0; |
142 | return 0; |
89 | |
143 | |
90 | fcntl (backend_fd, F_SETFD, FD_CLOEXEC); |
144 | fcntl (backend_fd, F_SETFD, FD_CLOEXEC); |
91 | |
145 | |
92 | backend_fudge = 2e-4; /* needed to compensate for epoll returning early */ |
146 | backend_fudge = 0.; /* kernel sources seem to indicate this to be zero */ |
93 | backend_modify = epoll_modify; |
147 | backend_modify = epoll_modify; |
94 | backend_poll = epoll_poll; |
148 | backend_poll = epoll_poll; |
95 | |
149 | |
96 | epoll_eventmax = 64; /* intiial number of events receivable per poll */ |
150 | epoll_eventmax = 64; /* intiial number of events receivable per poll */ |
97 | epoll_events = (struct epoll_event *)ev_malloc (sizeof (struct epoll_event) * epoll_eventmax); |
151 | epoll_events = (struct epoll_event *)ev_malloc (sizeof (struct epoll_event) * epoll_eventmax); |