/*
 * libev epoll fd activity backend
 *
 * Copyright (c) 2007,2008,2009,2010,2011,2016,2017,2019 Marc Alexander Lehmann <libev@schmorp.de>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without modifica-
 * tion, are permitted provided that the following conditions are met:
 *
 *   1.  Redistributions of source code must retain the above copyright notice,
 *       this list of conditions and the following disclaimer.
 *
 *   2.  Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in the
 *       documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
 * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MER-
 * CHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO
 * EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPE-
 * CIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
… | |
… | |
 *   b) the fact that ADD != MOD creates a lot of extra syscalls due to a)
 *      and seems not to have any advantage.
 *   c) the inability to handle fork or file descriptors (think dup)
 *      limits the applicability over poll, so this is not a generic
 *      poll replacement.
 *   d) epoll doesn't work the same as select with many file descriptors
 *      (such as files). while not critical, no other advanced interface
 *      seems to share this (rather non-unixy) limitation.
 *   e) epoll claims to be embeddable, but in practise you never get
 *      a ready event for the epoll fd (broken: <=2.6.26, working: >=2.6.32).
 *   f) epoll_ctl returning EPERM means the fd is always ready.
 *
 * lots of "weird code" and complication handling in this file is due
 * to these design problems with epoll, as we try very hard to avoid
 * epoll_ctl syscalls for common usage patterns and handle the breakage
 * ensuing from receiving events for closed and otherwise long gone
 * file descriptors.
 */

#include <sys/epoll.h>

/* flag stored in anfds [fd].emask to mark an fd for which epoll_ctl
 * returned EPERM — such fds are always ready and handled separately
 * via the epoll_eperms array. distinct from the EV_READ/EV_WRITE bits. */
#define EV_EMASK_EPERM 0x80

59 | |
69 | |
60 | static void |
70 | static void |
61 | epoll_modify (EV_P_ int fd, int oev, int nev) |
71 | epoll_modify (EV_P_ int fd, int oev, int nev) |
62 | { |
72 | { |
63 | struct epoll_event ev; |
73 | struct epoll_event ev; |
|
|
74 | unsigned char oldmask; |
64 | |
75 | |
65 | /* |
76 | /* |
66 | * we handle EPOLL_CTL_DEL by ignoring it here |
77 | * we handle EPOLL_CTL_DEL by ignoring it here |
67 | * on the assumption that the fd is gone anyways |
78 | * on the assumption that the fd is gone anyways |
68 | * if that is wrong, we have to handle the spurious |
79 | * if that is wrong, we have to handle the spurious |
69 | * event in epoll_poll. |
80 | * event in epoll_poll. |
|
|
81 | * if the fd is added again, we try to ADD it, and, if that |
|
|
82 | * fails, we assume it still has the same eventmask. |
70 | */ |
83 | */ |
71 | if (!nev) |
84 | if (!nev) |
72 | return; |
85 | return; |
73 | |
86 | |
74 | ev.data.u64 = fd; /* use u64 to fully initialise the struct, for nicer strace etc. */ |
87 | oldmask = anfds [fd].emask; |
|
|
88 | anfds [fd].emask = nev; |
|
|
89 | |
|
|
90 | /* store the generation counter in the upper 32 bits, the fd in the lower 32 bits */ |
|
|
91 | ev.data.u64 = (uint64_t)(uint32_t)fd |
|
|
92 | | ((uint64_t)(uint32_t)++anfds [fd].egen << 32); |
75 | ev.events = (nev & EV_READ ? EPOLLIN : 0) |
93 | ev.events = (nev & EV_READ ? EPOLLIN : 0) |
76 | | (nev & EV_WRITE ? EPOLLOUT : 0); |
94 | | (nev & EV_WRITE ? EPOLLOUT : 0); |
77 | |
95 | |
78 | if (expect_true (!epoll_ctl (backend_fd, oev ? EPOLL_CTL_MOD : EPOLL_CTL_ADD, fd, &ev))) |
96 | if (ecb_expect_true (!epoll_ctl (backend_fd, oev && oldmask != nev ? EPOLL_CTL_MOD : EPOLL_CTL_ADD, fd, &ev))) |
79 | return; |
97 | return; |
80 | |
98 | |
81 | if (expect_true (errno == ENOENT)) |
99 | if (ecb_expect_true (errno == ENOENT)) |
82 | { |
100 | { |
83 | /* on ENOENT the fd went away, so try to do the right thing */ |
101 | /* if ENOENT then the fd went away, so try to do the right thing */ |
84 | if (!nev) |
102 | if (!nev) |
85 | return; |
103 | goto dec_egen; |
86 | |
104 | |
87 | if (!epoll_ctl (backend_fd, EPOLL_CTL_ADD, fd, &ev)) |
105 | if (!epoll_ctl (backend_fd, EPOLL_CTL_ADD, fd, &ev)) |
88 | return; |
106 | return; |
89 | } |
107 | } |
90 | else if (expect_true (errno == EEXIST)) |
108 | else if (ecb_expect_true (errno == EEXIST)) |
91 | { |
109 | { |
92 | /* on EEXIST we ignored a previous DEL */ |
110 | /* EEXIST means we ignored a previous DEL, but the fd is still active */ |
|
|
111 | /* if the kernel mask is the same as the new mask, we assume it hasn't changed */ |
|
|
112 | if (oldmask == nev) |
|
|
113 | goto dec_egen; |
|
|
114 | |
93 | if (!epoll_ctl (backend_fd, EPOLL_CTL_MOD, fd, &ev)) |
115 | if (!epoll_ctl (backend_fd, EPOLL_CTL_MOD, fd, &ev)) |
94 | return; |
116 | return; |
95 | } |
117 | } |
|
|
118 | else if (ecb_expect_true (errno == EPERM)) |
|
|
119 | { |
|
|
120 | /* EPERM means the fd is always ready, but epoll is too snobbish */ |
|
|
121 | /* to handle it, unlike select or poll. */ |
|
|
122 | anfds [fd].emask = EV_EMASK_EPERM; |
|
|
123 | |
|
|
124 | /* add fd to epoll_eperms, if not already inside */ |
|
|
125 | if (!(oldmask & EV_EMASK_EPERM)) |
|
|
126 | { |
|
|
127 | array_needsize (int, epoll_eperms, epoll_epermmax, epoll_epermcnt + 1, array_needsize_noinit); |
|
|
128 | epoll_eperms [epoll_epermcnt++] = fd; |
|
|
129 | } |
|
|
130 | |
|
|
131 | return; |
|
|
132 | } |
|
|
133 | else |
|
|
134 | assert (("libev: I/O watcher with invalid fd found in epoll_ctl", errno != EBADF && errno != ELOOP && errno != EINVAL)); |
96 | |
135 | |
97 | fd_kill (EV_A_ fd); |
136 | fd_kill (EV_A_ fd); |
|
|
137 | |
|
|
138 | dec_egen: |
|
|
139 | /* we didn't successfully call epoll_ctl, so decrement the generation counter again */ |
|
|
140 | --anfds [fd].egen; |
98 | } |
141 | } |
99 | |
142 | |
100 | static void |
143 | static void |
101 | epoll_poll (EV_P_ ev_tstamp timeout) |
144 | epoll_poll (EV_P_ ev_tstamp timeout) |
102 | { |
145 | { |
103 | int i; |
146 | int i; |
|
|
147 | int eventcnt; |
|
|
148 | |
|
|
149 | if (ecb_expect_false (epoll_epermcnt)) |
|
|
150 | timeout = 0.; |
|
|
151 | |
|
|
152 | /* epoll wait times cannot be larger than (LONG_MAX - 999UL) / HZ msecs, which is below */ |
|
|
153 | /* the default libev max wait time, however. */ |
|
|
154 | EV_RELEASE_CB; |
104 | int eventcnt = epoll_wait (backend_fd, epoll_events, epoll_eventmax, (int)ceil (timeout * 1000.)); |
155 | eventcnt = epoll_wait (backend_fd, epoll_events, epoll_eventmax, EV_TS_TO_MS (timeout)); |
|
|
156 | EV_ACQUIRE_CB; |
105 | |
157 | |
106 | if (expect_false (eventcnt < 0)) |
158 | if (ecb_expect_false (eventcnt < 0)) |
107 | { |
159 | { |
108 | if (errno != EINTR) |
160 | if (errno != EINTR) |
109 | syserr ("(libev) epoll_wait"); |
161 | ev_syserr ("(libev) epoll_wait"); |
110 | |
162 | |
111 | return; |
163 | return; |
112 | } |
164 | } |
113 | |
165 | |
114 | for (i = 0; i < eventcnt; ++i) |
166 | for (i = 0; i < eventcnt; ++i) |
115 | { |
167 | { |
116 | struct epoll_event *ev = epoll_events + i; |
168 | struct epoll_event *ev = epoll_events + i; |
117 | |
169 | |
118 | int fd = ev->data.u64; |
170 | int fd = (uint32_t)ev->data.u64; /* mask out the lower 32 bits */ |
|
|
171 | int want = anfds [fd].events; |
119 | int got = (ev->events & (EPOLLOUT | EPOLLERR | EPOLLHUP) ? EV_WRITE : 0) |
172 | int got = (ev->events & (EPOLLOUT | EPOLLERR | EPOLLHUP) ? EV_WRITE : 0) |
120 | | (ev->events & (EPOLLIN | EPOLLERR | EPOLLHUP) ? EV_READ : 0); |
173 | | (ev->events & (EPOLLIN | EPOLLERR | EPOLLHUP) ? EV_READ : 0); |
121 | int want = anfds [fd].events; |
|
|
122 | |
174 | |
|
|
175 | /* |
|
|
176 | * check for spurious notification. |
|
|
177 | * this only finds spurious notifications on egen updates |
|
|
178 | * other spurious notifications will be found by epoll_ctl, below |
|
|
179 | * we assume that fd is always in range, as we never shrink the anfds array |
|
|
180 | */ |
|
|
181 | if (ecb_expect_false ((uint32_t)anfds [fd].egen != (uint32_t)(ev->data.u64 >> 32))) |
|
|
182 | { |
|
|
183 | /* recreate kernel state */ |
|
|
184 | postfork |= 2; |
|
|
185 | continue; |
|
|
186 | } |
|
|
187 | |
123 | if (expect_false (got & ~want)) |
188 | if (ecb_expect_false (got & ~want)) |
|
|
189 | { |
|
|
190 | anfds [fd].emask = want; |
|
|
191 | |
124 | { |
192 | /* |
125 | /* we received an event but are not interested in it, try mod or del */ |
193 | * we received an event but are not interested in it, try mod or del |
|
|
194 | * this often happens because we optimistically do not unregister fds |
|
|
195 | * when we are no longer interested in them, but also when we get spurious |
|
|
196 | * notifications for fds from another process. this is partially handled |
|
|
197 | * above with the gencounter check (== our fd is not the event fd), and |
|
|
198 | * partially here, when epoll_ctl returns an error (== a child has the fd |
|
|
199 | * but we closed it). |
|
|
200 | * note: for events such as POLLHUP, where we can't know whether it refers |
|
|
201 | * to EV_READ or EV_WRITE, we might issue redundant EPOLL_CTL_MOD calls. |
|
|
202 | */ |
126 | ev->events = (want & EV_READ ? EPOLLIN : 0) |
203 | ev->events = (want & EV_READ ? EPOLLIN : 0) |
127 | | (want & EV_WRITE ? EPOLLOUT : 0); |
204 | | (want & EV_WRITE ? EPOLLOUT : 0); |
128 | |
205 | |
|
|
206 | /* pre-2.6.9 kernels require a non-null pointer with EPOLL_CTL_DEL, */ |
|
|
207 | /* which is fortunately easy to do for us. */ |
129 | epoll_ctl (backend_fd, want ? EPOLL_CTL_MOD : EPOLL_CTL_DEL, fd, ev); |
208 | if (epoll_ctl (backend_fd, want ? EPOLL_CTL_MOD : EPOLL_CTL_DEL, fd, ev)) |
|
|
209 | { |
|
|
210 | postfork |= 2; /* an error occurred, recreate kernel state */ |
|
|
211 | continue; |
|
|
212 | } |
130 | } |
213 | } |
131 | |
214 | |
132 | fd_event (EV_A_ fd, got); |
215 | fd_event (EV_A_ fd, got); |
133 | } |
216 | } |
134 | |
217 | |
135 | /* if the receive array was full, increase its size */ |
218 | /* if the receive array was full, increase its size */ |
136 | if (expect_false (eventcnt == epoll_eventmax)) |
219 | if (ecb_expect_false (eventcnt == epoll_eventmax)) |
137 | { |
220 | { |
138 | ev_free (epoll_events); |
221 | ev_free (epoll_events); |
139 | epoll_eventmax = array_nextsize (sizeof (struct epoll_event), epoll_eventmax, epoll_eventmax + 1); |
222 | epoll_eventmax = array_nextsize (sizeof (struct epoll_event), epoll_eventmax, epoll_eventmax + 1); |
140 | epoll_events = (struct epoll_event *)ev_malloc (sizeof (struct epoll_event) * epoll_eventmax); |
223 | epoll_events = (struct epoll_event *)ev_malloc (sizeof (struct epoll_event) * epoll_eventmax); |
141 | } |
224 | } |
142 | } |
|
|
143 | |
225 | |
|
|
226 | /* now synthesize events for all fds where epoll fails, while select works... */ |
|
|
227 | for (i = epoll_epermcnt; i--; ) |
|
|
228 | { |
|
|
229 | int fd = epoll_eperms [i]; |
|
|
230 | unsigned char events = anfds [fd].events & (EV_READ | EV_WRITE); |
|
|
231 | |
|
|
232 | if (anfds [fd].emask & EV_EMASK_EPERM && events) |
|
|
233 | fd_event (EV_A_ fd, events); |
|
|
234 | else |
|
|
235 | { |
|
|
236 | epoll_eperms [i] = epoll_eperms [--epoll_epermcnt]; |
|
|
237 | anfds [fd].emask = 0; |
|
|
238 | } |
|
|
239 | } |
|
|
240 | } |
|
|
241 | |
|
|
/* create an epoll fd with close-on-exec set, preferring epoll_create1
 * (EPOLL_CLOEXEC) and falling back to epoll_create + fcntl on kernels or
 * libcs where epoll_create1 is unavailable or rejected.
 * returns the fd, or a negative value on failure. */
static int
epoll_epoll_create (void)
{
  int fd;

#if defined EPOLL_CLOEXEC && !defined __ANDROID__
  fd = epoll_create1 (EPOLL_CLOEXEC);

  /* EINVAL/ENOSYS: kernel predates epoll_create1 or flag unsupported */
  if (fd < 0 && (errno == EINVAL || errno == ENOSYS))
#endif
    {
      fd = epoll_create (256);

      if (fd >= 0)
        fcntl (fd, F_SETFD, FD_CLOEXEC);
    }

  return fd;
}
|
|
261 | |
144 | int inline_size |
262 | inline_size |
|
|
263 | int |
145 | epoll_init (EV_P_ int flags) |
264 | epoll_init (EV_P_ int flags) |
146 | { |
265 | { |
147 | backend_fd = epoll_create (256); |
266 | if ((backend_fd = epoll_epoll_create ()) < 0) |
148 | |
|
|
149 | if (backend_fd < 0) |
|
|
150 | return 0; |
267 | return 0; |
151 | |
268 | |
152 | fcntl (backend_fd, F_SETFD, FD_CLOEXEC); |
269 | backend_mintime = 1e-3; /* epoll does sometimes return early, this is just to avoid the worst */ |
153 | |
|
|
154 | backend_fudge = 0.; /* kernel sources seem to indicate this to be zero */ |
|
|
155 | backend_modify = epoll_modify; |
270 | backend_modify = epoll_modify; |
156 | backend_poll = epoll_poll; |
271 | backend_poll = epoll_poll; |
157 | |
272 | |
158 | epoll_eventmax = 64; /* intiial number of events receivable per poll */ |
273 | epoll_eventmax = 64; /* initial number of events receivable per poll */ |
159 | epoll_events = (struct epoll_event *)ev_malloc (sizeof (struct epoll_event) * epoll_eventmax); |
274 | epoll_events = (struct epoll_event *)ev_malloc (sizeof (struct epoll_event) * epoll_eventmax); |
160 | |
275 | |
161 | return EVBACKEND_EPOLL; |
276 | return EVBACKEND_EPOLL; |
162 | } |
277 | } |
163 | |
278 | |
164 | void inline_size |
279 | inline_size |
|
|
280 | void |
165 | epoll_destroy (EV_P) |
281 | epoll_destroy (EV_P) |
166 | { |
282 | { |
167 | ev_free (epoll_events); |
283 | ev_free (epoll_events); |
|
|
284 | array_free (epoll_eperm, EMPTY); |
168 | } |
285 | } |
169 | |
286 | |
170 | void inline_size |
287 | ecb_cold |
|
|
288 | static void |
171 | epoll_fork (EV_P) |
289 | epoll_fork (EV_P) |
172 | { |
290 | { |
173 | close (backend_fd); |
291 | close (backend_fd); |
174 | |
292 | |
175 | while ((backend_fd = epoll_create (256)) < 0) |
293 | while ((backend_fd = epoll_epoll_create ()) < 0) |
176 | syserr ("(libev) epoll_create"); |
294 | ev_syserr ("(libev) epoll_create"); |
177 | |
|
|
178 | fcntl (backend_fd, F_SETFD, FD_CLOEXEC); |
|
|
179 | |
295 | |
180 | fd_rearm_all (EV_A); |
296 | fd_rearm_all (EV_A); |
181 | } |
297 | } |
182 | |
298 | |