|
|
/*
 * libev epoll fd activity backend
 *
 * Copyright (c) 2007,2008,2009,2010,2011 Marc Alexander Lehmann <libev@schmorp.de>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without modifica-
 * tion, are permitted provided that the following conditions are met:
 *
 *   1.  Redistributions of source code must retain the above copyright notice,
 *       this list of conditions and the following disclaimer.
 *
 *   2.  Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in the
 *       documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
 * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MER-
 * CHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO
 * EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPE-
 * CIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
 * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTH-
 * ERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
 * OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Alternatively, the contents of this file may be used under the terms of
 * the GNU General Public License ("GPL") version 2 or any later version,
 * in which case the provisions of the GPL are applicable instead of
 * the above. If you wish to allow the use of your version of this file
 * only under the terms of the GPL and not to allow others to use your
 * version of this file under the BSD license, indicate your decision
 * by deleting the provisions above and replace them with the notice
 * and other provisions required by the GPL. If you do not delete the
 * provisions above, a recipient may use your version of this file under
 * either the BSD or the GPL.
 */

/*
 * general notes about epoll:
 *
 * a) epoll silently removes fds from the fd set. as nothing tells us
 *    that an fd has been removed otherwise, we have to continually
 *    "rearm" fds that we suspect *might* have changed (same
 *    problem with kqueue, but much less costly there).
 * b) the fact that ADD != MOD creates a lot of extra syscalls due to a)
 *    and seems not to have any advantage.
 * c) the inability to handle fork or file descriptors (think dup)
 *    limits the applicability over poll, so this is not a generic
 *    poll replacement.
 * d) epoll doesn't work the same as select with many file descriptors
 *    (such as files). while not critical, no other advanced interface
 *    seems to share this (rather non-unixy) limitation.
 * e) epoll claims to be embeddable, but in practise you never get
 *    a ready event for the epoll fd (broken: <=2.6.26, working: >=2.6.32).
 * f) epoll_ctl returning EPERM means the fd is always ready.
 *
 * lots of "weird code" and complication handling in this file is due
 * to these design problems with epoll, as we try very hard to avoid
 * epoll_ctl syscalls for common usage patterns and handle the breakage
 * ensuing from receiving events for closed and otherwise long gone
 * file descriptors.
 */
|
|
65 | |
1 | #include <sys/epoll.h> |
66 | #include <sys/epoll.h> |
2 | |
67 | |
3 | static int epoll_fd = -1; |
68 | #define EV_EMASK_EPERM 0x80 |
4 | |
69 | |
5 | static void |
70 | static void |
6 | epoll_modify (int fd, int oev, int nev) |
71 | epoll_modify (EV_P_ int fd, int oev, int nev) |
7 | { |
72 | { |
8 | int mode = nev ? oev ? EPOLL_CTL_MOD : EPOLL_CTL_ADD : EPOLL_CTL_DEL; |
|
|
9 | |
|
|
10 | struct epoll_event ev; |
73 | struct epoll_event ev; |
11 | ev.data.fd = fd; |
74 | unsigned char oldmask; |
12 | ev.events = |
75 | |
|
|
76 | /* |
|
|
77 | * we handle EPOLL_CTL_DEL by ignoring it here |
|
|
78 | * on the assumption that the fd is gone anyways |
|
|
79 | * if that is wrong, we have to handle the spurious |
|
|
80 | * event in epoll_poll. |
|
|
81 | * if the fd is added again, we try to ADD it, and, if that |
|
|
82 | * fails, we assume it still has the same eventmask. |
|
|
83 | */ |
|
|
84 | if (!nev) |
|
|
85 | return; |
|
|
86 | |
|
|
87 | oldmask = anfds [fd].emask; |
|
|
88 | anfds [fd].emask = nev; |
|
|
89 | |
|
|
90 | /* store the generation counter in the upper 32 bits, the fd in the lower 32 bits */ |
|
|
91 | ev.data.u64 = (uint64_t)(uint32_t)fd |
|
|
92 | | ((uint64_t)(uint32_t)++anfds [fd].egen << 32); |
13 | (nev & EV_READ ? EPOLLIN : 0) |
93 | ev.events = (nev & EV_READ ? EPOLLIN : 0) |
14 | | (nev & EV_WRITE ? EPOLLOUT : 0); |
94 | | (nev & EV_WRITE ? EPOLLOUT : 0); |
15 | |
95 | |
16 | epoll_ctl (epoll_fd, mode, fd, &ev); |
96 | if (expect_true (!epoll_ctl (backend_fd, oev && oldmask != nev ? EPOLL_CTL_MOD : EPOLL_CTL_ADD, fd, &ev))) |
17 | } |
97 | return; |
18 | |
98 | |
19 | void epoll_postfork_child (void) |
99 | if (expect_true (errno == ENOENT)) |
20 | { |
100 | { |
21 | int fd; |
101 | /* if ENOENT then the fd went away, so try to do the right thing */ |
|
|
102 | if (!nev) |
|
|
103 | goto dec_egen; |
22 | |
104 | |
23 | epoll_fd = epoll_create (256); |
105 | if (!epoll_ctl (backend_fd, EPOLL_CTL_ADD, fd, &ev)) |
24 | fcntl (epoll_fd, F_SETFD, FD_CLOEXEC); |
106 | return; |
|
|
107 | } |
|
|
108 | else if (expect_true (errno == EEXIST)) |
|
|
109 | { |
|
|
110 | /* EEXIST means we ignored a previous DEL, but the fd is still active */ |
|
|
111 | /* if the kernel mask is the same as the new mask, we assume it hasn't changed */ |
|
|
112 | if (oldmask == nev) |
|
|
113 | goto dec_egen; |
25 | |
114 | |
26 | /* re-register interest in fds */ |
115 | if (!epoll_ctl (backend_fd, EPOLL_CTL_MOD, fd, &ev)) |
27 | for (fd = 0; fd < anfdmax; ++fd) |
116 | return; |
28 | if (anfds [fd].wev) |
117 | } |
29 | epoll_modify (fd, EV_NONE, anfds [fd].wev); |
118 | else if (expect_true (errno == EPERM)) |
30 | } |
119 | { |
|
|
120 | /* EPERM means the fd is always ready, but epoll is too snobbish */ |
|
|
121 | /* to handle it, unlike select or poll. */ |
|
|
122 | anfds [fd].emask = EV_EMASK_EPERM; |
31 | |
123 | |
32 | static struct epoll_event *events; |
124 | /* add fd to epoll_eperms, if not already inside */ |
33 | static int eventmax; |
125 | if (!(oldmask & EV_EMASK_EPERM)) |
|
|
126 | { |
|
|
127 | array_needsize (int, epoll_eperms, epoll_epermmax, epoll_epermcnt + 1, EMPTY2); |
|
|
128 | epoll_eperms [epoll_epermcnt++] = fd; |
|
|
129 | } |
34 | |
130 | |
|
|
131 | return; |
|
|
132 | } |
|
|
133 | |
|
|
134 | fd_kill (EV_A_ fd); |
|
|
135 | |
|
|
136 | dec_egen: |
|
|
137 | /* we didn't successfully call epoll_ctl, so decrement the generation counter again */ |
|
|
138 | --anfds [fd].egen; |
|
|
139 | } |
|
|
140 | |
|
|
141 | static void |
35 | static void epoll_poll (ev_tstamp timeout) |
142 | epoll_poll (EV_P_ ev_tstamp timeout) |
36 | { |
143 | { |
37 | int eventcnt = epoll_wait (epoll_fd, events, eventmax, ceil (timeout * 1000.)); |
|
|
38 | int i; |
144 | int i; |
|
|
145 | int eventcnt; |
39 | |
146 | |
40 | if (eventcnt < 0) |
147 | /* epoll wait times cannot be larger than (LONG_MAX - 999UL) / HZ msecs, which is below */ |
|
|
148 | /* the default libev max wait time, however. */ |
|
|
149 | EV_RELEASE_CB; |
|
|
150 | eventcnt = epoll_wait (backend_fd, epoll_events, epoll_eventmax, |
|
|
151 | epoll_epermcnt ? 0 : (int)ceil (timeout * 1000.)); |
|
|
152 | EV_ACQUIRE_CB; |
|
|
153 | |
|
|
154 | if (expect_false (eventcnt < 0)) |
|
|
155 | { |
|
|
156 | if (errno != EINTR) |
|
|
157 | ev_syserr ("(libev) epoll_wait"); |
|
|
158 | |
41 | return; |
159 | return; |
|
|
160 | } |
42 | |
161 | |
43 | for (i = 0; i < eventcnt; ++i) |
162 | for (i = 0; i < eventcnt; ++i) |
44 | fd_event ( |
163 | { |
45 | events [i].data.fd, |
164 | struct epoll_event *ev = epoll_events + i; |
|
|
165 | |
|
|
166 | int fd = (uint32_t)ev->data.u64; /* mask out the lower 32 bits */ |
|
|
167 | int want = anfds [fd].events; |
46 | (events [i].events & (EPOLLOUT | EPOLLERR | EPOLLHUP) ? EV_WRITE : 0) |
168 | int got = (ev->events & (EPOLLOUT | EPOLLERR | EPOLLHUP) ? EV_WRITE : 0) |
47 | | (events [i].events & (EPOLLIN | EPOLLERR | EPOLLHUP) ? EV_READ : 0) |
169 | | (ev->events & (EPOLLIN | EPOLLERR | EPOLLHUP) ? EV_READ : 0); |
48 | ); |
170 | |
|
|
171 | /* check for spurious notification */ |
|
|
172 | /* we assume that fd is always in range, as we never shrink the anfds array */ |
|
|
173 | if (expect_false ((uint32_t)anfds [fd].egen != (uint32_t)(ev->data.u64 >> 32))) |
|
|
174 | { |
|
|
175 | /* recreate kernel state */ |
|
|
176 | postfork = 1; |
|
|
177 | continue; |
|
|
178 | } |
|
|
179 | |
|
|
180 | if (expect_false (got & ~want)) |
|
|
181 | { |
|
|
182 | anfds [fd].emask = want; |
|
|
183 | |
|
|
184 | /* we received an event but are not interested in it, try mod or del */ |
|
|
185 | /* I don't think we ever need MOD, but let's handle it anyways */ |
|
|
186 | ev->events = (want & EV_READ ? EPOLLIN : 0) |
|
|
187 | | (want & EV_WRITE ? EPOLLOUT : 0); |
|
|
188 | |
|
|
189 | /* pre-2.6.9 kernels require a non-null pointer with EPOLL_CTL_DEL, */ |
|
|
190 | /* which is fortunately easy to do for us. */ |
|
|
191 | if (epoll_ctl (backend_fd, want ? EPOLL_CTL_MOD : EPOLL_CTL_DEL, fd, ev)) |
|
|
192 | { |
|
|
193 | postfork = 1; /* an error occurred, recreate kernel state */ |
|
|
194 | continue; |
|
|
195 | } |
|
|
196 | } |
|
|
197 | |
|
|
198 | fd_event (EV_A_ fd, got); |
|
|
199 | } |
49 | |
200 | |
50 | /* if the receive array was full, increase its size */ |
201 | /* if the receive array was full, increase its size */ |
51 | if (eventcnt == eventmax) |
202 | if (expect_false (eventcnt == epoll_eventmax)) |
52 | { |
203 | { |
53 | free (events); |
204 | ev_free (epoll_events); |
54 | eventmax += eventmax >> 1; |
205 | epoll_eventmax = array_nextsize (sizeof (struct epoll_event), epoll_eventmax, epoll_eventmax + 1); |
55 | events = malloc (sizeof (struct epoll_event) * eventmax); |
206 | epoll_events = (struct epoll_event *)ev_malloc (sizeof (struct epoll_event) * epoll_eventmax); |
|
|
207 | } |
|
|
208 | |
|
|
209 | /* now synthesize events for all fds where epoll fails, while select works... */ |
|
|
210 | for (i = epoll_epermcnt; i--; ) |
56 | } |
211 | { |
57 | } |
212 | int fd = epoll_eperms [i]; |
|
|
213 | unsigned char events = anfds [fd].events & (EV_READ | EV_WRITE); |
58 | |
214 | |
|
|
215 | if (anfds [fd].emask & EV_EMASK_EPERM && events) |
|
|
216 | fd_event (EV_A_ fd, events); |
|
|
217 | else |
|
|
218 | epoll_eperms [i] = epoll_eperms [--epoll_epermcnt]; |
|
|
219 | } |
|
|
220 | } |
|
|
221 | |
|
|
222 | int inline_size |
59 | void epoll_init (int flags) |
223 | epoll_init (EV_P_ int flags) |
60 | { |
224 | { |
|
|
225 | #ifdef EPOLL_CLOEXEC |
|
|
226 | backend_fd = epoll_create1 (EPOLL_CLOEXEC); |
|
|
227 | |
|
|
228 | if (backend_fd <= 0) |
|
|
229 | #endif |
61 | epoll_fd = epoll_create (256); |
230 | backend_fd = epoll_create (256); |
62 | |
231 | |
63 | if (epoll_fd < 0) |
232 | if (backend_fd < 0) |
64 | return; |
233 | return 0; |
65 | |
234 | |
66 | fcntl (epoll_fd, F_SETFD, FD_CLOEXEC); |
235 | fcntl (backend_fd, F_SETFD, FD_CLOEXEC); |
67 | |
236 | |
68 | ev_method = EVMETHOD_EPOLL; |
237 | backend_fudge = 0.; /* kernel sources seem to indicate this to be zero */ |
69 | method_fudge = 1e-3; /* needed to compensate for epoll returning early */ |
|
|
70 | method_modify = epoll_modify; |
238 | backend_modify = epoll_modify; |
71 | method_poll = epoll_poll; |
239 | backend_poll = epoll_poll; |
72 | |
240 | |
73 | eventmax = 64; /* intiial number of events receivable per poll */ |
241 | epoll_eventmax = 64; /* initial number of events receivable per poll */ |
74 | events = malloc (sizeof (struct epoll_event) * eventmax); |
242 | epoll_events = (struct epoll_event *)ev_malloc (sizeof (struct epoll_event) * epoll_eventmax); |
|
|
243 | |
|
|
244 | return EVBACKEND_EPOLL; |
75 | } |
245 | } |
|
|
246 | |
|
|
247 | void inline_size |
|
|
248 | epoll_destroy (EV_P) |
|
|
249 | { |
|
|
250 | ev_free (epoll_events); |
|
|
251 | array_free (epoll_eperm, EMPTY); |
|
|
252 | } |
|
|
253 | |
|
|
254 | void inline_size |
|
|
255 | epoll_fork (EV_P) |
|
|
256 | { |
|
|
257 | close (backend_fd); |
|
|
258 | |
|
|
259 | while ((backend_fd = epoll_create (256)) < 0) |
|
|
260 | ev_syserr ("(libev) epoll_create"); |
|
|
261 | |
|
|
262 | fcntl (backend_fd, F_SETFD, FD_CLOEXEC); |
|
|
263 | |
|
|
264 | fd_rearm_all (EV_A); |
|
|
265 | } |
|
|
266 | |