/*
 * libev epoll fd activity backend
 *
 * Copyright (c) 2007,2008,2009,2010,2011,2016,2017,2019 Marc Alexander Lehmann <libev@schmorp.de>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without modifica-
 * tion, are permitted provided that the following conditions are met:
 *
… | |
… | |
91 | ev.data.u64 = (uint64_t)(uint32_t)fd |
91 | ev.data.u64 = (uint64_t)(uint32_t)fd |
92 | | ((uint64_t)(uint32_t)++anfds [fd].egen << 32); |
92 | | ((uint64_t)(uint32_t)++anfds [fd].egen << 32); |
93 | ev.events = (nev & EV_READ ? EPOLLIN : 0) |
93 | ev.events = (nev & EV_READ ? EPOLLIN : 0) |
94 | | (nev & EV_WRITE ? EPOLLOUT : 0); |
94 | | (nev & EV_WRITE ? EPOLLOUT : 0); |
95 | |
95 | |
96 | if (expect_true (!epoll_ctl (backend_fd, oev && oldmask != nev ? EPOLL_CTL_MOD : EPOLL_CTL_ADD, fd, &ev))) |
96 | if (ecb_expect_true (!epoll_ctl (backend_fd, oev && oldmask != nev ? EPOLL_CTL_MOD : EPOLL_CTL_ADD, fd, &ev))) |
97 | return; |
97 | return; |
98 | |
98 | |
99 | if (expect_true (errno == ENOENT)) |
99 | if (ecb_expect_true (errno == ENOENT)) |
100 | { |
100 | { |
101 | /* if ENOENT then the fd went away, so try to do the right thing */ |
101 | /* if ENOENT then the fd went away, so try to do the right thing */ |
102 | if (!nev) |
102 | if (!nev) |
103 | goto dec_egen; |
103 | goto dec_egen; |
104 | |
104 | |
105 | if (!epoll_ctl (backend_fd, EPOLL_CTL_ADD, fd, &ev)) |
105 | if (!epoll_ctl (backend_fd, EPOLL_CTL_ADD, fd, &ev)) |
106 | return; |
106 | return; |
107 | } |
107 | } |
108 | else if (expect_true (errno == EEXIST)) |
108 | else if (ecb_expect_true (errno == EEXIST)) |
109 | { |
109 | { |
110 | /* EEXIST means we ignored a previous DEL, but the fd is still active */ |
110 | /* EEXIST means we ignored a previous DEL, but the fd is still active */ |
111 | /* if the kernel mask is the same as the new mask, we assume it hasn't changed */ |
111 | /* if the kernel mask is the same as the new mask, we assume it hasn't changed */ |
112 | if (oldmask == nev) |
112 | if (oldmask == nev) |
113 | goto dec_egen; |
113 | goto dec_egen; |
114 | |
114 | |
115 | if (!epoll_ctl (backend_fd, EPOLL_CTL_MOD, fd, &ev)) |
115 | if (!epoll_ctl (backend_fd, EPOLL_CTL_MOD, fd, &ev)) |
116 | return; |
116 | return; |
117 | } |
117 | } |
118 | else if (expect_true (errno == EPERM)) |
118 | else if (ecb_expect_true (errno == EPERM)) |
119 | { |
119 | { |
120 | /* EPERM means the fd is always ready, but epoll is too snobbish */ |
120 | /* EPERM means the fd is always ready, but epoll is too snobbish */ |
121 | /* to handle it, unlike select or poll. */ |
121 | /* to handle it, unlike select or poll. */ |
122 | anfds [fd].emask = EV_EMASK_EPERM; |
122 | anfds [fd].emask = EV_EMASK_EPERM; |
123 | |
123 | |
124 | /* add fd to epoll_eperms, if not already inside */ |
124 | /* add fd to epoll_eperms, if not already inside */ |
125 | if (!(oldmask & EV_EMASK_EPERM)) |
125 | if (!(oldmask & EV_EMASK_EPERM)) |
126 | { |
126 | { |
127 | array_needsize (int, epoll_eperms, epoll_epermmax, epoll_epermcnt + 1, EMPTY2); |
127 | array_needsize (int, epoll_eperms, epoll_epermmax, epoll_epermcnt + 1, array_needsize_noinit); |
128 | epoll_eperms [epoll_epermcnt++] = fd; |
128 | epoll_eperms [epoll_epermcnt++] = fd; |
129 | } |
129 | } |
130 | |
130 | |
131 | return; |
131 | return; |
132 | } |
132 | } |
|
|
133 | else |
|
|
134 | assert (("libev: I/O watcher with invalid fd found in epoll_ctl", errno != EBADF && errno != ELOOP && errno != EINVAL)); |
133 | |
135 | |
134 | fd_kill (EV_A_ fd); |
136 | fd_kill (EV_A_ fd); |
135 | |
137 | |
136 | dec_egen: |
138 | dec_egen: |
137 | /* we didn't successfully call epoll_ctl, so decrement the generation counter again */ |
139 | /* we didn't successfully call epoll_ctl, so decrement the generation counter again */ |
… | |
… | |
142 | epoll_poll (EV_P_ ev_tstamp timeout) |
144 | epoll_poll (EV_P_ ev_tstamp timeout) |
143 | { |
145 | { |
144 | int i; |
146 | int i; |
145 | int eventcnt; |
147 | int eventcnt; |
146 | |
148 | |
147 | if (expect_false (epoll_epermcnt)) |
149 | if (ecb_expect_false (epoll_epermcnt)) |
148 | timeout = 0.; |
150 | timeout = 0.; |
149 | |
151 | |
150 | /* epoll wait times cannot be larger than (LONG_MAX - 999UL) / HZ msecs, which is below */ |
152 | /* epoll wait times cannot be larger than (LONG_MAX - 999UL) / HZ msecs, which is below */ |
151 | /* the default libev max wait time, however. */ |
153 | /* the default libev max wait time, however. */ |
152 | EV_RELEASE_CB; |
154 | EV_RELEASE_CB; |
153 | eventcnt = epoll_wait (backend_fd, epoll_events, epoll_eventmax, timeout * 1e3); |
155 | eventcnt = epoll_wait (backend_fd, epoll_events, epoll_eventmax, timeout * 1e3); |
154 | EV_ACQUIRE_CB; |
156 | EV_ACQUIRE_CB; |
155 | |
157 | |
156 | if (expect_false (eventcnt < 0)) |
158 | if (ecb_expect_false (eventcnt < 0)) |
157 | { |
159 | { |
158 | if (errno != EINTR) |
160 | if (errno != EINTR) |
159 | ev_syserr ("(libev) epoll_wait"); |
161 | ev_syserr ("(libev) epoll_wait"); |
160 | |
162 | |
161 | return; |
163 | return; |
… | |
… | |
174 | * check for spurious notification. |
176 | * check for spurious notification. |
175 | * this only finds spurious notifications on egen updates |
177 | * this only finds spurious notifications on egen updates |
176 | * other spurious notifications will be found by epoll_ctl, below |
178 | * other spurious notifications will be found by epoll_ctl, below |
177 | * we assume that fd is always in range, as we never shrink the anfds array |
179 | * we assume that fd is always in range, as we never shrink the anfds array |
178 | */ |
180 | */ |
179 | if (expect_false ((uint32_t)anfds [fd].egen != (uint32_t)(ev->data.u64 >> 32))) |
181 | if (ecb_expect_false ((uint32_t)anfds [fd].egen != (uint32_t)(ev->data.u64 >> 32))) |
180 | { |
182 | { |
181 | /* recreate kernel state */ |
183 | /* recreate kernel state */ |
182 | postfork = 1; |
184 | postfork |= 2; |
183 | continue; |
185 | continue; |
184 | } |
186 | } |
185 | |
187 | |
186 | if (expect_false (got & ~want)) |
188 | if (ecb_expect_false (got & ~want)) |
187 | { |
189 | { |
188 | anfds [fd].emask = want; |
190 | anfds [fd].emask = want; |
189 | |
191 | |
190 | /* |
192 | /* |
191 | * we received an event but are not interested in it, try mod or del |
193 | * we received an event but are not interested in it, try mod or del |
… | |
… | |
201 | |
203 | |
202 | /* pre-2.6.9 kernels require a non-null pointer with EPOLL_CTL_DEL, */ |
204 | /* pre-2.6.9 kernels require a non-null pointer with EPOLL_CTL_DEL, */ |
203 | /* which is fortunately easy to do for us. */ |
205 | /* which is fortunately easy to do for us. */ |
204 | if (epoll_ctl (backend_fd, want ? EPOLL_CTL_MOD : EPOLL_CTL_DEL, fd, ev)) |
206 | if (epoll_ctl (backend_fd, want ? EPOLL_CTL_MOD : EPOLL_CTL_DEL, fd, ev)) |
205 | { |
207 | { |
206 | postfork = 1; /* an error occurred, recreate kernel state */ |
208 | postfork |= 2; /* an error occurred, recreate kernel state */ |
207 | continue; |
209 | continue; |
208 | } |
210 | } |
209 | } |
211 | } |
210 | |
212 | |
211 | fd_event (EV_A_ fd, got); |
213 | fd_event (EV_A_ fd, got); |
212 | } |
214 | } |
213 | |
215 | |
214 | /* if the receive array was full, increase its size */ |
216 | /* if the receive array was full, increase its size */ |
215 | if (expect_false (eventcnt == epoll_eventmax)) |
217 | if (ecb_expect_false (eventcnt == epoll_eventmax)) |
216 | { |
218 | { |
217 | ev_free (epoll_events); |
219 | ev_free (epoll_events); |
218 | epoll_eventmax = array_nextsize (sizeof (struct epoll_event), epoll_eventmax, epoll_eventmax + 1); |
220 | epoll_eventmax = array_nextsize (sizeof (struct epoll_event), epoll_eventmax, epoll_eventmax + 1); |
219 | epoll_events = (struct epoll_event *)ev_malloc (sizeof (struct epoll_event) * epoll_eventmax); |
221 | epoll_events = (struct epoll_event *)ev_malloc (sizeof (struct epoll_event) * epoll_eventmax); |
220 | } |
222 | } |
… | |
… | |
226 | unsigned char events = anfds [fd].events & (EV_READ | EV_WRITE); |
228 | unsigned char events = anfds [fd].events & (EV_READ | EV_WRITE); |
227 | |
229 | |
228 | if (anfds [fd].emask & EV_EMASK_EPERM && events) |
230 | if (anfds [fd].emask & EV_EMASK_EPERM && events) |
229 | fd_event (EV_A_ fd, events); |
231 | fd_event (EV_A_ fd, events); |
230 | else |
232 | else |
|
|
233 | { |
231 | epoll_eperms [i] = epoll_eperms [--epoll_epermcnt]; |
234 | epoll_eperms [i] = epoll_eperms [--epoll_epermcnt]; |
|
|
235 | anfds [fd].emask = 0; |
|
|
236 | } |
|
|
237 | } |
|
|
238 | } |
|
|
239 | |
|
|
/* create the backend epoll fd, preferring the close-on-exec aware
 * epoll_create1 where available and falling back to epoll_create +
 * fcntl (FD_CLOEXEC) on older kernels/libcs.
 * returns the new fd, or a negative value on failure (errno set). */
static int
epoll_epoll_create (void)
{
  int fd;

  /* android pretends to have epoll_create1, but it can be unreliable there,
   * so only use it on non-android systems that define EPOLL_CLOEXEC */
#if defined EPOLL_CLOEXEC && !defined __ANDROID__
  fd = epoll_create1 (EPOLL_CLOEXEC);

  /* EINVAL/ENOSYS indicate a kernel/libc without epoll_create1 support */
  if (fd < 0 && (errno == EINVAL || errno == ENOSYS))
#endif
    {
      /* the size argument is ignored by modern kernels, but must be > 0 */
      fd = epoll_create (256);

      if (fd >= 0)
        fcntl (fd, F_SETFD, FD_CLOEXEC);
    }

  return fd;
}
|
|
259 | |
235 | int inline_size |
260 | inline_size |
|
|
261 | int |
236 | epoll_init (EV_P_ int flags) |
262 | epoll_init (EV_P_ int flags) |
237 | { |
263 | { |
238 | #ifdef EPOLL_CLOEXEC |
264 | if ((backend_fd = epoll_epoll_create ()) < 0) |
239 | backend_fd = epoll_create1 (EPOLL_CLOEXEC); |
|
|
240 | |
|
|
241 | if (backend_fd <= 0) |
|
|
242 | #endif |
|
|
243 | backend_fd = epoll_create (256); |
|
|
244 | |
|
|
245 | if (backend_fd < 0) |
|
|
246 | return 0; |
265 | return 0; |
247 | |
266 | |
248 | fcntl (backend_fd, F_SETFD, FD_CLOEXEC); |
|
|
249 | |
|
|
250 | backend_mintime = 1./1024.; /* epoll does sometimes return early, this is just to avoid the worst */ |
267 | backend_mintime = 1e-3; /* epoll does sometimes return early, this is just to avoid the worst */ |
251 | backend_modify = epoll_modify; |
268 | backend_modify = epoll_modify; |
252 | backend_poll = epoll_poll; |
269 | backend_poll = epoll_poll; |
253 | |
270 | |
254 | epoll_eventmax = 64; /* initial number of events receivable per poll */ |
271 | epoll_eventmax = 64; /* initial number of events receivable per poll */ |
255 | epoll_events = (struct epoll_event *)ev_malloc (sizeof (struct epoll_event) * epoll_eventmax); |
272 | epoll_events = (struct epoll_event *)ev_malloc (sizeof (struct epoll_event) * epoll_eventmax); |
256 | |
273 | |
257 | return EVBACKEND_EPOLL; |
274 | return EVBACKEND_EPOLL; |
258 | } |
275 | } |
259 | |
276 | |
260 | void inline_size |
277 | inline_size |
|
|
278 | void |
261 | epoll_destroy (EV_P) |
279 | epoll_destroy (EV_P) |
262 | { |
280 | { |
263 | ev_free (epoll_events); |
281 | ev_free (epoll_events); |
264 | array_free (epoll_eperm, EMPTY); |
282 | array_free (epoll_eperm, EMPTY); |
265 | } |
283 | } |
266 | |
284 | |
267 | void inline_size |
285 | inline_size |
|
|
286 | void |
268 | epoll_fork (EV_P) |
287 | epoll_fork (EV_P) |
269 | { |
288 | { |
270 | close (backend_fd); |
289 | close (backend_fd); |
271 | |
290 | |
272 | while ((backend_fd = epoll_create (256)) < 0) |
291 | while ((backend_fd = epoll_epoll_create ()) < 0) |
273 | ev_syserr ("(libev) epoll_create"); |
292 | ev_syserr ("(libev) epoll_create"); |
274 | |
293 | |
275 | fcntl (backend_fd, F_SETFD, FD_CLOEXEC); |
|
|
276 | |
|
|
277 | fd_rearm_all (EV_A); |
294 | fd_rearm_all (EV_A); |
278 | } |
295 | } |
279 | |
296 | |