…
 * and other provisions required by the GPL. If you do not delete the
 * provisions above, a recipient may use your version of this file under
 * either the BSD or the GPL.
 */

#define EPOLL_FALLBACK 1

#include <sys/time.h> /* actually linux/time.h, but we must assume they are compatible */
#include <poll.h>
#include <linux/aio_abi.h>

#if EPOLL_FALLBACK
# include <sys/epoll.h>
#endif

/* we try to fill 4kB pages exactly.
 * the ring buffer header is 32 bytes, every io event is 32 bytes.
 * the kernel takes the io event number, doubles it, adds 2, adds the ring buffer.
 * therefore the calculation below will use "exactly" 4kB for the ring buffer
 */
#define EV_LINUXAIO_DEPTH (128 / 2 - 2 - 1) /* max. number of io events per batch */
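/* a hedged compile-time sanity sketch (not upstream libev code): per the
 * comment above, the kernel allocates 2 * nr_events + 2 io events of 32 bytes
 * each plus a 32-byte header, i.e. 32 + (2 * 61 + 2) * 32 = 4000 bytes here,
 * which still fits one 4kB page. this guards against raising the depth past
 * what a single page can hold. */
#if 32 + (2 * EV_LINUXAIO_DEPTH + 2) * 32 > 4096
# error "EV_LINUXAIO_DEPTH too large for a single 4kB ring buffer page"
#endif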

/*****************************************************************************/
/* syscall wrapdadoop - this section has the raw syscall definitions */

#include <sys/syscall.h> /* no glibc wrappers */

/* aio_abi.h is not versioned in any way, so we cannot test for its existence */
#define IOCB_CMD_POLL 5

…
  struct io_event io_events[0];
};

inline_size
int
evsys_io_setup (unsigned nr_events, aio_context_t *ctx_idp)
{
  return syscall (SYS_io_setup, nr_events, ctx_idp);
}

inline_size
int
evsys_io_destroy (aio_context_t ctx_id)
{
  return syscall (SYS_io_destroy, ctx_id);
}

inline_size
int
evsys_io_submit (aio_context_t ctx_id, long nr, struct iocb *cbp[])
{
  return syscall (SYS_io_submit, ctx_id, nr, cbp);
}

inline_size
int
evsys_io_cancel (aio_context_t ctx_id, struct iocb *cbp, struct io_event *result)
{
  return syscall (SYS_io_cancel, ctx_id, cbp, result);
}

inline_size
int
evsys_io_getevents (aio_context_t ctx_id, long min_nr, long nr, struct io_event *events, struct timespec *timeout)
{
  return syscall (SYS_io_getevents, ctx_id, min_nr, nr, events, timeout);
}

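/* a minimal usage sketch (not part of libev) of how the wrappers above chain
 * together: set up a context, submit one IOCB_CMD_POLL iocb for an fd, and
 * block until it fires. error handling is omitted and the function name is
 * made up for illustration only. */
static void
evsys_example_poll_once (int fd)
{
  aio_context_t ctx = 0;
  struct iocb io;
  struct iocb *iop = &io;
  struct io_event ev;

  evsys_io_setup (1, &ctx);

  memset (&io, 0, sizeof (io));
  io.aio_lio_opcode = IOCB_CMD_POLL;
  io.aio_fildes = fd;
  io.aio_buf = POLLIN; /* for IOCB_CMD_POLL, the poll flags travel in aio_buf */

  evsys_io_submit (ctx, 1, &iop);
  evsys_io_getevents (ctx, 1, 1, &ev, 0); /* blocks; ev.res holds the poll result */
  evsys_io_destroy (ctx);
}
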
/*****************************************************************************/

…
  /*int inuse;*/
} *ANIOCBP;

inline_size
void
linuxaio_array_needsize_iocbp (ANIOCBP *base, int offset, int count)
{
  while (count--)
    {
      /* TODO: quite the overhead to allocate every iocb separately, maybe use our own allocator? */
      ANIOCBP iocb = (ANIOCBP)ev_malloc (sizeof (*iocb));

      /* full zero initialise is probably not required at the moment, but
       * this is not well documented, so we better do it.
       */
      memset (iocb, 0, sizeof (*iocb));

      iocb->io.aio_lio_opcode = IOCB_CMD_POLL;
      iocb->io.aio_data = offset;
      iocb->io.aio_fildes = offset;

      base [offset++] = iocb;
    }
}

ecb_cold
static void
…

static void
linuxaio_modify (EV_P_ int fd, int oev, int nev)
{
  array_needsize (ANIOCBP, linuxaio_iocbps, linuxaio_iocbpmax, fd + 1, linuxaio_array_needsize_iocbp);
  ANIOCBP iocb = linuxaio_iocbps [fd];

#if EPOLL_FALLBACK
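  /* aio_reqprio < 0 marks iocbs that were handed to the epoll fallback
   * (see linuxaio_rearm_epoll below), so take the fd back from epoll first */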
  if (iocb->io.aio_reqprio < 0)
    {
      epoll_ctl (backend_fd, EPOLL_CTL_DEL, fd, 0);
      iocb->io.aio_reqprio = 0;
    }
#endif

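  /* a non-zero aio_buf means the iocb carries poll flags and is currently
   * submitted, so cancel it before re-arming */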
  if (iocb->io.aio_buf)
    evsys_io_cancel (linuxaio_ctx, &iocb->io, (struct io_event *)0); /* always returns an error on relevant kernels */

  if (nev)
    {
      iocb->io.aio_buf =
        (nev & EV_READ ? POLLIN : 0)
        | (nev & EV_WRITE ? POLLOUT : 0);

      /* queue iocb up for io_submit */
      /* this assumes we only ever get one call per fd per loop iteration */
      …
      array_needsize (struct iocb *, linuxaio_submits, linuxaio_submitmax, linuxaio_submitcnt, array_needsize_noinit);
      linuxaio_submits [linuxaio_submitcnt - 1] = &iocb->io;
    }
}

#if EPOLL_FALLBACK

static void
linuxaio_rearm_epoll (EV_P_ struct iocb *iocb, int op)
{
  struct epoll_event eev;

  eev.events = EPOLLONESHOT;
  if (iocb->aio_buf & POLLIN ) eev.events |= EPOLLIN ;
  if (iocb->aio_buf & POLLOUT) eev.events |= EPOLLOUT;
  eev.data.fd = iocb->aio_fildes;

  if (epoll_ctl (backend_fd, op, iocb->aio_fildes, &eev) < 0)
    ev_syserr ("(libev) linuxaio epoll_ctl");
}

static void
linuxaio_epoll_cb (EV_P_ struct ev_io *w, int revents)
{
  struct epoll_event events[16];

  for (;;)
    {
      int idx;
      int res = epoll_wait (backend_fd, events, sizeof (events) / sizeof (events [0]), 0);

      if (expect_false (res < 0))
        ev_syserr ("(libev) linuxaio epoll_wait");
      else if (!res)
        break;

      for (idx = res; idx--; )
        {
          int fd = events [idx].data.fd;
          uint32_t ev = events [idx].events;

          assert (("libev: iocb fd must be in-bounds", fd >= 0 && fd < anfdmax));

          linuxaio_rearm_epoll (EV_A_ &linuxaio_iocbps [fd]->io, EPOLL_CTL_MOD);

          fd_event (EV_A_ fd,
                    (ev & (EPOLLOUT | EPOLLERR | EPOLLHUP) ? EV_WRITE : 0)
                    | (ev & (EPOLLIN | EPOLLERR | EPOLLHUP) ? EV_READ : 0));
        }

      if (res < sizeof (events) / sizeof (events [0]))
        break;
    }
}

#endif

static void
linuxaio_parse_events (EV_P_ struct io_event *ev, int nr)
{
  while (nr)
    {
      int fd = ev->data;
      int res = ev->res;

      assert (("libev: iocb fd must be in-bounds", fd >= 0 && fd < anfdmax));

      /* linux aio is oneshot: rearm fd. TODO: this does more work than needed */
      linuxaio_iocbps [fd]->io.aio_buf = 0;
      anfds [fd].events = 0;
      fd_change (EV_A_ fd, 0);

      /* feed events, we do not expect or handle POLLNVAL */
      fd_event (
        EV_A_
        fd,
        (res & (POLLOUT | POLLERR | POLLHUP) ? EV_WRITE : 0)
        | (res & (POLLIN | POLLERR | POLLHUP) ? EV_READ : 0)
      );

      --nr;
      ++ev;
    }
}

…
static int
linuxaio_get_events_from_ring (EV_P)
{
  struct aio_ring *ring = (struct aio_ring *)linuxaio_ctx;

  /* the kernel reads and writes both of these variables, */
  /* as a C extension, we assume that volatile use here */
  /* both makes reads atomic and once-only */
  unsigned head = *(volatile unsigned *)&ring->head;
  unsigned tail = *(volatile unsigned *)&ring->tail;

  if (head == tail)
    return 0;

  /* bail out if the ring buffer doesn't match the expected layout */
  if (expect_false (ring->magic != AIO_RING_MAGIC)
      || ring->incompat_features != AIO_RING_INCOMPAT_FEATURES
      || ring->header_length != sizeof (struct aio_ring)) /* TODO: or use it to find io_event[0]? */
    return 0;

  /* make sure the events up to tail are visible */
  ECB_MEMORY_FENCE_ACQUIRE;

  /* parse all available events, but only once, to avoid starvation */
  if (tail > head) /* normal case */
    linuxaio_parse_events (EV_A_ ring->io_events + head, tail - head);
  else /* wrapped around */
    {
      linuxaio_parse_events (EV_A_ ring->io_events + head, ring->nr - head);
      linuxaio_parse_events (EV_A_ ring->io_events, tail);
    }

  /* TODO: we only need a compiler barrier here, not a read fence */
  ECB_MEMORY_FENCE_RELEASE;
  /* as an extension to C, we hope that the volatile will make this atomic and once-only */
  *(volatile unsigned *)&ring->head = tail;
  /* make sure the kernel can see our new head value - probably not required */
  ECB_MEMORY_FENCE_RELEASE;

  return 1;
}
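
/* illustrative note (not libev code): with ring->nr == 4, head == 3 and
 * tail == 1, the wrapped branch above first parses ring->nr - head == 1
 * event starting at io_events [3], then tail == 1 more from io_events [0] */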

/* read at least one event from kernel, or timeout */
inline_size
void
linuxaio_get_events (EV_P_ ev_tstamp timeout)
{
  struct timespec ts;
  struct io_event ioev[1];
  int res;

  if (linuxaio_get_events_from_ring (EV_A))
    return;

  /* no events, so wait for at least one, then poll ring buffer again */
  /* this degrades to one event per loop iteration */
  /* if the ring buffer changes layout, but so be it */

  EV_RELEASE_CB;

  ts.tv_sec = (long)timeout;
  ts.tv_nsec = (long)((timeout - ts.tv_sec) * 1e9);
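  /* e.g. a timeout of 2.75 s becomes tv_sec == 2, tv_nsec == 750000000 */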

  res = evsys_io_getevents (linuxaio_ctx, 1, sizeof (ioev) / sizeof (ioev [0]), ioev, &ts);

  EV_ACQUIRE_CB;

  if (res < 0)
    if (errno == EINTR)
      /* ignored */;
    else
      ev_syserr ("(libev) linuxaio io_getevents");
  else if (res)
    {
      /* at least one event received, handle it and any remaining ones in the ring buffer */
      linuxaio_parse_events (EV_A_ ioev, res);
      linuxaio_get_events_from_ring (EV_A);
    }
}

static void
…
  /* io_submit might return less than the requested number of iocbs */
  /* this is, afaics, only because of errors, but we go by the book and use a loop, */
  /* which allows us to pinpoint the erroneous iocb */
  for (submitted = 0; submitted < linuxaio_submitcnt; )
    {
#if 0
      int res;
      if (linuxaio_submits[submitted]->aio_fildes == backend_fd)
        res = evsys_io_submit (linuxaio_ctx, 1, linuxaio_submits + submitted);
      else
        { res = -1; errno = EINVAL; };
#else
      int res = evsys_io_submit (linuxaio_ctx, linuxaio_submitcnt - submitted, linuxaio_submits + submitted);
#endif

      if (expect_false (res < 0))
        if (errno == EAGAIN)
          {
            /* This happens when the ring buffer is full, at least. I assume this means
             * that the event was queued synchronously during io_submit, and thus
             * the buffer overflowed.
             * In this case, we just try again in the next loop iteration.
             * This should not result in a few fds taking priority, as the interface
             * is one-shot, and we submit iocb's in a round-robin fashion.
             * TODO: maybe make "submitted" persistent, so we don't have to memmove?
             */
            if (ecb_expect_false (submitted))
              {
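                /* compact the queue: e.g. with 5 iocbs queued and
                 * submitted == 2, the 3 unsubmitted pointers move to the
                 * front and are retried on the next loop iteration */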
                memmove (linuxaio_submits, linuxaio_submits + submitted, (linuxaio_submitcnt - submitted) * sizeof (*linuxaio_submits));
                linuxaio_submitcnt -= submitted;
              }

            timeout = 0;
            break;
          }
#if EPOLL_FALLBACK
        else if (errno == EINVAL)
          {
            /* This happens for unsupported fds, officially, but in my testing,
             * also randomly happens for supported fds. We fall back to good old
             * poll() here, under the assumption that this is a very rare case.
             * See https://lore.kernel.org/patchwork/patch/1047453/ for a
             * discussion of such a case (ttys), where polling for POLLIN
             * fails but POLLIN|POLLOUT works.
             */
            struct iocb *iocb = linuxaio_submits [submitted];

            linuxaio_rearm_epoll (EV_A_ linuxaio_submits [submitted], EPOLL_CTL_ADD);
            iocb->aio_reqprio = -1; /* mark iocb as epoll */

            res = 1; /* skip this iocb */
          }
#endif
        else if (errno == EBADF)
          {
            fd_kill (EV_A_ linuxaio_submits [submitted]->aio_fildes);

            res = 1; /* skip this iocb */
          }
        else
          ev_syserr ("(libev) linuxaio io_submit");

      submitted += res;
    }
…
int
linuxaio_init (EV_P_ int flags)
{
  /* would be great to have a nice test for IOCB_CMD_POLL instead */
  /* also: test some semi-common fd types, such as files and ttys in recommended_backends */
#if EPOLL_FALLBACK
  /* 4.19 made epoll work */
  if (ev_linux_version () < 0x041300)
    return 0;
#else
  /* 4.18 introduced IOCB_CMD_POLL */
  if (ev_linux_version () < 0x041200)
    return 0;
#endif
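  /* note: ev_linux_version () packs one byte per version component, so
   * 0x041300 corresponds to linux 4.19.0 and 0x041200 to 4.18.0 */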

  linuxaio_ctx = 0;
  if (evsys_io_setup (EV_LINUXAIO_DEPTH, &linuxaio_ctx) < 0)
    return 0;

#if EPOLL_FALLBACK
  backend_fd = ev_epoll_create ();
  if (backend_fd < 0)
    {
      evsys_io_destroy (linuxaio_ctx);
      return 0;
    }

  ev_io_init (&linuxaio_epoll_w, linuxaio_epoll_cb, backend_fd, EV_READ);
  ev_set_priority (&linuxaio_epoll_w, EV_MAXPRI);
  ev_io_start (EV_A_ &linuxaio_epoll_w);
  ev_unref (EV_A); /* watcher should not keep loop alive */
#endif

  backend_modify = linuxaio_modify;
  backend_poll = linuxaio_poll;

  linuxaio_iocbpmax = 0;
…

inline_size
void
linuxaio_destroy (EV_P)
{
#if EPOLL_FALLBACK
  close (backend_fd);
#endif
  linuxaio_free_iocbp (EV_A);
  evsys_io_destroy (linuxaio_ctx);
}

inline_size
void
linuxaio_fork (EV_P)
…
  /* this frees all iocbs, which is very heavy-handed */
  linuxaio_destroy (EV_A);
  linuxaio_submitcnt = 0; /* all pointers were invalidated */

  linuxaio_ctx = 0;
  while (evsys_io_setup (EV_LINUXAIO_DEPTH, &linuxaio_ctx) < 0)
    ev_syserr ("(libev) linuxaio io_setup");

#if EPOLL_FALLBACK
  while ((backend_fd = ev_epoll_create ()) < 0)
    ev_syserr ("(libev) linuxaio epoll_create");

  ev_io_stop (EV_A_ &linuxaio_epoll_w);
  ev_io_init (&linuxaio_epoll_w, linuxaio_epoll_cb, backend_fd, EV_READ);
  ev_io_start (EV_A_ &linuxaio_epoll_w);
#endif

  fd_rearm_all (EV_A);
}
