…
 * and other provisions required by the GPL. If you do not delete the
 * provisions above, a recipient may use your version of this file under
 * either the BSD or the GPL.
 */

#define EPOLL_FALLBACK 1

#include <sys/time.h> /* actually linux/time.h, but we must assume they are compatible */
#include <poll.h>
#include <linux/aio_abi.h>

#if EPOLL_FALLBACK
# include <sys/epoll.h>
#endif

/* we try to fill 4kB pages exactly.
 * the ring buffer header is 32 bytes, every io event is 32 bytes.
 * the kernel takes the io event number, doubles it, adds 2, adds the ring buffer.
 * therefore the calculation below will use "exactly" 4kB for the ring buffer
 */
#define EV_LINUXAIO_DEPTH (128 / 2 - 2 - 1) /* max. number of io events per batch */
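/* editor's note: the arithmetic above, worked through as a sanity check -
 * EV_LINUXAIO_DEPTH is 128 / 2 - 2 - 1 = 61 requested events; by the rule in
 * the comment the kernel reserves 2 * 61 + 2 = 124 event slots, and
 * 124 * 32 bytes of events plus the 32-byte ring header is 4000 bytes,
 * which fits a single 4kB page. */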

/*****************************************************************************/
/* syscall wrapdadoop */

#include <sys/syscall.h> /* no glibc wrappers */
…
static void
linuxaio_modify (EV_P_ int fd, int oev, int nev)
{
  array_needsize (ANIOCBP, linuxaio_iocbps, linuxaio_iocbpmax, fd + 1, linuxaio_array_needsize_iocbp);
  struct aniocb *iocb = linuxaio_iocbps [fd];

#if EPOLL_FALLBACK
  if (iocb->io.aio_reqprio < 0)
    {
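      /* this fd was handed off to epoll earlier (aio_reqprio < 0 marks an
       * epoll-managed iocb, see linuxaio_poll), so take it back from epoll first */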
      epoll_ctl (backend_fd, EPOLL_CTL_DEL, fd, 0);
      iocb->io.aio_reqprio = 0;
    }
#endif

  if (iocb->io.aio_buf)
    ev_io_cancel (linuxaio_ctx, &iocb->io, (struct io_event *)0); /* always returns an error on relevant kernels */

  if (nev)
…
static int
linuxaio_get_events_from_ring (EV_P)
{
  struct aio_ring *ring = (struct aio_ring *)linuxaio_ctx;

  unsigned head = ring->head;
  unsigned tail = *(volatile unsigned *)&ring->tail;
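  /* editor's note: the kernel publishes new events by advancing tail, while
   * head is only ever advanced by us below, hence the volatile re-read of
   * tail but not of head */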

  if (head == tail)
    return 0;
…
  if (ecb_expect_false (ring->magic != AIO_RING_MAGIC)
      || ring->incompat_features != AIO_RING_INCOMPAT_FEATURES
      || ring->header_length != sizeof (struct aio_ring)) /* TODO: or use it to find io_event[0]? */
    return 0;

  /* make sure the events up to tail are visible */
  ECB_MEMORY_FENCE_ACQUIRE;

  /* parse all available events, but only once, to avoid starvation */
  if (tail > head) /* normal case */
    linuxaio_parse_events (EV_A_ ring->io_events + head, tail - head);
  else /* wrapped around */
    {
      linuxaio_parse_events (EV_A_ ring->io_events + head, ring->nr - head);
      linuxaio_parse_events (EV_A_ ring->io_events, tail);
    }
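  /* editor's note: a worked example of the wrapped case - with ring->nr == 124,
   * head == 120 and tail == 3, the first call parses the 4 events in slots
   * 120..123 and the second call parses the 3 events in slots 0..2 */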

  *(volatile unsigned *)&ring->head = tail;
  /* make sure kernel can see our new head value - probably not required */
  ECB_MEMORY_FENCE_RELEASE;

  return 1;
}

/* read at least one event from kernel, or timeout */
…
  ts.tv_nsec = (long)((timeout - ts.tv_sec) * 1e9);

  res = ev_io_getevents (linuxaio_ctx, 1, 1, &ioev, &ts);

  if (res < 0)
    if (errno == EINTR)
      /* ignored */;
    else
      ev_syserr ("(libev) linuxaio io_getevents");
  else if (res)
    {
      /* at least one event received, handle it and any remaining ones in the ring buffer */
      linuxaio_parse_events (EV_A_ &ioev, 1);
      linuxaio_get_events_from_ring (EV_A);
    }
}

#if EPOLL_FALLBACK
static void
linuxaio_rearm_epoll (EV_P_ struct iocb *iocb, int op)
{
  struct epoll_event eev;

  eev.events = EPOLLONESHOT;
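  /* editor's note: for poll-style iocbs the linux aio ABI keeps the requested
   * poll mask in aio_buf, so it is translated back into epoll bits here */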
  if (iocb->aio_buf & POLLIN ) eev.events |= EPOLLIN ;
  if (iocb->aio_buf & POLLOUT) eev.events |= EPOLLOUT;
  eev.data.fd = iocb->aio_fildes;

  if (epoll_ctl (backend_fd, op, iocb->aio_fildes, &eev) < 0)
    ev_syserr ("(libev) linuxaio epoll_ctl");
}
#endif

static void
linuxaio_poll (EV_P_ ev_tstamp timeout)
{
  int submitted;
…
      if (ecb_expect_false (res < 0))
        if (errno == EAGAIN)
          {
            /* This happens when the ring buffer is full, at least. I assume this means
             * that the event was queued synchronously during io_submit, and thus
             * the buffer overflowed.
             * In this case, we just try again in the next loop iteration.
             * This should not result in a few fds taking priority, as the interface
             * is one-shot, and we submit iocb's in a round-robin fashion.
             */
            memmove (linuxaio_submits, linuxaio_submits + submitted, (linuxaio_submitcnt - submitted) * sizeof (*linuxaio_submits));
            linuxaio_submitcnt -= submitted;
            timeout = 0;
            break;
          }
#if EPOLL_FALLBACK
        else if (errno == EINVAL)
          {
            /* This happens for unsupported fds, officially, but in my testing,
             * also randomly happens for supported fds. We fall back to good old
             * epoll here, under the assumption that this is a very rare case.
             */
            struct iocb *iocb = linuxaio_submits [submitted];
            res = 1; /* skip this iocb */

            linuxaio_rearm_epoll (EV_A_ iocb, EPOLL_CTL_ADD);
            iocb->aio_reqprio = -1; /* mark iocb as epoll */
          }
#endif
        else
          ev_syserr ("(libev) linuxaio io_submit");

      submitted += res;
    }

  linuxaio_submitcnt = 0;

  /* second phase: fetch and parse events */

  linuxaio_get_events (EV_A_ timeout);
}

#if EPOLL_FALLBACK

static void
linuxaio_epoll_cb (EV_P_ struct ev_io *w, int revents)
{
  struct epoll_event events[16];

  for (;;)
    {
      int idx;
      int res = epoll_wait (backend_fd, events, sizeof (events) / sizeof (events [0]), 0);

      if (ecb_expect_false (res < 0))
        ev_syserr ("(libev) linuxaio epoll_wait");
      else if (!res)
        break;

      for (idx = res; idx--; )
        {
          int fd = events [idx].data.fd;
          uint32_t ev = events [idx].events;

          assert (("libev: iocb fd must be in-bounds", fd >= 0 && fd < anfdmax));

          linuxaio_rearm_epoll (EV_A_ &linuxaio_iocbps [fd]->io, EPOLL_CTL_MOD);

          fd_event (EV_A_ fd,
                    (ev & (EPOLLOUT | EPOLLERR | EPOLLHUP) ? EV_WRITE : 0)
                    | (ev & (EPOLLIN  | EPOLLERR | EPOLLHUP) ? EV_READ  : 0));
        }

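      /* a partial batch means epoll had no further events pending, so stop draining */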
      if (res < (int)(sizeof (events) / sizeof (events [0])))
        break;
    }
}

#endif

inline_size
int
linuxaio_init (EV_P_ int flags)
{
…

  linuxaio_ctx = 0;
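  /* io_setup: ask the kernel for an aio context with room for EV_LINUXAIO_DEPTH
   * in-flight events; failure (e.g. no kernel aio support) disables this backend */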
  if (ev_io_setup (EV_LINUXAIO_DEPTH, &linuxaio_ctx) < 0)
    return 0;

#if EPOLL_FALLBACK
  backend_fd = ev_epoll_create ();
  if (backend_fd < 0)
    {
      ev_io_destroy (linuxaio_ctx);
      return 0;
    }

  ev_io_init (&linuxaio_epoll_w, linuxaio_epoll_cb, backend_fd, EV_READ);
  ev_io_start (EV_A_ &linuxaio_epoll_w);
#endif

  backend_modify = linuxaio_modify;
  backend_poll = linuxaio_poll;

  linuxaio_iocbpmax = 0;
  linuxaio_iocbps = 0;
…

inline_size
void
linuxaio_destroy (EV_P)
{
#if EPOLL_FALLBACK
  close (backend_fd);
#endif
  linuxaio_free_iocbp (EV_A);
  ev_io_destroy (linuxaio_ctx);
}

inline_size
…
  linuxaio_destroy (EV_A);
  linuxaio_submitcnt = 0; /* all pointers were invalidated */

  linuxaio_ctx = 0;
  while (ev_io_setup (EV_LINUXAIO_DEPTH, &linuxaio_ctx) < 0)
    ev_syserr ("(libev) linuxaio io_setup");

#if EPOLL_FALLBACK
  while ((backend_fd = ev_epoll_create ()) < 0)
    ev_syserr ("(libev) linuxaio epoll_create");

  ev_io_stop (EV_A_ &linuxaio_epoll_w);
  ev_io_init (&linuxaio_epoll_w, linuxaio_epoll_cb, backend_fd, EV_READ);
  ev_io_start (EV_A_ &linuxaio_epoll_w);
  ev_unref (EV_A); /* watcher should not keep loop alive */
#endif

  fd_rearm_all (EV_A);
}

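/* editor's note, not part of the original file: a minimal usage sketch of how
 * an application would opt into this backend, assuming the EVBACKEND_LINUXAIO
 * flag that libev exposes for it:
 *
 *   struct ev_loop *loop = ev_loop_new (EVBACKEND_LINUXAIO);
 *   if (!loop)
 *     loop = ev_loop_new (EVFLAG_AUTO); // linuxaio unavailable, use default backend
 */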