… | |
… | |
 * therefore the calculation below will use "exactly" 4kB for the ring buffer
 */
55 | #define EV_LINUXAIO_DEPTH (128 / 2 - 2 - 1) /* max. number of io events per batch */ |
55 | #define EV_LINUXAIO_DEPTH (128 / 2 - 2 - 1) /* max. number of io events per batch */ |
56 | |
56 | |
57 | /*****************************************************************************/ |
57 | /*****************************************************************************/ |
/* syscall wrappers - this section has the raw syscall definitions */
59 | |
59 | |
60 | #include <sys/syscall.h> /* no glibc wrappers */ |
60 | #include <sys/syscall.h> /* no glibc wrappers */ |
61 | |
61 | |
/* aio_abi.h is not versioned in any way, so we cannot test for its existence */
63 | #define IOCB_CMD_POLL 5 |
63 | #define IOCB_CMD_POLL 5 |
… | |
… | |
80 | struct io_event io_events[0]; |
80 | struct io_event io_events[0]; |
81 | }; |
81 | }; |
82 | |
82 | |
83 | inline_size |
83 | inline_size |
84 | int |
84 | int |
85 | ev_io_setup (unsigned nr_events, aio_context_t *ctx_idp) |
85 | evsys_io_setup (unsigned nr_events, aio_context_t *ctx_idp) |
86 | { |
86 | { |
87 | return syscall (SYS_io_setup, nr_events, ctx_idp); |
87 | return syscall (SYS_io_setup, nr_events, ctx_idp); |
88 | } |
88 | } |
89 | |
89 | |
90 | inline_size |
90 | inline_size |
91 | int |
91 | int |
92 | ev_io_destroy (aio_context_t ctx_id) |
92 | evsys_io_destroy (aio_context_t ctx_id) |
93 | { |
93 | { |
94 | return syscall (SYS_io_destroy, ctx_id); |
94 | return syscall (SYS_io_destroy, ctx_id); |
95 | } |
95 | } |
96 | |
96 | |
97 | inline_size |
97 | inline_size |
98 | int |
98 | int |
99 | ev_io_submit (aio_context_t ctx_id, long nr, struct iocb *cbp[]) |
99 | evsys_io_submit (aio_context_t ctx_id, long nr, struct iocb *cbp[]) |
100 | { |
100 | { |
101 | return syscall (SYS_io_submit, ctx_id, nr, cbp); |
101 | return syscall (SYS_io_submit, ctx_id, nr, cbp); |
102 | } |
102 | } |
103 | |
103 | |
104 | inline_size |
104 | inline_size |
105 | int |
105 | int |
106 | ev_io_cancel (aio_context_t ctx_id, struct iocb *cbp, struct io_event *result) |
106 | evsys_io_cancel (aio_context_t ctx_id, struct iocb *cbp, struct io_event *result) |
107 | { |
107 | { |
108 | return syscall (SYS_io_cancel, ctx_id, cbp, result); |
108 | return syscall (SYS_io_cancel, ctx_id, cbp, result); |
109 | } |
109 | } |
110 | |
110 | |
111 | inline_size |
111 | inline_size |
112 | int |
112 | int |
113 | ev_io_getevents (aio_context_t ctx_id, long min_nr, long nr, struct io_event *events, struct timespec *timeout) |
113 | evsys_io_getevents (aio_context_t ctx_id, long min_nr, long nr, struct io_event *events, struct timespec *timeout) |
114 | { |
114 | { |
115 | return syscall (SYS_io_getevents, ctx_id, min_nr, nr, events, timeout); |
115 | return syscall (SYS_io_getevents, ctx_id, min_nr, nr, events, timeout); |
116 | } |
116 | } |
117 | |
117 | |
118 | /*****************************************************************************/ |
118 | /*****************************************************************************/ |
… | |
… | |
164 | iocb->io.aio_reqprio = 0; |
164 | iocb->io.aio_reqprio = 0; |
165 | } |
165 | } |
166 | #endif |
166 | #endif |
167 | |
167 | |
168 | if (iocb->io.aio_buf) |
168 | if (iocb->io.aio_buf) |
169 | ev_io_cancel (linuxaio_ctx, &iocb->io, (struct io_event *)0); /* always returns an error relevant kernels */ |
169 | evsys_io_cancel (linuxaio_ctx, &iocb->io, (struct io_event *)0); /* always returns an error relevant kernels */ |
170 | |
170 | |
171 | if (nev) |
171 | if (nev) |
172 | { |
172 | { |
173 | iocb->io.aio_data = fd; |
173 | iocb->io.aio_data = fd; |
174 | iocb->io.aio_fildes = fd; |
174 | iocb->io.aio_fildes = fd; |
… | |
… | |
329 | EV_RELEASE_CB; |
329 | EV_RELEASE_CB; |
330 | |
330 | |
331 | ts.tv_sec = (long)timeout; |
331 | ts.tv_sec = (long)timeout; |
332 | ts.tv_nsec = (long)((timeout - ts.tv_sec) * 1e9); |
332 | ts.tv_nsec = (long)((timeout - ts.tv_sec) * 1e9); |
333 | |
333 | |
334 | res = ev_io_getevents (linuxaio_ctx, 1, sizeof (ioev) / sizeof (ioev [0]), ioev, &ts); |
334 | res = evsys_io_getevents (linuxaio_ctx, 1, sizeof (ioev) / sizeof (ioev [0]), ioev, &ts); |
335 | |
335 | |
336 | EV_ACQUIRE_CB; |
336 | EV_ACQUIRE_CB; |
337 | |
337 | |
338 | if (res < 0) |
338 | if (res < 0) |
339 | if (errno == EINTR) |
339 | if (errno == EINTR) |
… | |
… | |
361 | for (submitted = 0; submitted < linuxaio_submitcnt; ) |
361 | for (submitted = 0; submitted < linuxaio_submitcnt; ) |
362 | { |
362 | { |
363 | #if 0 |
363 | #if 0 |
364 | int res; |
364 | int res; |
365 | if (linuxaio_submits[submitted]->aio_fildes == backend_fd) |
365 | if (linuxaio_submits[submitted]->aio_fildes == backend_fd) |
366 | res = ev_io_submit (linuxaio_ctx, 1, linuxaio_submits + submitted); |
366 | res = evsys_io_submit (linuxaio_ctx, 1, linuxaio_submits + submitted); |
367 | else |
367 | else |
368 | { res = -1; errno = EINVAL; }; |
368 | { res = -1; errno = EINVAL; }; |
369 | #else |
369 | #else |
370 | int res = ev_io_submit (linuxaio_ctx, linuxaio_submitcnt - submitted, linuxaio_submits + submitted); |
370 | int res = evsys_io_submit (linuxaio_ctx, linuxaio_submitcnt - submitted, linuxaio_submits + submitted); |
371 | #endif |
371 | #endif |
372 | |
372 | |
373 | if (expect_false (res < 0)) |
373 | if (expect_false (res < 0)) |
374 | if (errno == EAGAIN) |
374 | if (errno == EAGAIN) |
375 | { |
375 | { |
… | |
… | |
435 | if (ev_linux_version () < 0x041200) |
435 | if (ev_linux_version () < 0x041200) |
436 | return 0; |
436 | return 0; |
437 | #endif |
437 | #endif |
438 | |
438 | |
439 | linuxaio_ctx = 0; |
439 | linuxaio_ctx = 0; |
440 | if (ev_io_setup (EV_LINUXAIO_DEPTH, &linuxaio_ctx) < 0) |
440 | if (evsys_io_setup (EV_LINUXAIO_DEPTH, &linuxaio_ctx) < 0) |
441 | return 0; |
441 | return 0; |
442 | |
442 | |
443 | #if EPOLL_FALLBACK |
443 | #if EPOLL_FALLBACK |
444 | backend_fd = ev_epoll_create (); |
444 | backend_fd = ev_epoll_create (); |
445 | if (backend_fd < 0) |
445 | if (backend_fd < 0) |
446 | { |
446 | { |
447 | ev_io_destroy (linuxaio_ctx); |
447 | evsys_io_destroy (linuxaio_ctx); |
448 | return 0; |
448 | return 0; |
449 | } |
449 | } |
450 | |
450 | |
451 | ev_io_init (EV_A_ &linuxaio_epoll_w, linuxaio_epoll_cb, backend_fd, EV_READ); |
451 | ev_io_init (EV_A_ &linuxaio_epoll_w, linuxaio_epoll_cb, backend_fd, EV_READ); |
452 | ev_set_priority (&linuxaio_epoll_w, EV_MAXPRI); |
452 | ev_set_priority (&linuxaio_epoll_w, EV_MAXPRI); |
… | |
… | |
485 | /* this frees all iocbs, which is very heavy-handed */ |
485 | /* this frees all iocbs, which is very heavy-handed */ |
486 | linuxaio_destroy (EV_A); |
486 | linuxaio_destroy (EV_A); |
487 | linuxaio_submitcnt = 0; /* all pointers were invalidated */ |
487 | linuxaio_submitcnt = 0; /* all pointers were invalidated */ |
488 | |
488 | |
489 | linuxaio_ctx = 0; |
489 | linuxaio_ctx = 0; |
490 | while (ev_io_setup (EV_LINUXAIO_DEPTH, &linuxaio_ctx) < 0) |
490 | while (evsys_io_setup (EV_LINUXAIO_DEPTH, &linuxaio_ctx) < 0) |
491 | ev_syserr ("(libev) linuxaio io_setup"); |
491 | ev_syserr ("(libev) linuxaio io_setup"); |
492 | |
492 | |
493 | #if EPOLL_FALLBACK |
493 | #if EPOLL_FALLBACK |
494 | while ((backend_fd = ev_epoll_create ()) < 0) |
494 | while ((backend_fd = ev_epoll_create ()) < 0) |
495 | ev_syserr ("(libev) linuxaio epoll_create"); |
495 | ev_syserr ("(libev) linuxaio epoll_create"); |