…
 *    POLLOUT|POLLIN, but polling for POLLIN fails. just great,
 *    so we have to fall back to something else (hello, epoll),
 *    but at least the fallback can be slow, because these are
 *    exceptional cases, right?
 * d) hmm, you have to tell the kernel the maximum number of watchers
 *    you want to queue when initialising the aio context. but of
 *    course the real limit is magically calculated in the kernel, and
 *    is often higher than we asked for. so we just have to destroy
 *    the aio context and re-create it a bit larger if we hit the limit.
 *    (starts to remind you of epoll? well, it's a bit more deterministic
 *    and less gambling, but still ugly as hell).
…
 *    limit. or the kernel simply doesn't want to handle your watchers.
 *    what the fuck do we do then? you guessed it, in the middle
 *    of event handling we have to switch to 100% epoll polling. and
 *    that better be as fast as normal epoll polling, so you practically
 *    have to use the normal epoll backend with all its quirks.
 * f) end result of this train wreck: it inherits all the disadvantages
 *    from epoll, while adding a number of its own. why even bother to use
 *    it? because if conditions are right and your fds are supported and you
 *    don't hit a limit, this backend is actually faster, doesn't gamble with
 *    your fds, batches watchers and events and doesn't require costly state
 *    recreates. well, until it does.
 * g) all of this makes this backend use almost twice as much code as epoll.
 *    which in turn uses twice as much code as poll. and that's not counting
 *    the fact that this backend also depends on the epoll backend, making
 *    it three times as much code as poll, or kqueue.
 * h) bleah. why can't linux just do kqueue. sure kqueue is ugly, but by now
 *    it's clear that whatever linux comes up with is far, far, far worse.
 */

#include <sys/time.h> /* actually linux/time.h, but we must assume they are compatible */
#include <poll.h>
#include <linux/aio_abi.h>
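
/* for orientation: a minimal sketch (never compiled, hence the #if 0) of the
 * IOCB_CMD_POLL dance this whole backend is built around - submit a poll
 * request for one fd, reap its completion, tear down. the fd number, the
 * context size and the missing error handling are illustrative assumptions.
 */
#if 0
static void
linuxaio_poll_sketch (void)
{
  aio_context_t ctx = 0;
  struct iocb cb = { 0 };
  struct iocb *cbs [1] = { &cb };
  struct io_event e;

  if (syscall (SYS_io_setup, 32, &ctx) < 0)
    return; /* no aio context, no game */

  cb.aio_lio_opcode = IOCB_CMD_POLL; /* available from linux 4.18 on */
  cb.aio_fildes     = 0;             /* the fd to watch (assumption: stdin) */
  cb.aio_buf        = POLLIN;        /* requested event set lives in aio_buf */

  if (syscall (SYS_io_submit, ctx, 1, cbs) == 1)
    if (syscall (SYS_io_getevents, ctx, 1, 1, &e, 0) == 1) /* 0 timeout: block */
      ; /* e.res now holds the received event mask, e.obj our iocb pointer */

  syscall (SYS_io_destroy, ctx);
}
#endif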
…
  unsigned header_length; /* size of aio_ring */

  struct io_event io_events[0];
};
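
/* note: the aio_context_t handed back by io_setup is, on current kernels,
 * simply the user-space address at which the kernel mapped this very ring
 * structure - that is what lets linuxaio_get_events_from_ring below pick up
 * completions without a syscall. undocumented, but relied upon further down.
 */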
/*
 * define some syscall wrappers for common architectures
 * this is mostly for nice looks during debugging, not performance.
 * our syscalls return < 0, not == -1, on error. which is good
 * enough for linux aio.
 * TODO: arm is also common nowadays, maybe even mips and x86
 * TODO: after implementing this, it suddenly looks like overkill, but it's hard to remove...
 */
#if __GNUC__ && __linux && ECB_AMD64 && !defined __OPTIMIZE_SIZE__
/* the costly errno access probably kills this for size optimisation */

#define ev_syscall(nr,narg,arg1,arg2,arg3,arg4,arg5)                 \
  ({                                                                 \
    long res;                                                        \
    register unsigned long r5 __asm__ ("r8" );                       \
    register unsigned long r4 __asm__ ("r10");                       \
    register unsigned long r3 __asm__ ("rdx");                       \
    register unsigned long r2 __asm__ ("rsi");                       \
    register unsigned long r1 __asm__ ("rdi");                       \
    if (narg >= 5) r5 = (unsigned long)(arg5);                       \
    if (narg >= 4) r4 = (unsigned long)(arg4);                       \
    if (narg >= 3) r3 = (unsigned long)(arg3);                       \
    if (narg >= 2) r2 = (unsigned long)(arg2);                       \
    if (narg >= 1) r1 = (unsigned long)(arg1);                       \
    __asm__ __volatile__ (                                           \
      "syscall\n\t"                                                  \
      : "=a" (res)                                                   \
      : "0" (nr), "r" (r1), "r" (r2), "r" (r3), "r" (r4), "r" (r5)   \
      : "cc", "r11", "cx", "memory");                                \
    errno = -res;                                                    \
    res;                                                             \
  })

#endif

#ifdef ev_syscall
  #define ev_syscall0(nr)                          ev_syscall (nr, 0,    0,    0,    0,    0,    0)
  #define ev_syscall1(nr,arg1)                     ev_syscall (nr, 1, arg1,    0,    0,    0,    0)
  #define ev_syscall2(nr,arg1,arg2)                ev_syscall (nr, 2, arg1, arg2,    0,    0,    0)
  #define ev_syscall3(nr,arg1,arg2,arg3)           ev_syscall (nr, 3, arg1, arg2, arg3,    0,    0)
  #define ev_syscall4(nr,arg1,arg2,arg3,arg4)      ev_syscall (nr, 4, arg1, arg2, arg3, arg4,    0)
  #define ev_syscall5(nr,arg1,arg2,arg3,arg4,arg5) ev_syscall (nr, 5, arg1, arg2, arg3, arg4, arg5)
#else
  #define ev_syscall0(nr)                          syscall (nr)
  #define ev_syscall1(nr,arg1)                     syscall (nr, arg1)
  #define ev_syscall2(nr,arg1,arg2)                syscall (nr, arg1, arg2)
  #define ev_syscall3(nr,arg1,arg2,arg3)           syscall (nr, arg1, arg2, arg3)
  #define ev_syscall4(nr,arg1,arg2,arg3,arg4)      syscall (nr, arg1, arg2, arg3, arg4)
  #define ev_syscall5(nr,arg1,arg2,arg3,arg4,arg5) syscall (nr, arg1, arg2, arg3, arg4, arg5)
#endif

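/* for illustration, a hypothetical
 *   ev_syscall2 (SYS_io_setup, nr_events, &ctx)
 * loads its two arguments into rdi/rsi, executes the syscall instruction
 * and hands back the raw kernel result (with errno set from the negated
 * return value) - the evsys_* functions below are just these, with names.
 */
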
inline_size
int
evsys_io_setup (unsigned nr_events, aio_context_t *ctx_idp)
{
  return ev_syscall2 (SYS_io_setup, nr_events, ctx_idp);
}

inline_size
int
evsys_io_destroy (aio_context_t ctx_id)
{
  return ev_syscall1 (SYS_io_destroy, ctx_id);
}

inline_size
int
evsys_io_submit (aio_context_t ctx_id, long nr, struct iocb *cbp[])
{
  return ev_syscall3 (SYS_io_submit, ctx_id, nr, cbp);
}

inline_size
int
evsys_io_cancel (aio_context_t ctx_id, struct iocb *cbp, struct io_event *result)
{
  return ev_syscall3 (SYS_io_cancel, ctx_id, cbp, result);
}

inline_size
int
evsys_io_getevents (aio_context_t ctx_id, long min_nr, long nr, struct io_event *events, struct timespec *timeout)
{
  return ev_syscall5 (SYS_io_getevents, ctx_id, min_nr, nr, events, timeout);
}

/*****************************************************************************/
/* actual backend implementation */

…
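  /* presumably: round down to a whole number of full event pages, then add
   * the capacity of the first page, which is smaller because it also holds
   * the aio_ring header - e.g. with 4kB pages and 32-byte io_events,
   * one_page = 4096/32 = 128 and first_page = (4096-32)/32 = 127.
   */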
  requests = requests / one_page * one_page + first_page;

  return requests;
}

/* we use our own wrapper structure in case we ever want to do something "clever" */
typedef struct aniocb
{
  struct iocb io;
  /*int inuse;*/
} *ANIOCBP;
…
void
linuxaio_array_needsize_iocbp (ANIOCBP *base, int offset, int count)
{
  while (count--)
    {
      /* TODO: quite the overhead to allocate every iocb separately, maybe use our own allocator? */
      ANIOCBP iocb = (ANIOCBP)ev_malloc (sizeof (*iocb));

      /* full zero initialise is probably not required at the moment, but
       * this is not well documented, so we better do it.
       */
…
  ANIOCBP iocb = linuxaio_iocbps [fd];

  if (iocb->io.aio_reqprio < 0)
    {
      /* we handed this fd over to epoll, so undo this first */
      /* we do it manually because the optimisations on epoll_modify won't do us any good */
      epoll_ctl (backend_fd, EPOLL_CTL_DEL, fd, 0);
      anfds [fd].emask = 0;
      iocb->io.aio_reqprio = 0;
    }

  if (iocb->io.aio_buf)
    /* io_cancel always returns some error on relevant kernels, but works */
…
      --nr;
      ++ev;
    }
}

/* get any events from ring buffer, return true if any were handled */
static int
linuxaio_get_events_from_ring (EV_P)
{
  struct aio_ring *ring = (struct aio_ring *)linuxaio_ctx;

…
    {
      linuxaio_parse_events (EV_A_ ring->io_events + head, ring->nr - head);
      linuxaio_parse_events (EV_A_ ring->io_events, tail);
    }

  ECB_MEMORY_FENCE_RELEASE;
  /* as an extension to C, we hope that the volatile will make this atomic and once-only */
  *(volatile unsigned *)&ring->head = tail;

  return 1;
}

/* read at least one event from kernel, or timeout */
…
          /* ignored */;
        else
          ev_syserr ("(libev) linuxaio io_getevents");
      else if (res)
        {
          /* at least one event available, handle it and any remaining ones in the ring buffer */
          linuxaio_parse_events (EV_A_ ioev, res);
          linuxaio_get_events_from_ring (EV_A);
        }
}

inline_size
int
linuxaio_io_setup (EV_P)
{
  linuxaio_ctx = 0;
  return evsys_io_setup (linuxaio_nr_events (EV_A), &linuxaio_ctx);
}
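
/* an illustrative sketch, not the backend's real control flow (which lives
 * in elided parts of this file): point (d) at the top implies retrying with
 * a larger context whenever the kernel rejects ours. linuxaio_grow_hint is
 * a hypothetical name for whatever would feed linuxaio_nr_events more room.
 */
#if 0
static int
linuxaio_io_setup_with_retry_sketch (EV_P)
{
  while (linuxaio_io_setup (EV_A) < 0)
    {
      if (errno != EAGAIN && errno != EINVAL)
        return 0; /* hard failure - fall back to pure epoll */

      linuxaio_grow_hint <<= 1; /* hypothetical: ask for twice the events */
    }

  return 1;
}
#endif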
…

  /* first phase: submit new iocbs */

  /* io_submit might return less than the requested number of iocbs */
  /* this is, afaics, only because of errors, but we go by the book and use a loop, */
  /* which allows us to pinpoint the erroneous iocb */
  for (submitted = 0; submitted < linuxaio_submitcnt; )
    {
      int res = evsys_io_submit (linuxaio_ctx, linuxaio_submitcnt - submitted, linuxaio_submits + submitted);

      if (expect_false (res < 0))
…
            res = 1; /* skip this iocb - another iocb, another chance */
          }
        else if (errno == EAGAIN)
          {
            /* This happens when the ring buffer is full, or some other shit we
             * don't know and isn't documented. Most likely because we have too
             * many requests and linux aio can't be assed to handle them.
             * In this case, we try to allocate a larger ring buffer, freeing
             * ours first. This might fail, in which case we have to fall back to 100%
             * epoll.
             * God, how I hate linux not getting its act together. Ever.
…
int
linuxaio_init (EV_P_ int flags)
{
  /* would be great to have a nice test for IOCB_CMD_POLL instead */
  /* also: test some semi-common fd types, such as files and ttys in recommended_backends */
  /* 4.18 introduced IOCB_CMD_POLL, 4.19 made epoll work, and we need that */
  if (ev_linux_version () < 0x041300)
    return 0;

  if (!epoll_init (EV_A_ 0))
    return 0;