…
/* TODO: resize cq/sq size independently */

#include <sys/timerfd.h>
#include <sys/mman.h>
#include <poll.h>
#include <stdint.h>

#define IOURING_INIT_ENTRIES 32

/*****************************************************************************/
/* syscall wrapdadoop - this section has the raw api/abi definitions */
…
  __u32 resv[4];
  struct io_sqring_offsets sq_off;
  struct io_cqring_offsets cq_off;
};

#define IORING_SETUP_CQSIZE 0x00000008

#define IORING_OP_POLL_ADD 6
#define IORING_OP_POLL_REMOVE 7
#define IORING_OP_TIMEOUT 11
#define IORING_OP_TIMEOUT_REMOVE 12

/* relative or absolute, reference clock is CLOCK_MONOTONIC */
struct iouring_kernel_timespec
{
  int64_t tv_sec;
  long long tv_nsec;
};

#define IORING_TIMEOUT_ABS 0x00000001
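
/* for illustration only - a rough sketch of how a timeout request could be
 * queued with the definitions above (not code libev itself uses here):
 *
 *   static struct iouring_kernel_timespec ts = { 5, 0 }; // five seconds, relative
 *
 *   sqe->opcode        = IORING_OP_TIMEOUT;
 *   sqe->fd            = -1;
 *   sqe->addr          = (unsigned long)&ts;
 *   sqe->len           = 1;
 *   sqe->timeout_flags = 0; // or IORING_TIMEOUT_ABS for an absolute time
 */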

#define IORING_ENTER_GETEVENTS 0x01

#define IORING_OFF_SQ_RING 0x00000000ULL
#define IORING_OFF_CQ_RING 0x08000000ULL
#define IORING_OFF_SQES 0x10000000ULL
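
/* for reference only - roughly how these offsets are meant to be used when
 * mapping the rings (sizes come from the sq_off/cq_off data returned by
 * io_uring_setup; a kernel with IORING_FEAT_SINGLE_MMAP can share one
 * mapping for both rings):
 *
 *   sq_ring = mmap (0, sq_size, PROT_READ | PROT_WRITE,
 *                   MAP_SHARED | MAP_POPULATE, fd, IORING_OFF_SQ_RING);
 *   cq_ring = mmap (0, cq_size, PROT_READ | PROT_WRITE,
 *                   MAP_SHARED | MAP_POPULATE, fd, IORING_OFF_CQ_RING);
 *   sqes    = mmap (0, sqes_size, PROT_READ | PROT_WRITE,
 *                   MAP_SHARED | MAP_POPULATE, fd, IORING_OFF_SQES);
 */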

#define IORING_FEAT_SINGLE_MMAP 0x00000001
#define IORING_FEAT_NODROP 0x00000002
#define IORING_FEAT_SUBMIT_STABLE 0x00000004
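
/* the kernel reports these in params.features after setup - e.g. a kernel
 * with IORING_FEAT_NODROP promises not to drop completions when the cq
 * ring overflows (see the feature check further down) */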

inline_size
int
evsys_io_uring_setup (unsigned entries, struct io_uring_params *params)
{
…

/* the submit/completion queue entries */
#define EV_SQES ((struct io_uring_sqe *) iouring_sqes)
#define EV_CQES ((struct io_uring_cqe *)((char *)iouring_cq_ring + iouring_cq_cqes))
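/* note that the cqe array is not mapped separately - it lives inside the
 * cq ring mapping, at the offset the kernel reported in cq_off.cqes,
 * hence the pointer arithmetic above */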

/* TODO: this is not enough, we might have to reap events */
/* TODO: but we can't, as that will re-arm events, causing */
/* TODO: an endless loop in fd_reify */
static int
iouring_enter (EV_P_ ev_tstamp timeout)
{
  int res;

  EV_RELEASE_CB;

  res = evsys_io_uring_enter (iouring_fd, iouring_to_submit, 1,
                              timeout > EV_TS_CONST (0.) ? IORING_ENTER_GETEVENTS : 0, 0, 0);

  assert (("libev: io_uring_enter did not consume all sqes", (res < 0 || res == iouring_to_submit)));

  iouring_to_submit = 0;

  EV_ACQUIRE_CB;

  return res;
}

static
struct io_uring_sqe *
iouring_sqe_get (EV_P)
{
  unsigned tail = EV_SQ_VAR (tail);

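  /* head and tail are free-running counters, so with unsigned arithmetic
   * tail + 1 - head is the queue depth after adding one more sqe, even
   * across counter wraparound */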
  while (ecb_expect_false (tail + 1 - EV_SQ_VAR (head) > EV_SQ_VAR (ring_entries)))
    {
      /* queue full, need to flush */

      int res = iouring_enter (EV_A_ EV_TS_CONST (0.));

      /* io_uring_enter might fail with EBUSY and won't submit anything */
      /* unfortunately, we can't handle this at the moment */

      if (res < 0 && errno == EBUSY)
        /*TODO*/
        ev_syserr ("(libev) io_uring_enter could not clear sq");
      else
        break;

      /* iouring_poll should have done ECB_MEMORY_FENCE_ACQUIRE */
    }

  /*assert (("libev: io_uring queue full after flush", tail + 1 - EV_SQ_VAR (head) <= EV_SQ_VAR (ring_entries)));*/

  return EV_SQES + (tail & EV_SQ_VAR (ring_mask));
}

inline_size
…
  iouring_tfd = -1;
  iouring_sq_ring = MAP_FAILED;
  iouring_cq_ring = MAP_FAILED;
  iouring_sqes = MAP_FAILED;

  if (!have_monotonic) /* cannot really happen, but what if11 */
    return -1;

  for (;;)
    {
      iouring_fd = evsys_io_uring_setup (iouring_entries, &params);

      if (iouring_fd >= 0)
        break; /* yippie */

      if (errno != EINVAL)
        return -1; /* we failed */

#if TODO
      if ((~params.features) & (IORING_FEAT_NODROP | IORING_FEAT_SINGLE_MMAP))
        return -1; /* we require the above features */
#endif

      /* EINVAL: lots of possible reasons, but maybe
       * it is because we hit the unqueryable hardcoded size limit
       */

…
    {
      /* we assume the sqe's are all "properly" initialised */
      struct io_uring_sqe *sqe = iouring_sqe_get (EV_A);
      sqe->opcode = IORING_OP_POLL_REMOVE;
      sqe->fd = fd;
      /* Jens Axboe notified me that user_data is not what is documented, but is
       * some kind of unique ID that has to match, otherwise the request cannot
       * be removed. Since we don't *really* have that, we pass in the old
       * generation counter - if that fails, too bad, it will hopefully be removed
       * at close time and then be ignored. */
      sqe->user_data = (uint32_t)fd | ((__u64)(uint32_t)anfds [fd].egen << 32);
      iouring_sqe_submit (EV_A_ sqe);

      /* increment generation counter to avoid handling old events */
      ++anfds [fd].egen;
    }
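
/* for illustration only (hypothetical, not the code libev actually uses) -
 * the matching POLL_ADD request is built the same way, with the same
 * user_data packing, where nev stands for the new event set:
 *
 *   sqe->opcode      = IORING_OP_POLL_ADD;
 *   sqe->fd          = fd;
 *   sqe->poll_events = (nev & EV_READ ? POLLIN : 0)
 *                    | (nev & EV_WRITE ? POLLOUT : 0);
 *   sqe->user_data   = (uint32_t)fd | ((__u64)(uint32_t)anfds [fd].egen << 32);
 *   iouring_sqe_submit (EV_A_ sqe);
 */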
…
{
  int fd = cqe->user_data & 0xffffffffU;
  uint32_t gen = cqe->user_data >> 32;
  int res = cqe->res;

  assert (("libev: io_uring fd must be in-bounds", fd >= 0 && fd < anfdmax));

  /* documentation lies, of course. the result value is NOT like
   * normal syscalls, but like linux raw syscalls, i.e. negative
   * error numbers. fortunate, as otherwise there would be no way
   * to get error codes at all. still, why not document this?
   */
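  /* i.e. a failed poll shows up here as res == -EBADF or -EINVAL etc.,
   * not as res == -1 with errno set */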

  /* ignore event if generation doesn't match */
  /* other than skipping removal events, */
  /* this should actually be very rare */
  if (ecb_expect_false (gen != (uint32_t)anfds [fd].egen))
    return;

  if (ecb_expect_false (res < 0))
    {
      /*TODO: EINVAL handling (was something failed with this fd)*/
      /*TODO: EBUSY happens when?*/

      if (res == -EBADF)
        {
          assert (("libev: event loop rejected bad fd", res != -EBADF));
          fd_kill (EV_A_ fd);
…
iouring_overflow (EV_P)
{
  /* we have two options: resize the queue (by tearing down
   * everything and recreating it), or living with it
   * and polling.
   * we implement this by resizing the queue and, if that fails,
   * we just recreate the state on every failure, which
   * kind of is a very inefficient poll.
   * one danger is, due to the bias toward lower fds,
   * we will only really get events for those, so
   * maybe we need a poll() fallback, after all.
…
  else
    {
      /* we hit the kernel limit, we should fall back to something else.
       * we can either poll() a few times and hope for the best,
       * poll always, or switch to epoll.
       * TODO: is this necessary with newer kernels?
       */

      iouring_internal_destroy (EV_A);

      /* this should make it so that on return, we don't call any uring functions */
      iouring_to_submit = 0;

      for (;;)
        {
          backend = epoll_init (EV_A_ 0);
…
  iouring_tfd_update (EV_A_ timeout);
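  /* the timerfd armed by iouring_tfd_update above is what bounds the wait
   * below, as io_uring_enter (at least in these kernel versions) has no
   * timeout argument of its own */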

  /* only enter the kernel if we have something to submit, or we need to wait */
  if (timeout || iouring_to_submit)
    {
      int res = iouring_enter (EV_A_ timeout);

      if (ecb_expect_false (res < 0))
        if (errno == EINTR)
          /* ignore */;
        else if (errno == EBUSY)
          /* cq full, cannot submit - should be rare because we flush the cq first, so simply ignore */;
        else
          ev_syserr ("(libev) iouring setup");
      else
        iouring_handle_cq (EV_A);
    }