… | |
… | |
80 | * k) overall, the *API* itself is, I dare to say, not a total trainwreck. |
80 | * k) overall, the *API* itself is, I dare to say, not a total trainwreck. |
81 | * the big issues with it are the bugs requiring epoll, which might
81 | * the big issues with it are the bugs requiring epoll, which might
82 | * or might not get fixed (do I hold my breath?). |
82 | * or might not get fixed (do I hold my breath?). |
83 | */ |
83 | */ |
84 | |
84 | |
|
|
85 | /* TODO: use internal TIMEOUT */ |
|
|
86 | /* TODO: take advantage of single mmap, NODROP etc. */ |
|
|
87 | /* TODO: resize cq/sq size independently */ |
|
|
88 | |
85 | #include <sys/timerfd.h> |
89 | #include <sys/timerfd.h> |
86 | #include <sys/mman.h> |
90 | #include <sys/mman.h> |
87 | #include <poll.h> |
91 | #include <poll.h> |
88 | |
92 | |
89 | #define IOURING_INIT_ENTRIES 32 |
93 | #define IOURING_INIT_ENTRIES 32 |
… | |
… | |
426 | int fd = cqe->user_data & 0xffffffffU; |
430 | int fd = cqe->user_data & 0xffffffffU; |
427 | uint32_t gen = cqe->user_data >> 32; |
431 | uint32_t gen = cqe->user_data >> 32; |
428 | int res = cqe->res; |
432 | int res = cqe->res; |
429 | |
433 | |
430 | /* ignore fd removal events, if there are any. TODO: verify */ |
434 | /* ignore fd removal events, if there are any. TODO: verify */ |
|
|
435 | /* TODO: yes, this triggers */ |
431 | if (cqe->user_data == (__u64)-1) |
436 | if (cqe->user_data == (__u64)-1) |
432 | abort ();//D |
437 | return; |
433 | |
438 | |
434 | assert (("libev: io_uring fd must be in-bounds", fd >= 0 && fd < anfdmax)); |
439 | assert (("libev: io_uring fd must be in-bounds", fd >= 0 && fd < anfdmax)); |
435 | |
440 | |
436 | /* documentation lies, of course. the result value is NOT like |
441 | /* documentation lies, of course. the result value is NOT like |
437 | * normal syscalls, but like linux raw syscalls, i.e. negative |
442 | * normal syscalls, but like linux raw syscalls, i.e. negative |
… | |
… | |
483 | iouring_overflow (EV_P) |
488 | iouring_overflow (EV_P) |
484 | { |
489 | { |
485 | /* we have two options, resize the queue (by tearing down |
490 | /* we have two options, resize the queue (by tearing down |
486 | * everything and recreating it, or living with it |
491 | * everything and recreating it, or living with it |
487 | * and polling. |
492 | * and polling. |
488 | * we implement this by resizing the queue, and, if that fails,
493 | * we implement this by resizing the queue, and, if that fails, |
489 | * we just recreate the state on every failure, which |
494 | * we just recreate the state on every failure, which |
490 | * kind of is a very inefficient poll. |
495 | * kind of is a very inefficient poll. |
491 | * one danger is, due to the bias toward lower fds,
496 | * one danger is, due to the bias toward lower fds,
492 | * we will only really get events for those, so |
497 | * we will only really get events for those, so |
493 | * maybe we need a poll() fallback, after all. |
498 | * maybe we need a poll() fallback, after all. |
… | |
… | |
505 | else |
510 | else |
506 | { |
511 | { |
507 | /* we hit the kernel limit, we should fall back to something else. |
512 | /* we hit the kernel limit, we should fall back to something else. |
508 | * we can either poll() a few times and hope for the best, |
513 | * we can either poll() a few times and hope for the best, |
509 | * poll always, or switch to epoll. |
514 | * poll always, or switch to epoll. |
510 | * since we use epoll anyways, go epoll. |
515 | * TODO: is this necessary with newer kernels? |
511 | */ |
516 | */ |
512 | |
517 | |
513 | iouring_internal_destroy (EV_A); |
518 | iouring_internal_destroy (EV_A); |
514 | |
519 | |
515 | /* this should make it so that on return, we don't call any uring functions */
520 | /* this should make it so that on return, we don't call any uring functions */ |
516 | iouring_to_submit = 0; |
521 | iouring_to_submit = 0; |
517 | |
522 | |
518 | for (;;) |
523 | for (;;) |
519 | { |
524 | { |
520 | backend = epoll_init (EV_A_ 0); |
525 | backend = epoll_init (EV_A_ 0); |