… | |
… | |
80 | * k) overall, the *API* itself is, I dare to say, not a total trainwreck. |
80 | * k) overall, the *API* itself is, I dare to say, not a total trainwreck. |
81 | * the big issues with it are the bugs requiring epoll, which might
81 | * the big issues with it are the bugs requiring epoll, which might
82 | * or might not get fixed (do I hold my breath?). |
82 | * or might not get fixed (do I hold my breath?). |
83 | */ |
83 | */ |
84 | |
84 | |
|
|
85 | /* TODO: use internal TIMEOUT */ |
|
|
86 | /* TODO: take advantage of single mmap, NODROP etc. */ |
|
|
87 | /* TODO: resize cq/sq size independently */ |
|
|
88 | |
85 | #include <sys/timerfd.h> |
89 | #include <sys/timerfd.h> |
86 | #include <sys/mman.h> |
90 | #include <sys/mman.h> |
87 | #include <poll.h> |
91 | #include <poll.h> |
88 | |
92 | |
89 | #define IOURING_INIT_ENTRIES 32 |
93 | #define IOURING_INIT_ENTRIES 32 |
… | |
… | |
295 | break; /* yippie */ |
299 | break; /* yippie */ |
296 | |
300 | |
297 | if (errno != EINVAL) |
301 | if (errno != EINVAL) |
298 | return -1; /* we failed */ |
302 | return -1; /* we failed */ |
299 | |
303 | |
|
|
304 | #if TODO |
|
|
305 | if ((~params.features) & (IORING_FEAT_NODROP | IORING_FEATURE_SINGLE_MMAP)) |
|
|
306 | return -1; /* we require the above features */ |
|
|
307 | #endif |
|
|
308 | |
300 | /* EINVAL: lots of possible reasons, but maybe |
309 | /* EINVAL: lots of possible reasons, but maybe |
301 | * it is because we hit the unqueryable hardcoded size limit |
310 | * it is because we hit the unqueryable hardcoded size limit |
302 | */ |
311 | */ |
303 | |
312 | |
304 | /* we hit the limit already, give up */ |
313 | /* we hit the limit already, give up */ |
… | |
… | |
426 | int fd = cqe->user_data & 0xffffffffU; |
435 | int fd = cqe->user_data & 0xffffffffU; |
427 | uint32_t gen = cqe->user_data >> 32; |
436 | uint32_t gen = cqe->user_data >> 32; |
428 | int res = cqe->res; |
437 | int res = cqe->res; |
429 | |
438 | |
430 | /* ignore fd removal events, if there are any. TODO: verify */ |
439 | /* ignore fd removal events, if there are any. TODO: verify */ |
|
|
440 | /* TODO: yes, this triggers */ |
431 | if (cqe->user_data == (__u64)-1) |
441 | if (cqe->user_data == (__u64)-1) |
432 | abort ();//D |
442 | return; |
433 | |
443 | |
434 | assert (("libev: io_uring fd must be in-bounds", fd >= 0 && fd < anfdmax)); |
444 | assert (("libev: io_uring fd must be in-bounds", fd >= 0 && fd < anfdmax)); |
435 | |
445 | |
436 | /* documentation lies, of course. the result value is NOT like |
446 | /* documentation lies, of course. the result value is NOT like |
437 | * normal syscalls, but like linux raw syscalls, i.e. negative |
447 | * normal syscalls, but like linux raw syscalls, i.e. negative |
… | |
… | |
444 | if (ecb_expect_false (gen != (uint32_t)anfds [fd].egen)) |
454 | if (ecb_expect_false (gen != (uint32_t)anfds [fd].egen)) |
445 | return; |
455 | return; |
446 | |
456 | |
447 | if (ecb_expect_false (res < 0)) |
457 | if (ecb_expect_false (res < 0)) |
448 | { |
458 | { |
449 | //TODO: EINVAL handling (was something failed with this fd) |
459 | /*TODO: EINVAL handling (was something failed with this fd)*/ |
450 | //TODO: EBUSY happens when? |
460 | /*TODO: EBUSY happens when?*/ |
451 | |
461 | |
452 | if (res == -EBADF) |
462 | if (res == -EBADF) |
453 | { |
463 | { |
454 | assert (("libev: event loop rejected bad fd", res != -EBADF)); |
464 | assert (("libev: event loop rejected bad fd", res != -EBADF)); |
455 | fd_kill (EV_A_ fd); |
465 | fd_kill (EV_A_ fd); |
… | |
… | |
483 | iouring_overflow (EV_P) |
493 | iouring_overflow (EV_P) |
484 | { |
494 | { |
485 | /* we have two options, resize the queue (by tearing down |
495 | /* we have two options, resize the queue (by tearing down |
486 | * everything and recreating it, or living with it |
496 | * everything and recreating it, or living with it |
487 | * and polling. |
497 | * and polling. |
488 | * we implement this by resizing the queue, and, if that fails,
498 | * we implement this by resizing the queue, and, if that fails, |
489 | * we just recreate the state on every failure, which |
499 | * we just recreate the state on every failure, which |
490 | * kind of is a very inefficient poll. |
500 | * kind of is a very inefficient poll. |
491 | * one danger is, due to the bias toward lower fds,
501 | * one danger is, due to the bias toward lower fds,
492 | * we will only really get events for those, so |
502 | * we will only really get events for those, so |
493 | * maybe we need a poll() fallback, after all. |
503 | * maybe we need a poll() fallback, after all. |
… | |
… | |
505 | else |
515 | else |
506 | { |
516 | { |
507 | /* we hit the kernel limit, we should fall back to something else. |
517 | /* we hit the kernel limit, we should fall back to something else. |
508 | * we can either poll() a few times and hope for the best, |
518 | * we can either poll() a few times and hope for the best, |
509 | * poll always, or switch to epoll. |
519 | * poll always, or switch to epoll. |
510 | * since we use epoll anyways, go epoll. |
520 | * TODO: is this necessary with newer kernels? |
511 | */ |
521 | */ |
512 | |
522 | |
513 | iouring_internal_destroy (EV_A); |
523 | iouring_internal_destroy (EV_A); |
514 | |
524 | |
515 | /* this should make it so that on return, we don't call any uring functions */
525 | /* this should make it so that on return, we don't call any uring functions */ |
516 | iouring_to_submit = 0; |
526 | iouring_to_submit = 0; |
517 | |
527 | |
518 | for (;;) |
528 | for (;;) |
519 | { |
529 | { |
520 | backend = epoll_init (EV_A_ 0); |
530 | backend = epoll_init (EV_A_ 0); |