… | |
… | |
58 | * before. sure, you can use a timerfd, but that's another syscall |
58 | * before. sure, you can use a timerfd, but that's another syscall |
59 | * you could have avoided. overall, this bizarre omission smells |
59 | * you could have avoided. overall, this bizarre omission smells |
60 | * like a µ-optimisation by the io_uring author for his personal |
60 | * like a µ-optimisation by the io_uring author for his personal |
61 | * applications, to the detriment of everybody else who just wants |
61 | * applications, to the detriment of everybody else who just wants |
62 | * an event loop. but, umm, ok, if that's all, it could be worse. |
62 | * an event loop. but, umm, ok, if that's all, it could be worse. |
63 | * (from what I gather from Jens Axboe, it simply didn't occur to him, |
63 | * (from what I gather from the author Jens Axboe, it simply didn't |
64 | * and he made good on it by adding an unlimited number of timeouts |
64 | * occur to him, and he made good on it by adding an unlimited number |
65 | * later :). |
65 | * of timeouts later :). |
66 | * h) initially there was a hardcoded limit of 4096 outstanding events. |
66 | * h) initially there was a hardcoded limit of 4096 outstanding events. |
67 | * later versions not only bump this to 32k, but also can handle |
67 | * later versions not only bump this to 32k, but also can handle |
68 | * an unlimited amount of events, so this only affects the batch size. |
68 | * an unlimited amount of events, so this only affects the batch size. |
69 | * i) unlike linux aio, you *can* register more than the limit |
69 | * i) unlike linux aio, you *can* register more than the limit |
70 | * of fd events. while early versions of io_uring signalled an overflow |
70 | * of fd events. while early versions of io_uring signalled an overflow |
71 | * and you ended up getting wet. 5.5+ does not do this anymore. |
71 | * and you ended up getting wet. 5.5+ does not do this anymore. |
72 | * j) but, oh my! it had exactly the same bugs as the linux aio backend, |
72 | * j) but, oh my! it had exactly the same bugs as the linux aio backend, |
… | |
… | |
456 | if (nev) |
456 | if (nev) |
457 | { |
457 | { |
458 | struct io_uring_sqe *sqe = iouring_sqe_get (EV_A); |
458 | struct io_uring_sqe *sqe = iouring_sqe_get (EV_A); |
459 | sqe->opcode = IORING_OP_POLL_ADD; |
459 | sqe->opcode = IORING_OP_POLL_ADD; |
460 | sqe->fd = fd; |
460 | sqe->fd = fd; |
|
|
461 | sqe->addr = 0; |
461 | sqe->user_data = (uint32_t)fd | ((__u64)(uint32_t)anfds [fd].egen << 32); |
462 | sqe->user_data = (uint32_t)fd | ((__u64)(uint32_t)anfds [fd].egen << 32); |
462 | sqe->poll_events = |
463 | sqe->poll_events = |
463 | (nev & EV_READ ? POLLIN : 0) |
464 | (nev & EV_READ ? POLLIN : 0) |
464 | | (nev & EV_WRITE ? POLLOUT : 0); |
465 | | (nev & EV_WRITE ? POLLOUT : 0); |
465 | iouring_sqe_submit (EV_A_ sqe); |
466 | iouring_sqe_submit (EV_A_ sqe); |
… | |
… | |
516 | return; |
517 | return; |
517 | |
518 | |
518 | if (ecb_expect_false (res < 0)) |
519 | if (ecb_expect_false (res < 0)) |
519 | { |
520 | { |
520 | /*TODO: EINVAL handling (was something failed with this fd)*/ |
521 | /*TODO: EINVAL handling (was something failed with this fd)*/ |
521 | /*TODO: EBUSY happens when?*/ |
|
|
522 | |
522 | |
523 | if (res == -EBADF) |
523 | if (res == -EBADF) |
524 | { |
524 | { |
525 | assert (("libev: event loop rejected bad fd", res != -EBADF)); |
525 | assert (("libev: event loop rejected bad fd", res != -EBADF)); |
526 | fd_kill (EV_A_ fd); |
526 | fd_kill (EV_A_ fd); |