… | |
… | |
299 | break; /* yippie */ |
299 | break; /* yippie */ |
300 | |
300 | |
301 | if (errno != EINVAL) |
301 | if (errno != EINVAL) |
302 | return -1; /* we failed */ |
302 | return -1; /* we failed */ |
303 | |
303 | |
|
|
304 | #if TODO |
|
|
305 | if ((~params.features) & (IORING_FEAT_NODROP | IORING_FEATURE_SINGLE_MMAP)) |
|
|
306 | return -1; /* we require the above features */ |
|
|
307 | #endif |
|
|
308 | |
304 | /* EINVAL: lots of possible reasons, but maybe |
309 | /* EINVAL: lots of possible reasons, but maybe |
305 | * it is because we hit the unqueryable hardcoded size limit |
310 | * it is because we hit the unqueryable hardcoded size limit |
306 | */ |
311 | */ |
307 | |
312 | |
308 | /* we hit the limit already, give up */ |
313 | /* we hit the limit already, give up */ |
… | |
… | |
430 | int fd = cqe->user_data & 0xffffffffU; |
435 | int fd = cqe->user_data & 0xffffffffU; |
431 | uint32_t gen = cqe->user_data >> 32; |
436 | uint32_t gen = cqe->user_data >> 32; |
432 | int res = cqe->res; |
437 | int res = cqe->res; |
433 | |
438 | |
434 | /* ignore fd removal events, if there are any. TODO: verify */ |
439 | /* ignore fd removal events, if there are any. TODO: verify */ |
|
|
440 | /* TODO: yes, this triggers */ |
435 | if (cqe->user_data == (__u64)-1) |
441 | if (cqe->user_data == (__u64)-1) |
436 | abort ();//D |
442 | return; |
437 | |
443 | |
438 | assert (("libev: io_uring fd must be in-bounds", fd >= 0 && fd < anfdmax)); |
444 | assert (("libev: io_uring fd must be in-bounds", fd >= 0 && fd < anfdmax)); |
439 | |
445 | |
440 | /* documentation lies, of course. the result value is NOT like |
446 | /* documentation lies, of course. the result value is NOT like |
441 | * normal syscalls, but like linux raw syscalls, i.e. negative |
447 | * normal syscalls, but like linux raw syscalls, i.e. negative |
… | |
… | |
448 | if (ecb_expect_false (gen != (uint32_t)anfds [fd].egen)) |
454 | if (ecb_expect_false (gen != (uint32_t)anfds [fd].egen)) |
449 | return; |
455 | return; |
450 | |
456 | |
451 | if (ecb_expect_false (res < 0)) |
457 | if (ecb_expect_false (res < 0)) |
452 | { |
458 | { |
453 | //TODO: EINVAL handling (was something failed with this fd) |
459 | /*TODO: EINVAL handling (was something failed with this fd)*/ |
454 | //TODO: EBUSY happens when? |
460 | /*TODO: EBUSY happens when?*/ |
455 | |
461 | |
456 | if (res == -EBADF) |
462 | if (res == -EBADF) |
457 | { |
463 | { |
458 | assert (("libev: event loop rejected bad fd", res != -EBADF)); |
464 | assert (("libev: event loop rejected bad fd", res != -EBADF)); |
459 | fd_kill (EV_A_ fd); |
465 | fd_kill (EV_A_ fd); |
… | |
… | |
487 | iouring_overflow (EV_P) |
493 | iouring_overflow (EV_P) |
488 | { |
494 | { |
489 | /* we have two options, resize the queue (by tearing down |
495 | /* we have two options, resize the queue (by tearing down |
490 | * everything and recreating it, or living with it |
496 | * everything and recreating it, or living with it |
491 | * and polling. |
497 | * and polling. |
492 | * we implement this by resizing the queue, and, if that fails, |
498 | * we implement this by resizing the queue, and, if that fails, |
493 | * we just recreate the state on every failure, which |
499 | * we just recreate the state on every failure, which |
494 | * kind of is a very inefficient poll. |
500 | * kind of is a very inefficient poll. |
495 | * one danger is, due to the bias toward lower fds, |
501 | * one danger is, due to the bios toward lower fds, |
496 | * we will only really get events for those, so |
502 | * we will only really get events for those, so |
497 | * maybe we need a poll() fallback, after all. |
503 | * maybe we need a poll() fallback, after all. |
… | |
… | |
509 | else |
515 | else |
510 | { |
516 | { |
511 | /* we hit the kernel limit, we should fall back to something else. |
517 | /* we hit the kernel limit, we should fall back to something else. |
512 | * we can either poll() a few times and hope for the best, |
518 | * we can either poll() a few times and hope for the best, |
513 | * poll always, or switch to epoll. |
519 | * poll always, or switch to epoll. |
514 | * since we use epoll anyways, go epoll. |
520 | * TODO: is this necessary with newer kernels? |
515 | */ |
521 | */ |
516 | |
522 | |
517 | iouring_internal_destroy (EV_A); |
523 | iouring_internal_destroy (EV_A); |
518 | |
524 | |
519 | /* this should make it so that on return, we don't call any uring functions */ |
525 | /* this should make it so that on return, we don't call any uring functions */ |
520 | iouring_to_submit = 0; |
526 | iouring_to_submit = 0; |
521 | |
527 | |
522 | for (;;) |
528 | for (;;) |
523 | { |
529 | { |
524 | backend = epoll_init (EV_A_ 0); |
530 | backend = epoll_init (EV_A_ 0); |