…
/*****************************************************************************/

static void
iouring_modify (EV_P_ int fd, int oev, int nev)
{
  if (ecb_expect_false (anfds [fd].eflags))
    {
      /* we handed this fd over to epoll, so undo this first */
      /* we do it manually because the optimisations on epoll_modify won't do us any good */
      epoll_ctl (iouring_fd, EPOLL_CTL_DEL, fd, 0);
…
      struct io_uring_sqe *sqe = iouring_sqe_get (EV_A);
      sqe->opcode = IORING_OP_POLL_REMOVE;
      sqe->fd = fd;
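      /* a user_data of -1 (all bits set) can never collide with a valid
       * fd/generation pair, so the completion for this removal can be
       * recognised and skipped */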
      sqe->user_data = -1;
      iouring_sqe_submit (EV_A_ sqe);

      /* increment generation counter to avoid handling old events */
      ++anfds [fd].egen;
    }

  if (nev)
    {
      struct io_uring_sqe *sqe = iouring_sqe_get (EV_A);
      sqe->opcode = IORING_OP_POLL_ADD;
      sqe->fd = fd;
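      /* pack the fd into the low 32 bits and the generation counter into the
       * high 32 bits; the inner (uint32_t) cast truncates egen first, so the
       * packed value matches the 32-bit comparison made when the cqe arrives */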
      sqe->user_data = (uint32_t)fd | ((__u64)(uint32_t)anfds [fd].egen << 32);
      sqe->poll_events =
        (nev & EV_READ ? POLLIN : 0)
        | (nev & EV_WRITE ? POLLOUT : 0);
      iouring_sqe_submit (EV_A_ sqe);
    }
…
 * to get error codes at all. still, why not document this?
 */

  /* ignore event if generation doesn't match */
  /* this should actually be very rare */
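  /* (a stale completion still carries the generation that was current when it
   * was queued, which no longer matches once the fd has been modified or reused) */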
  if (ecb_expect_false (gen != (uint32_t)anfds [fd].egen))
    return;

  if (ecb_expect_false (res < 0))
    {
      if (res == -EINVAL)
…
        }

      return;
    }

  /* feed events, we do not expect or handle POLLNVAL */
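  /* POLLERR/POLLHUP are reported as both readable and writable, as the other
   * backends do */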
  fd_event (
    EV_A_
    fd,
    (res & (POLLOUT | POLLERR | POLLHUP) ? EV_WRITE : 0)
…
static void
iouring_poll (EV_P_ ev_tstamp timeout)
{
  /* if we have events, no need for extra syscalls, but we might have to queue events */
  if (iouring_handle_cq (EV_A))
    timeout = EV_TS_CONST (0.);
  else
    /* no events, so maybe wait for some */
    iouring_tfd_update (EV_A_ timeout);

  /* only enter the kernel if we have something to submit, or we need to wait */
…
      int res;

      EV_RELEASE_CB;

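      /* with min_complete == 1, IORING_ENTER_GETEVENTS makes the kernel wait
       * for at least one completion, so request it only when we actually want
       * to block; comparing against EV_TS_CONST (0.) presumably keeps this
       * correct for integer-timestamp builds as well */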
      res = evsys_io_uring_enter (iouring_fd, iouring_to_submit, 1,
                                  timeout > EV_TS_CONST (0.) ? IORING_ENTER_GETEVENTS : 0, 0, 0);
      iouring_to_submit = 0;

      EV_ACQUIRE_CB;

      if (ecb_expect_false (res < 0))
…
iouring_init (EV_P_ int flags)
{
  if (!epoll_init (EV_A_ 0))
    return 0;

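  /* note: ev_io_init is a plain watcher-initialisation macro and takes no
   * loop argument, so no EV_A_ here */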
  ev_io_init (&iouring_epoll_w, iouring_epoll_cb, backend_fd, EV_READ);
  ev_set_priority (&iouring_epoll_w, EV_MAXPRI);

  ev_io_init (&iouring_tfd_w, iouring_tfd_cb, iouring_tfd, EV_READ);
  ev_set_priority (&iouring_tfd_w, EV_MAXPRI);
