/cvs/libev/ev_iouring.c

Comparing libev/ev_iouring.c (file contents):
Revision 1.3 by root, Sat Aug 17 05:30:16 2019 UTC vs.
Revision 1.13 by root, Sat Dec 28 03:29:50 2019 UTC

  * without any syscalls. what's not to like?
  * d) ok, it's vastly more complex, but that's ok, really.
  * e) why 3 mmaps instead of one? one would be more space-efficient,
  *    and I can't see what benefit three would have (other than being
  *    somehow resizable/relocatable, but that's apparently not possible).
+ *    (FIXME: newer kernels can use 2 mmaps only, need to look into this).
  * f) hmm, it's practically undebuggable (gdb can't access the memory, and
<  *    the bizarre way structure offsets are commuinicated makes it hard to
>  *    the bizarre way structure offsets are communicated makes it hard to
  *    just print the ring buffer heads, even *iff* the memory were visible
  *    in gdb. but then, that's also ok, really.
  * g) well, you cannot specify a timeout when waiting for events. no,
  *    seriously, the interface doesn't support a timeout. never seen _that_
  *    before. sure, you can use a timerfd, but that's another syscall
  *    you could have avoided. overall, this bizarre omission smells
  *    like a µ-optimisation by the io_uring author for his personal
  *    applications, to the detriment of everybody else who just wants
  *    an event loop. but, umm, ok, if that's all, it could be worse.
+ *    (FIXME: jens mentioned timeout commands, need to investigate)
  * h) there is a hardcoded limit of 4096 outstanding events. okay,
  *    at least there is no arbitrary low system-wide limit...
+ *    (FIXME: apparently, this was increased to 32768 in later kernels)
  * i) unlike linux aio, you *can* register more than the limit
  *    of fd events, and the kernel will "gracefully" signal an
  *    overflow, after which you could destroy and recreate the kernel
  *    state, a bit bigger, or fall back to e.g. poll. that's not
  *    totally insane, but kind of questions the point of a high
  *    performance I/O framework when it doesn't really work
  *    under stress.
+ *    (FIXME: iouring should no longer drop events, need to investigate)
  * j) but, oh my! it has exactly the same bugs as the linux aio backend,
  *    where some undocumented poll combinations just fail.
  *    so we need epoll AGAIN as a fallback. AGAIN! epoll!! and of course,
  *    this is completely undocumented, have I mentioned this already?
  * k) overall, the *API* itself is, I dare to say, not a total trainwreck.
  *    the big issues with it are the bugs requiring epoll, which might
  *    or might not get fixed (do I hold my breath?).
  */

+/* TODO: use internal TIMEOUT */
+/* TODO: take advantage of single mmap, NODROP etc. */
+/* TODO: resize cq/sq size independently */
+
 #include <sys/timerfd.h>
 #include <sys/mman.h>
 #include <poll.h>
+#include <stdint.h>

 #define IOURING_INIT_ENTRIES 32

 /*****************************************************************************/
 /* syscall wrapdadoop - this section has the raw api/abi definitions */
...
 {
   __u8 opcode;
   __u8 flags;
   __u16 ioprio;
   __s32 fd;
+  union {
     __u64 off;
+    __u64 addr2;
+  };
   __u64 addr;
   __u32 len;
   union {
     __kernel_rwf_t rw_flags;
     __u32 fsync_flags;
     __u16 poll_events;
     __u32 sync_range_flags;
     __u32 msg_flags;
+    __u32 timeout_flags;
+    __u32 accept_flags;
+    __u32 cancel_flags;
+    __u32 open_flags;
+    __u32 statx_flags;
   };
   __u64 user_data;
   union {
     __u16 buf_index;
     __u64 __pad2[3];
...
   __u32 sq_entries;
   __u32 cq_entries;
   __u32 flags;
   __u32 sq_thread_cpu;
   __u32 sq_thread_idle;
+  __u32 features;
<  __u32 resv[5];
>  __u32 resv[4];
   struct io_sqring_offsets sq_off;
   struct io_cqring_offsets cq_off;
 };

+#define IORING_SETUP_CQSIZE 0x00000008
+
 #define IORING_OP_POLL_ADD 6
 #define IORING_OP_POLL_REMOVE 7
+#define IORING_OP_TIMEOUT 11
+#define IORING_OP_TIMEOUT_REMOVE 12
+
+/* relative or absolute, reference clock is CLOCK_MONOTONIC */
+struct iouring_kernel_timespec
+{
+  int64_t tv_sec;
+  long long tv_nsec;
+};
+
+#define IORING_TIMEOUT_ABS 0x00000001

 #define IORING_ENTER_GETEVENTS 0x01

 #define IORING_OFF_SQ_RING 0x00000000ULL
 #define IORING_OFF_CQ_RING 0x08000000ULL
 #define IORING_OFF_SQES 0x10000000ULL
+
+#define IORING_FEAT_SINGLE_MMAP 0x00000001
+#define IORING_FEAT_NODROP 0x00000002
+#define IORING_FEAT_SUBMIT_STABLE 0x00000004

 inline_size
 int
 evsys_io_uring_setup (unsigned entries, struct io_uring_params *params)
 {
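(The IORING_OP_TIMEOUT machinery added above is what the "use internal
TIMEOUT" TODO refers to. A minimal sketch of how it could be used, reusing
the iouring_sqe_get/iouring_sqe_submit helpers that appear later in this
file; my_queue_timeout and my_ts are hypothetical names, not libev API:)

    /* hypothetical sketch: queue a relative timeout so io_uring_enter
     * wakes up on its own instead of via a timerfd */
    static struct iouring_kernel_timespec my_ts; /* must stay valid until the cqe arrives */

    static void
    my_queue_timeout (EV_P_ ev_tstamp timeout)
    {
      struct io_uring_sqe *sqe = iouring_sqe_get (EV_A);

      my_ts.tv_sec  = (int64_t)timeout;
      my_ts.tv_nsec = (long long)((timeout - (ev_tstamp)my_ts.tv_sec) * 1e9);

      sqe->opcode        = IORING_OP_TIMEOUT;
      sqe->fd            = -1;
      sqe->addr          = (__u64)(uintptr_t)&my_ts;
      sqe->len           = 1;         /* one timespec */
      sqe->timeout_flags = 0;         /* relative; IORING_TIMEOUT_ABS for absolute */
      sqe->user_data     = (__u64)-1; /* the cqe handler would need to special-case this */

      iouring_sqe_submit (EV_A_ sqe);
    }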
...
 iouring_tfd_cb (EV_P_ struct ev_io *w, int revents)
 {
   iouring_tfd_to = EV_TSTAMP_HUGE;
 }
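(For context, item (g)'s timerfd workaround: the backend arms iouring_tfd so
that readiness on it bounds an otherwise indefinite wait. A rough sketch of
the arming step, assuming a relative timeout in double seconds; this is
plain timerfd_settime, nothing io_uring-specific:)

    /* sketch: arm iouring_tfd; poll readiness on it then ends the
     * blocking io_uring_enter even though the ring has no timeout */
    struct itimerspec its = { 0 };

    its.it_value.tv_sec  = (time_t)timeout;
    its.it_value.tv_nsec = (long)((timeout - (double)its.it_value.tv_sec) * 1e9);

    timerfd_settime (iouring_tfd, 0, &its, 0);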

-static void
-iouring_epoll_cb (EV_P_ struct ev_io *w, int revents)
-{
-  epoll_poll (EV_A_ 0);
-}
-
 /* called for full and partial cleanup */
 ecb_cold
 static int
 iouring_internal_destroy (EV_P)
 {
...

   if (iouring_sq_ring != MAP_FAILED) munmap (iouring_sq_ring, iouring_sq_ring_size);
   if (iouring_cq_ring != MAP_FAILED) munmap (iouring_cq_ring, iouring_cq_ring_size);
   if (iouring_sqes    != MAP_FAILED) munmap (iouring_sqes   , iouring_sqes_size   );

<  if (ev_is_active (&iouring_epoll_w)) ev_ref (EV_A); ev_io_stop (EV_A_ &iouring_epoll_w);
<  if (ev_is_active (&iouring_tfd_w )) ev_ref (EV_A); ev_io_stop (EV_A_ &iouring_tfd_w );
>  if (ev_is_active (&iouring_tfd_w))
>    {
>      ev_ref (EV_A);
>      ev_io_stop (EV_A_ &iouring_tfd_w);
>    }
 }

 ecb_cold
 static int
 iouring_internal_init (EV_P)
...
   iouring_tfd = -1;
   iouring_sq_ring = MAP_FAILED;
   iouring_cq_ring = MAP_FAILED;
   iouring_sqes = MAP_FAILED;

+  if (!have_monotonic) /* cannot really happen, but what if11 */
+    return -1;
+
   for (;;)
     {
       iouring_fd = evsys_io_uring_setup (iouring_entries, &params);

       if (iouring_fd >= 0)
         break; /* yippie */

       if (errno != EINVAL)
         return -1; /* we failed */
+
+#if TODO
+      if ((~params.features) & (IORING_FEAT_NODROP | IORING_FEAT_SINGLE_MMAP))
+        return -1; /* we require the above features */
+#endif

       /* EINVAL: lots of possible reasons, but maybe
        * it is because we hit the unqueryable hardcoded size limit
        */

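(Once the feature check above is enabled, the "single mmap" TODO could be
taken advantage of roughly like this; a sketch only, reusing the
iouring_sq_ring/iouring_cq_ring variables from this file and assuming the
ring sizes have already been computed from sq_off/cq_off, error handling
omitted:)

      /* sketch: with IORING_FEAT_SINGLE_MMAP, the sq and cq rings share
       * one mapping, so map IORING_OFF_SQ_RING once at the larger size */
      if (params.features & IORING_FEAT_SINGLE_MMAP)
        {
          size_t sz = iouring_sq_ring_size > iouring_cq_ring_size
                      ? iouring_sq_ring_size : iouring_cq_ring_size;

          iouring_sq_ring = mmap (0, sz, PROT_READ | PROT_WRITE,
                                  MAP_SHARED, iouring_fd, IORING_OFF_SQ_RING);
          iouring_cq_ring = iouring_sq_ring; /* cq offsets apply to the same base */
        }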
...
   iouring_internal_destroy (EV_A);

   while (iouring_internal_init (EV_A) < 0)
     ev_syserr ("(libev) io_uring_setup");

-  /* forking epoll should also effectively unregister all fds from the backend */
-  epoll_fork (EV_A);
-  /* epoll_fork already did this. hopefully */
<  /*fd_rearm_all (EV_A);*/
>  fd_rearm_all (EV_A);
-
-  ev_io_stop (EV_A_ &iouring_epoll_w);
-  ev_io_set (EV_A_ &iouring_epoll_w, backend_fd, EV_READ);
-  ev_io_start (EV_A_ &iouring_epoll_w);

   ev_io_stop (EV_A_ &iouring_tfd_w);
   ev_io_set (EV_A_ &iouring_tfd_w, iouring_tfd, EV_READ);
   ev_io_start (EV_A_ &iouring_tfd_w);
 }
...
 /*****************************************************************************/

 static void
 iouring_modify (EV_P_ int fd, int oev, int nev)
 {
-  fprintf (stderr,"modify %d (%d, %d) %d\n", fd, oev,nev, anfds[fd].eflags);//D
-  if (ecb_expect_false (anfds [fd].eflags))
-    {
-      /* we handed this fd over to epoll, so undo this first */
-      /* we do it manually because the optimisations on epoll_modify won't do us any good */
-      epoll_ctl (iouring_fd, EPOLL_CTL_DEL, fd, 0);
-      anfds [fd].eflags = 0;
-      oev = 0;
-    }
-
   if (oev)
     {
       /* we assume the sqe's are all "properly" initialised */
       struct io_uring_sqe *sqe = iouring_sqe_get (EV_A);
       sqe->opcode = IORING_OP_POLL_REMOVE;
       sqe->fd = fd;
<      sqe->user_data = -1;
>      /* Jens Axboe notified me that user_data is not what is documented, but is
>       * some kind of unique ID that has to match, otherwise the request cannot
>       * be removed. Since we don't *really* have that, we pass in the old
>       * generation counter - if that fails, too bad, it will hopefully be removed
>       * at close time and then be ignored. */
>      sqe->user_data = (uint32_t)fd | ((__u64)(uint32_t)anfds [fd].egen << 32);
       iouring_sqe_submit (EV_A_ sqe);

       /* increment generation counter to avoid handling old events */
       ++anfds [fd].egen;
     }
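(The user_data encoding established above, in one place: fd in the low
32 bits, per-fd generation counter in the high 32 bits, so stale
completions can be recognised and dropped. fd_dec/gen_dec are
illustrative names showing the round trip the cqe handler below performs:)

    /* sketch: pack on submission, recover both halves from a completion */
    __u64 ud = (uint32_t)fd | ((__u64)(uint32_t)anfds [fd].egen << 32);

    int      fd_dec  = ud & 0xffffffffU; /* low half: the fd               */
    uint32_t gen_dec = ud >> 32;         /* high half: the generation      */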
...
 {
   int fd = cqe->user_data & 0xffffffffU;
   uint32_t gen = cqe->user_data >> 32;
   int res = cqe->res;

-  /* ignore fd removal events, if there are any. TODO: verify */
-  if (cqe->user_data == (__u64)-1)
-    abort ();//D
-
   assert (("libev: io_uring fd must be in-bounds", fd >= 0 && fd < anfdmax));

   /* documentation lies, of course. the result value is NOT like
    * normal syscalls, but like linux raw syscalls, i.e. negative
    * error numbers. fortunate, as otherwise there would be no way
    * to get error codes at all. still, why not document this?
    */

   /* ignore event if generation doesn't match */
+  /* other than skipping removal events, */
   /* this should actually be very rare */
   if (ecb_expect_false (gen != (uint32_t)anfds [fd].egen))
     return;

   if (ecb_expect_false (res < 0))
     {
<      if (res == -EINVAL)
<        {
>      /*TODO: EINVAL handling (was something failed with this fd)*/
>      /*TODO: EBUSY happens when?*/
-          /* we assume this error code means the fd/poll combination is buggy
-           * and fall back to epoll.
-           * this error code might also indicate a bug, but the kernel doesn't
-           * distinguish between those two conditions, so... sigh...
-           */

-          epoll_modify (EV_A_ fd, 0, anfds [fd].events);
-        }
<      else if (res == -EBADF)
>      if (res == -EBADF)
         {
           assert (("libev: event loop rejected bad fd", res != -EBADF));
           fd_kill (EV_A_ fd);
         }
       else
...
         }

       return;
     }

-  fprintf (stderr, "fd %d event, rearm\n", fd);//D
-
   /* feed events, we do not expect or handle POLLNVAL */
   fd_event (
     EV_A_
     fd,
     (res & (POLLOUT | POLLERR | POLLHUP) ? EV_WRITE : 0)
...
 iouring_overflow (EV_P)
 {
   /* we have two options, resize the queue (by tearing down
    * everything and recreating it), or living with it
    * and polling.
<   * we implement this by resizing tghe queue, and, if that fails,
>   * we implement this by resizing the queue, and, if that fails,
    * we just recreate the state on every failure, which
    * kind of is a very inefficient poll.
    * one danger is, due to the bias toward lower fds,
    * we will only really get events for those, so
    * maybe we need a poll() fallback, after all.
...
   else
     {
       /* we hit the kernel limit, we should fall back to something else.
        * we can either poll() a few times and hope for the best,
        * poll always, or switch to epoll.
<       * since we use epoll anyways, go epoll.
>       * TODO: is this necessary with newer kernels?
        */

       iouring_internal_destroy (EV_A);

<      /* this should make it so that on return, we don'T call any uring functions */
>      /* this should make it so that on return, we don't call any uring functions */
       iouring_to_submit = 0;

       for (;;)
         {
           backend = epoll_init (EV_A_ 0);
...
     timeout = EV_TS_CONST (0.);
   else
     /* no events, so maybe wait for some */
     iouring_tfd_update (EV_A_ timeout);

<  /* only enter the kernel if we have somethign to submit, or we need to wait */
>  /* only enter the kernel if we have something to submit, or we need to wait */
   if (timeout || iouring_to_submit)
     {
       int res;

       EV_RELEASE_CB;
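(What presumably follows the EV_RELEASE_CB above: the actual kernel entry.
A sketch assuming an evsys_io_uring_enter wrapper analogous to the
evsys_io_uring_setup shown earlier; the wrapper itself is not part of the
hunks displayed here:)

      /* sketch: submit any pending sqes and block for at least one
       * completion; the armed timerfd bounds the wait */
      res = evsys_io_uring_enter (iouring_fd, iouring_to_submit, 1,
                                  IORING_ENTER_GETEVENTS, 0, 0);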
...

 inline_size
 int
 iouring_init (EV_P_ int flags)
 {
-  if (!epoll_init (EV_A_ 0))
-    return 0;
-
-  ev_io_init (EV_A_ &iouring_epoll_w, iouring_epoll_cb, backend_fd, EV_READ);
-  ev_set_priority (&iouring_epoll_w, EV_MAXPRI);
-
-  ev_io_init (&iouring_tfd_w, iouring_tfd_cb, iouring_tfd, EV_READ);
-  ev_set_priority (&iouring_tfd_w, EV_MAXPRI);
-
   iouring_entries = IOURING_INIT_ENTRIES;
   iouring_max_entries = 0;

   if (iouring_internal_init (EV_A) < 0)
     {
       iouring_internal_destroy (EV_A);
       return 0;
     }

<  ev_io_start (EV_A_ &iouring_epoll_w);
<  ev_unref (EV_A); /* watcher should not keep loop alive */
>  ev_io_init (&iouring_tfd_w, iouring_tfd_cb, iouring_tfd, EV_READ);
>  ev_set_priority (&iouring_tfd_w, EV_MINPRI);
-
   ev_io_start (EV_A_ &iouring_tfd_w);
   ev_unref (EV_A); /* watcher should not keep loop alive */

   backend_modify = iouring_modify;
   backend_poll = iouring_poll;
...
 inline_size
 void
 iouring_destroy (EV_P)
 {
   iouring_internal_destroy (EV_A);
-  epoll_destroy (EV_A);
 }


Diff Legend

 - Removed lines
 + Added lines
 < Changed lines (old revision)
 > Changed lines (new revision)
   Unmarked lines are unchanged context; "..." marks omitted lines.