/cvs/libev/ev_iouring.c
Revision: 1.16
Committed: Sat Dec 28 07:37:07 2019 UTC by root
Content type: text/plain
Branch: MAIN
Changes since 1.15: +31 -19 lines
Log Message:
*** empty log message ***

File Contents

/*
 * libev linux io_uring fd activity backend
 *
 * Copyright (c) 2019 Marc Alexander Lehmann <libev@schmorp.de>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without modifica-
 * tion, are permitted provided that the following conditions are met:
 *
 * 1.  Redistributions of source code must retain the above copyright notice,
 *     this list of conditions and the following disclaimer.
 *
 * 2.  Redistributions in binary form must reproduce the above copyright
 *     notice, this list of conditions and the following disclaimer in the
 *     documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
 * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MER-
 * CHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO
 * EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPE-
 * CIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
 * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTH-
 * ERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
 * OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Alternatively, the contents of this file may be used under the terms of
 * the GNU General Public License ("GPL") version 2 or any later version,
 * in which case the provisions of the GPL are applicable instead of
 * the above. If you wish to allow the use of your version of this file
 * only under the terms of the GPL and not to allow others to use your
 * version of this file under the BSD license, indicate your decision
 * by deleting the provisions above and replace them with the notice
 * and other provisions required by the GPL. If you do not delete the
 * provisions above, a recipient may use your version of this file under
 * either the BSD or the GPL.
 */

/*
 * general notes about linux io_uring:
 *
 * a) it's the best interface I have seen so far. on linux.
 * b) best is not necessarily very good.
 * c) it's better than the aio mess, doesn't suffer from the fork problems
 *    of linux aio or epoll and so on and so on. and you could do event stuff
 *    without any syscalls. what's not to like?
 * d) ok, it's vastly more complex, but that's ok, really.
 * e) why 3 mmaps instead of one? one would be more space-efficient,
 *    and I can't see what benefit three would have (other than being
 *    somehow resizable/relocatable, but that's apparently not possible).
 *    (FIXME: newer kernels can use 2 mmaps only, need to look into this).
 * f) hmm, it's practically undebuggable (gdb can't access the memory, and
 *    the bizarre way structure offsets are communicated makes it hard to
 *    just print the ring buffer heads, even *iff* the memory were visible
 *    in gdb). but then, that's also ok, really.
 * g) well, you cannot specify a timeout when waiting for events. no,
 *    seriously, the interface doesn't support a timeout. never seen _that_
 *    before. sure, you can use a timerfd, but that's another syscall
 *    you could have avoided. overall, this bizarre omission smells
 *    like a µ-optimisation by the io_uring author for his personal
 *    applications, to the detriment of everybody else who just wants
 *    an event loop. but, umm, ok, if that's all, it could be worse.
 *    (FIXME: jens mentioned timeout commands, need to investigate)
 * h) there is a hardcoded limit of 4096 outstanding events. okay,
 *    at least there is no arbitrary low system-wide limit...
 *    (FIXME: apparently, this was increased to 32768 in later kernels)
 * i) unlike linux aio, you *can* register more than the limit
 *    of fd events, and the kernel will "gracefully" signal an
 *    overflow, after which you could destroy and recreate the kernel
 *    state, a bit bigger, or fall back to e.g. poll. that's not
 *    totally insane, but kind of questions the point of a high-performance
 *    I/O framework when it doesn't really work
 *    under stress.
 *    (FIXME: iouring should no longer drop events, need to investigate)
 * j) but, oh my! it has exactly the same bugs as the linux aio backend,
 *    where some undocumented poll combinations just fail.
 *    so we need epoll AGAIN as a fallback. AGAIN! epoll!! and of course,
 *    this is completely undocumented, have I mentioned this already?
 * k) overall, the *API* itself is, I dare to say, not a total trainwreck.
 *    the big issues with it are the bugs requiring epoll, which might
 *    or might not get fixed (do I hold my breath?).
 */

/* TODO: use internal TIMEOUT */
/* TODO: take advantage of single mmap, NODROP etc. */
/* TODO: resize cq/sq size independently */

#include <sys/timerfd.h>
#include <sys/mman.h>
#include <poll.h>
#include <stdint.h>

#define IOURING_INIT_ENTRIES 32

/*****************************************************************************/
/* syscall wrapdadoop - this section has the raw api/abi definitions */

#include <linux/fs.h>
#include <linux/types.h>

/* mostly directly taken from the kernel or documentation */

struct io_uring_sqe
{
  __u8 opcode;
  __u8 flags;
  __u16 ioprio;
  __s32 fd;
  union {
    __u64 off;
    __u64 addr2;
  };
  __u64 addr;
  __u32 len;
  union {
    __kernel_rwf_t rw_flags;
    __u32 fsync_flags;
    __u16 poll_events;
    __u32 sync_range_flags;
    __u32 msg_flags;
    __u32 timeout_flags;
    __u32 accept_flags;
    __u32 cancel_flags;
    __u32 open_flags;
    __u32 statx_flags;
  };
  __u64 user_data;
  union {
    __u16 buf_index;
    __u64 __pad2[3];
  };
};

struct io_uring_cqe
{
  __u64 user_data;
  __s32 res;
  __u32 flags;
};

struct io_sqring_offsets
{
  __u32 head;
  __u32 tail;
  __u32 ring_mask;
  __u32 ring_entries;
  __u32 flags;
  __u32 dropped;
  __u32 array;
  __u32 resv1;
  __u64 resv2;
};

struct io_cqring_offsets
{
  __u32 head;
  __u32 tail;
  __u32 ring_mask;
  __u32 ring_entries;
  __u32 overflow;
  __u32 cqes;
  __u64 resv[2];
};

struct io_uring_params
{
  __u32 sq_entries;
  __u32 cq_entries;
  __u32 flags;
  __u32 sq_thread_cpu;
  __u32 sq_thread_idle;
  __u32 features;
  __u32 resv[4];
  struct io_sqring_offsets sq_off;
  struct io_cqring_offsets cq_off;
};

#define IORING_SETUP_CQSIZE 0x00000008

#define IORING_OP_POLL_ADD        6
#define IORING_OP_POLL_REMOVE     7
#define IORING_OP_TIMEOUT        11
#define IORING_OP_TIMEOUT_REMOVE 12

/* relative or absolute, reference clock is CLOCK_MONOTONIC */
struct iouring_kernel_timespec
{
  int64_t tv_sec;
  long long tv_nsec;
};

#define IORING_TIMEOUT_ABS 0x00000001

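/* for reference: the TODO above ("use internal TIMEOUT") would use
 * IORING_OP_TIMEOUT instead of the timerfd. a minimal sketch of what such
 * an sqe might look like, going by the kernel documentation - untested,
 * and not what this file currently does (it would also need a user_data
 * tag distinct from the fd/egen scheme used below):
 *
 *   struct iouring_kernel_timespec ts = { 0, 10000000 }; // 10ms, relative
 *   struct io_uring_sqe *sqe = iouring_sqe_get (EV_A);   // helper defined below
 *   sqe->opcode        = IORING_OP_TIMEOUT;
 *   sqe->fd            = -1;
 *   sqe->addr          = (__u64)(uintptr_t)&ts; // must stay valid until completion
 *   sqe->len           = 1;                     // exactly one timespec
 *   sqe->timeout_flags = 0;                     // relative; IORING_TIMEOUT_ABS for absolute
 *   iouring_sqe_submit (EV_A_ sqe);
 */
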
#define IORING_ENTER_GETEVENTS 0x01

#define IORING_OFF_SQ_RING 0x00000000ULL
#define IORING_OFF_CQ_RING 0x08000000ULL
#define IORING_OFF_SQES    0x10000000ULL

#define IORING_FEAT_SINGLE_MMAP   0x00000001
#define IORING_FEAT_NODROP        0x00000002
#define IORING_FEAT_SUBMIT_STABLE 0x00000004

inline_size
int
evsys_io_uring_setup (unsigned entries, struct io_uring_params *params)
{
  return ev_syscall2 (SYS_io_uring_setup, entries, params);
}

inline_size
int
evsys_io_uring_enter (int fd, unsigned to_submit, unsigned min_complete, unsigned flags, const sigset_t *sig, size_t sigsz)
{
  return ev_syscall6 (SYS_io_uring_enter, fd, to_submit, min_complete, flags, sig, sigsz);
}

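/* for the curious, the raw lifecycle of a ring, as used further below, is
 * roughly the following - a simplified sketch, error handling omitted:
 *
 *   struct io_uring_params params = { 0 };
 *   int fd = evsys_io_uring_setup (32, &params); // create ring, kernel fills params
 *   // mmap the sq ring, cq ring and sqe array using the sizes and offsets
 *   // returned in params.sq_off/params.cq_off (see iouring_internal_init)
 *   evsys_io_uring_enter (fd, to_submit, 1,
 *                         IORING_ENTER_GETEVENTS, 0, 0); // submit and wait
 */
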
/*****************************************************************************/
/* actual backend implementation */

/* we hope that volatile will make the compiler access these variables only once */
#define EV_SQ_VAR(name) *(volatile unsigned *)((char *)iouring_sq_ring + iouring_sq_ ## name)
#define EV_CQ_VAR(name) *(volatile unsigned *)((char *)iouring_cq_ring + iouring_cq_ ## name)

/* the index array */
#define EV_SQ_ARRAY ((unsigned *)((char *)iouring_sq_ring + iouring_sq_array))

/* the submit/completion queue entries */
#define EV_SQES ((struct io_uring_sqe *) iouring_sqes)
#define EV_CQES ((struct io_uring_cqe *)((char *)iouring_cq_ring + iouring_cq_cqes))

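/* a note on ring indexing, since the macros above hide it: head and tail are
 * free-running counters and the ring sizes are powers of two, so an entry is
 * addressed by masking, e.g. (assuming a non-empty ring):
 *
 *   struct io_uring_cqe *cqe = &EV_CQES [EV_CQ_VAR (head) & EV_CQ_VAR (ring_mask)];
 *
 * head == tail means empty; tail - head is the number of pending entries.
 */
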
inline_speed
int
iouring_enter (EV_P_ ev_tstamp timeout)
{
  int res;

  EV_RELEASE_CB;

  res = evsys_io_uring_enter (iouring_fd, iouring_to_submit, 1,
                              timeout > EV_TS_CONST (0.) ? IORING_ENTER_GETEVENTS : 0, 0, 0);

  assert (("libev: io_uring_enter did not consume all sqes", (res < 0 || res == iouring_to_submit)));

  iouring_to_submit = 0;

  EV_ACQUIRE_CB;

  return res;
}

/* TODO: can we move things around so we don't need this forward-reference? */
static void
iouring_poll (EV_P_ ev_tstamp timeout);

static
struct io_uring_sqe *
iouring_sqe_get (EV_P)
{
  unsigned tail;

  for (;;)
    {
      tail = EV_SQ_VAR (tail);

      if (ecb_expect_true (tail + 1 - EV_SQ_VAR (head) <= EV_SQ_VAR (ring_entries)))
        break; /* what's the problem, we have free sqes */

      /* queue full, need to flush and possibly handle some events */

#if EV_FEATURE_CODE
      /* first we ask the kernel nicely, most often this frees up some sqes */
      int res = iouring_enter (EV_A_ EV_TS_CONST (0.));

      ECB_MEMORY_FENCE_ACQUIRE; /* better safe than sorry */

      if (res >= 0)
        continue; /* yes, it worked, try again */
#endif

      /* some problem, possibly EBUSY - do the full poll and let it handle any issues */

      iouring_poll (EV_A_ EV_TS_CONST (0.));
      /* iouring_poll should have done ECB_MEMORY_FENCE_ACQUIRE for us */
    }

  /*assert (("libev: io_uring queue full after flush", tail + 1 - EV_SQ_VAR (head) <= EV_SQ_VAR (ring_entries)));*/

  return EV_SQES + (tail & EV_SQ_VAR (ring_mask));
}

inline_size
void
iouring_sqe_submit (EV_P_ struct io_uring_sqe *sqe)
{
  unsigned idx = sqe - EV_SQES;

  EV_SQ_ARRAY [idx] = idx;
  ECB_MEMORY_FENCE_RELEASE;
  ++EV_SQ_VAR (tail);
  /*ECB_MEMORY_FENCE_RELEASE; - for the time being we assume this is not needed */
  ++iouring_to_submit;
}

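/* the usual pattern for queueing a request, as used in iouring_modify below:
 * get a free sqe (which may recursively poll to make room), fill it in
 * completely - the sqe is reused and stale fields would be submitted as-is -
 * and then publish it:
 *
 *   struct io_uring_sqe *sqe = iouring_sqe_get (EV_A);
 *   sqe->opcode      = IORING_OP_POLL_ADD;
 *   sqe->fd          = fd;
 *   sqe->poll_events = POLLIN;
 *   sqe->user_data   = tag; // value to recognise the cqe by later
 *   iouring_sqe_submit (EV_A_ sqe);
 */
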
/*****************************************************************************/

/* when the timerfd expires we simply note the fact,
 * as the purpose of the timerfd is to wake us up, nothing else.
 * the next iteration should re-set it.
 */
static void
iouring_tfd_cb (EV_P_ struct ev_io *w, int revents)
{
  iouring_tfd_to = EV_TSTAMP_HUGE;
}

/* called for full and partial cleanup */
ecb_cold
static void
iouring_internal_destroy (EV_P)
{
  close (iouring_tfd);
  close (iouring_fd);

  if (iouring_sq_ring != MAP_FAILED) munmap (iouring_sq_ring, iouring_sq_ring_size);
  if (iouring_cq_ring != MAP_FAILED) munmap (iouring_cq_ring, iouring_cq_ring_size);
  if (iouring_sqes    != MAP_FAILED) munmap (iouring_sqes   , iouring_sqes_size   );

  if (ev_is_active (&iouring_tfd_w))
    {
      ev_ref (EV_A);
      ev_io_stop (EV_A_ &iouring_tfd_w);
    }
}

ecb_cold
static int
iouring_internal_init (EV_P)
{
  struct io_uring_params params = { 0 };

  iouring_to_submit = 0;

  iouring_tfd     = -1;
  iouring_sq_ring = MAP_FAILED;
  iouring_cq_ring = MAP_FAILED;
  iouring_sqes    = MAP_FAILED;

  if (!have_monotonic) /* cannot really happen, but what if!! */
    return -1;

  for (;;)
    {
      iouring_fd = evsys_io_uring_setup (iouring_entries, &params);

      if (iouring_fd >= 0)
        break; /* yippie */

      if (errno != EINVAL)
        return -1; /* we failed */

#if TODO
      if ((~params.features) & (IORING_FEAT_NODROP | IORING_FEAT_SINGLE_MMAP))
        return -1; /* we require the above features */
#endif

      /* EINVAL: lots of possible reasons, but maybe
       * it is because we hit the unqueryable hardcoded size limit
       */

      /* we hit the limit already, give up */
      if (iouring_max_entries)
        return -1;

      /* first time we hit EINVAL? assume we hit the limit, so halve the size and retry */
      iouring_entries >>= 1;
      iouring_max_entries = iouring_entries;
    }

  iouring_sq_ring_size = params.sq_off.array + params.sq_entries * sizeof (unsigned);
  iouring_cq_ring_size = params.cq_off.cqes  + params.cq_entries * sizeof (struct io_uring_cqe);
  iouring_sqes_size    =                       params.sq_entries * sizeof (struct io_uring_sqe);

  iouring_sq_ring = mmap (0, iouring_sq_ring_size, PROT_READ | PROT_WRITE,
                          MAP_SHARED | MAP_POPULATE, iouring_fd, IORING_OFF_SQ_RING);
  iouring_cq_ring = mmap (0, iouring_cq_ring_size, PROT_READ | PROT_WRITE,
                          MAP_SHARED | MAP_POPULATE, iouring_fd, IORING_OFF_CQ_RING);
  iouring_sqes    = mmap (0, iouring_sqes_size, PROT_READ | PROT_WRITE,
                          MAP_SHARED | MAP_POPULATE, iouring_fd, IORING_OFF_SQES);

  if (iouring_sq_ring == MAP_FAILED || iouring_cq_ring == MAP_FAILED || iouring_sqes == MAP_FAILED)
    return -1;

  iouring_sq_head         = params.sq_off.head;
  iouring_sq_tail         = params.sq_off.tail;
  iouring_sq_ring_mask    = params.sq_off.ring_mask;
  iouring_sq_ring_entries = params.sq_off.ring_entries;
  iouring_sq_flags        = params.sq_off.flags;
  iouring_sq_dropped      = params.sq_off.dropped;
  iouring_sq_array        = params.sq_off.array;

  iouring_cq_head         = params.cq_off.head;
  iouring_cq_tail         = params.cq_off.tail;
  iouring_cq_ring_mask    = params.cq_off.ring_mask;
  iouring_cq_ring_entries = params.cq_off.ring_entries;
  iouring_cq_overflow     = params.cq_off.overflow;
  iouring_cq_cqes         = params.cq_off.cqes;

  iouring_tfd = timerfd_create (CLOCK_MONOTONIC, TFD_CLOEXEC);

  if (iouring_tfd < 0)
    return iouring_tfd;

  iouring_tfd_to = EV_TSTAMP_HUGE;

  return 0;
}

ecb_cold
static void
iouring_fork (EV_P)
{
  iouring_internal_destroy (EV_A);

  while (iouring_internal_init (EV_A) < 0)
    ev_syserr ("(libev) io_uring_setup");

  fd_rearm_all (EV_A);

  ev_io_stop (EV_A_ &iouring_tfd_w);
  ev_io_set (EV_A_ &iouring_tfd_w, iouring_tfd, EV_READ);
  ev_io_start (EV_A_ &iouring_tfd_w);
}

/*****************************************************************************/

static void
iouring_modify (EV_P_ int fd, int oev, int nev)
{
  if (oev)
    {
      /* we assume the sqe's are all "properly" initialised */
      struct io_uring_sqe *sqe = iouring_sqe_get (EV_A);
      sqe->opcode = IORING_OP_POLL_REMOVE;
      sqe->fd     = fd;
      /* Jens Axboe notified me that user_data is not what is documented, but is
       * some kind of unique ID that has to match, otherwise the request cannot
       * be removed. Since we don't *really* have that, we pass in the old
       * generation counter - if that fails, too bad, it will hopefully be removed
       * at close time and then be ignored. */
      sqe->addr      = (uint32_t)fd | ((__u64)(uint32_t)anfds [fd].egen << 32);
      sqe->user_data = (uint64_t)-1;
      iouring_sqe_submit (EV_A_ sqe);

      /* increment generation counter to avoid handling old events */
      ++anfds [fd].egen;
    }

  if (nev)
    {
      struct io_uring_sqe *sqe = iouring_sqe_get (EV_A);
      sqe->opcode    = IORING_OP_POLL_ADD;
      sqe->fd        = fd;
      sqe->user_data = (uint32_t)fd | ((__u64)(uint32_t)anfds [fd].egen << 32);
      sqe->poll_events =
          (nev & EV_READ  ? POLLIN  : 0)
        | (nev & EV_WRITE ? POLLOUT : 0);
      iouring_sqe_submit (EV_A_ sqe);
    }
}

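/* the user_data tagging scheme used above packs the fd and its generation
 * counter into one 64 bit value; iouring_process_cqe below undoes it. as a
 * round trip:
 *
 *   uint64_t user_data = (uint32_t)fd | ((__u64)(uint32_t)anfds [fd].egen << 32);
 *   int      fd2       = user_data & 0xffffffffU; // == fd
 *   uint32_t gen       = user_data >> 32;         // == (uint32_t)anfds [fd].egen
 *
 * a cqe whose gen no longer matches anfds [fd].egen belongs to a poll request
 * that was logically cancelled, and is ignored.
 */
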
inline_size
void
iouring_tfd_update (EV_P_ ev_tstamp timeout)
{
  ev_tstamp tfd_to = mn_now + timeout;

  /* we assume there will be many iterations per timer change, so
   * we only re-set the timerfd when we have to because its expiry
   * is too late.
   */
  if (ecb_expect_false (tfd_to < iouring_tfd_to))
    {
      struct itimerspec its;

      iouring_tfd_to = tfd_to;
      EV_TS_SET (its.it_interval, 0.);
      EV_TS_SET (its.it_value, tfd_to);

      if (timerfd_settime (iouring_tfd, TFD_TIMER_ABSTIME, &its, 0) < 0)
        assert (("libev: iouring timerfd_settime failed", 0));
    }
}

inline_size
void
iouring_process_cqe (EV_P_ struct io_uring_cqe *cqe)
{
  int fd       = cqe->user_data & 0xffffffffU;
  uint32_t gen = cqe->user_data >> 32;
  int res      = cqe->res;

  /* user_data -1 is a remove that we are not atm. interested in */
  if (cqe->user_data == (uint64_t)-1)
    return;

  assert (("libev: io_uring fd must be in-bounds", fd >= 0 && fd < anfdmax));

  /* documentation lies, of course. the result value is NOT like
   * normal syscalls, but like linux raw syscalls, i.e. negative
   * error numbers. fortunate, as otherwise there would be no way
   * to get error codes at all. still, why not document this?
   */

  /* ignore event if generation doesn't match */
  /* other than skipping removal events, */
  /* this should actually be very rare */
  if (ecb_expect_false (gen != (uint32_t)anfds [fd].egen))
    return;

  if (ecb_expect_false (res < 0))
    {
      /*TODO: EINVAL handling (was something failed with this fd)*/
      /*TODO: EBUSY happens when?*/

      if (res == -EBADF)
        {
          assert (("libev: event loop rejected bad fd", res != -EBADF));
          fd_kill (EV_A_ fd);
        }
      else
        {
          errno = -res;
          ev_syserr ("(libev) IORING_OP_POLL_ADD");
        }

      return;
    }

  /* feed events, we do not expect or handle POLLNVAL */
  fd_event (
    EV_A_
    fd,
    (res & (POLLOUT | POLLERR | POLLHUP) ? EV_WRITE : 0)
    | (res & (POLLIN | POLLERR | POLLHUP) ? EV_READ : 0)
  );

  /* io_uring is oneshot, so we need to re-arm the fd next iteration */
  /* this also means we usually have to do at least one syscall per iteration */
  anfds [fd].events = 0;
  fd_change (EV_A_ fd, EV_ANFD_REIFY);
}

/* called when the event queue overflows */
ecb_cold
static void
iouring_overflow (EV_P)
{
  /* we have two options: resize the queue (by tearing down
   * everything and recreating it), or live with it
   * and poll.
   * we implement this by resizing the queue, and, if that fails,
   * we just recreate the state on every failure, which
   * kind of is a very inefficient poll.
   * one danger is, due to the bias toward lower fds,
   * we will only really get events for those, so
   * maybe we need a poll() fallback, after all.
   */
  /*EV_CQ_VAR (overflow) = 0;*/ /* need to do this if we keep the state and poll manually */

  fd_rearm_all (EV_A);

  /* we double the size until we hit the hard-to-probe maximum */
  if (!iouring_max_entries)
    {
      iouring_entries <<= 1;
      iouring_fork (EV_A);
    }
  else
    {
      /* we hit the kernel limit, we should fall back to something else.
       * we can either poll() a few times and hope for the best,
       * poll always, or switch to epoll.
       * TODO: is this necessary with newer kernels?
       */

      iouring_internal_destroy (EV_A);

      /* this should make it so that on return, we don't call any uring functions */
      iouring_to_submit = 0;

      for (;;)
        {
          backend = epoll_init (EV_A_ 0);

          if (backend)
            break;

          ev_syserr ("(libev) iouring switch to epoll");
        }
    }
}

/* handle any events in the completion queue, return true if there were any */
static int
iouring_handle_cq (EV_P)
{
  unsigned head, tail, mask;

  head = EV_CQ_VAR (head);
  ECB_MEMORY_FENCE_ACQUIRE;
  tail = EV_CQ_VAR (tail);

  if (head == tail)
    return 0;

  /* it can only overflow if we have events, yes, yes? */
  if (ecb_expect_false (EV_CQ_VAR (overflow)))
    {
      iouring_overflow (EV_A);
      return 1;
    }

  mask = EV_CQ_VAR (ring_mask);

  do
    iouring_process_cqe (EV_A_ &EV_CQES [head++ & mask]);
  while (head != tail);

  EV_CQ_VAR (head) = head;
  ECB_MEMORY_FENCE_RELEASE;

  return 1;
}

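/* a note on the fences in iouring_handle_cq: the acquire fence pairs with the
 * kernel's release when it bumps the tail, so the cqe contents are visible by
 * the time we index them, and the release fence after publishing the new head
 * makes sure we are done reading a slot before the kernel may reuse it -
 * roughly the classic single-producer/single-consumer ring buffer protocol.
 */
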
static void
iouring_poll (EV_P_ ev_tstamp timeout)
{
  /* if we have events, no need for extra syscalls, but we might have to queue events */
  /* we also clear the timeout if there are outstanding fdchanges */
  /* the latter should only happen if both the sq and cq are full, most likely */
  /* because we have a lot of event sources that immediately complete */
  /* TODO: fdchangecnt is always 0 because fd_reify does not have two buffers yet */
  if (iouring_handle_cq (EV_A) || fdchangecnt)
    timeout = EV_TS_CONST (0.);
  else
    /* no events, so maybe wait for some */
    iouring_tfd_update (EV_A_ timeout);

  /* only enter the kernel if we have something to submit, or we need to wait */
  if (timeout || iouring_to_submit)
    {
      int res = iouring_enter (EV_A_ timeout);

      if (ecb_expect_false (res < 0))
        if (errno == EINTR)
          /* ignore */;
        else if (errno == EBUSY)
          /* cq full, cannot submit - should be rare because we flush the cq first, so simply ignore */;
        else
          ev_syserr ("(libev) iouring setup");
      else
        iouring_handle_cq (EV_A);
    }
}

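/* putting it together, one loop iteration is: drain the cq (or re-arm the
 * timerfd if it was empty), then a single io_uring_enter both submits any
 * queued POLL_ADD/POLL_REMOVE sqes and, when the timeout is non-zero, waits
 * for at least one completion - the timerfd cqe at the latest - after which
 * the cq is drained again for whatever that wait produced.
 */
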
inline_size
int
iouring_init (EV_P_ int flags)
{
  iouring_entries     = IOURING_INIT_ENTRIES;
  iouring_max_entries = 0;

  if (iouring_internal_init (EV_A) < 0)
    {
      iouring_internal_destroy (EV_A);
      return 0;
    }

  ev_io_init (&iouring_tfd_w, iouring_tfd_cb, iouring_tfd, EV_READ);
  ev_set_priority (&iouring_tfd_w, EV_MINPRI);
  ev_io_start (EV_A_ &iouring_tfd_w);
  ev_unref (EV_A); /* watcher should not keep loop alive */

  backend_modify = iouring_modify;
  backend_poll   = iouring_poll;

  return EVBACKEND_IOURING;
}

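/* from the user's point of view, this backend is selected like any other,
 * e.g. (a minimal sketch using the public libev API):
 *
 *   struct ev_loop *loop = ev_loop_new (EVBACKEND_IOURING);
 *   if (!loop)
 *     loop = ev_loop_new (EVFLAG_AUTO); // io_uring unavailable, fall back
 *
 * iouring_init returning 0 above is what makes libev fall through to the
 * next backend enabled by the flags.
 */
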
inline_size
void
iouring_destroy (EV_P)
{
  iouring_internal_destroy (EV_A);
}