/cvs/libev/ev_iouring.c
Revision: 1.15
Committed: Sat Dec 28 05:53:48 2019 UTC (4 years, 6 months ago) by root
Branch: MAIN
Changes since 1.14: +7 -1 lines

/*
 * libev linux io_uring fd activity backend
 *
 * Copyright (c) 2019 Marc Alexander Lehmann <libev@schmorp.de>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without modifica-
 * tion, are permitted provided that the following conditions are met:
 *
 *   1.  Redistributions of source code must retain the above copyright notice,
 *       this list of conditions and the following disclaimer.
 *
 *   2.  Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in the
 *       documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
 * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MER-
 * CHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
 * EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPE-
 * CIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
 * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTH-
 * ERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
 * OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Alternatively, the contents of this file may be used under the terms of
 * the GNU General Public License ("GPL") version 2 or any later version,
 * in which case the provisions of the GPL are applicable instead of
 * the above. If you wish to allow the use of your version of this file
 * only under the terms of the GPL and not to allow others to use your
 * version of this file under the BSD license, indicate your decision
 * by deleting the provisions above and replace them with the notice
 * and other provisions required by the GPL. If you do not delete the
 * provisions above, a recipient may use your version of this file under
 * either the BSD or the GPL.
 */

/*
 * general notes about linux io_uring:
 *
 * a) it's the best interface I have seen so far. on linux.
 * b) best is not necessarily very good.
 * c) it's better than the aio mess, doesn't suffer from the fork problems
 *    of linux aio or epoll and so on and so on. and you could do event stuff
 *    without any syscalls. what's not to like?
 * d) ok, it's vastly more complex, but that's ok, really.
 * e) why 3 mmaps instead of one? one would be more space-efficient,
 *    and I can't see what benefit three would have (other than being
 *    somehow resizable/relocatable, but that's apparently not possible).
 *    (FIXME: newer kernels can use 2 mmaps only, need to look into this).
 * f) hmm, it's practically undebuggable (gdb can't access the memory, and
 *    the bizarre way structure offsets are communicated makes it hard to
 *    just print the ring buffer heads, even if the memory were visible
 *    in gdb). but then, that's also ok, really.
 * g) well, you cannot specify a timeout when waiting for events. no,
 *    seriously, the interface doesn't support a timeout. never seen _that_
 *    before. sure, you can use a timerfd, but that's another syscall
 *    you could have avoided. overall, this bizarre omission smells
 *    like a µ-optimisation by the io_uring author for his personal
 *    applications, to the detriment of everybody else who just wants
 *    an event loop. but, umm, ok, if that's all, it could be worse.
 *    (FIXME: jens mentioned timeout commands, need to investigate)
 * h) there is a hardcoded limit of 4096 outstanding events. okay,
 *    at least there is no arbitrarily low system-wide limit...
 *    (FIXME: apparently, this was increased to 32768 in later kernels)
 * i) unlike linux aio, you *can* register more than the limit
 *    of fd events, and the kernel will "gracefully" signal an
 *    overflow, after which you could destroy and recreate the kernel
 *    state, a bit bigger, or fall back to e.g. poll. that's not
 *    totally insane, but it kind of questions the point of a high
 *    performance I/O framework when it doesn't really work
 *    under stress.
 *    (FIXME: io_uring should no longer drop events, need to investigate)
 * j) but, oh my! it has exactly the same bugs as the linux aio backend,
 *    where some undocumented poll combinations just fail.
 *    so we need epoll AGAIN as a fallback. AGAIN! epoll!! and of course,
 *    this is completely undocumented, have I mentioned this already?
 * k) overall, the *API* itself is, I dare to say, not a total trainwreck.
 *    the big issues with it are the bugs requiring epoll, which might
 *    or might not get fixed (do I hold my breath?).
 */

/* TODO: use internal TIMEOUT */
/* TODO: take advantage of single mmap, NODROP etc. */
/* TODO: resize cq/sq size independently */

#include <sys/timerfd.h>
#include <sys/mman.h>
#include <poll.h>
#include <stdint.h>

#define IOURING_INIT_ENTRIES 32

/*****************************************************************************/
/* syscall wrapdadoop - this section has the raw api/abi definitions */

#include <linux/fs.h>
#include <linux/types.h>

/* mostly directly taken from the kernel or documentation */

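/* a submission queue entry - one request to the kernel. the unions overlay
 * per-opcode data; this backend only uses opcode, fd, poll_events, addr
 * and user_data */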
struct io_uring_sqe
{
  __u8 opcode;
  __u8 flags;
  __u16 ioprio;
  __s32 fd;
  union {
    __u64 off;
    __u64 addr2;
  };
  __u64 addr;
  __u32 len;
  union {
    __kernel_rwf_t rw_flags;
    __u32 fsync_flags;
    __u16 poll_events;
    __u32 sync_range_flags;
    __u32 msg_flags;
    __u32 timeout_flags;
    __u32 accept_flags;
    __u32 cancel_flags;
    __u32 open_flags;
    __u32 statx_flags;
  };
  __u64 user_data;
  union {
    __u16 buf_index;
    __u64 __pad2[3];
  };
};

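/* a completion queue entry - the kernel's reply to one sqe, matched up
 * via user_data */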
struct io_uring_cqe
{
  __u64 user_data;
  __s32 res;
  __u32 flags;
};

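/* byte offsets into the sq/cq ring mappings, filled in for us by
 * io_uring_setup via struct io_uring_params below */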
struct io_sqring_offsets
{
  __u32 head;
  __u32 tail;
  __u32 ring_mask;
  __u32 ring_entries;
  __u32 flags;
  __u32 dropped;
  __u32 array;
  __u32 resv1;
  __u64 resv2;
};

struct io_cqring_offsets
{
  __u32 head;
  __u32 tail;
  __u32 ring_mask;
  __u32 ring_entries;
  __u32 overflow;
  __u32 cqes;
  __u64 resv[2];
};

struct io_uring_params
{
  __u32 sq_entries;
  __u32 cq_entries;
  __u32 flags;
  __u32 sq_thread_cpu;
  __u32 sq_thread_idle;
  __u32 features;
  __u32 resv[4];
  struct io_sqring_offsets sq_off;
  struct io_cqring_offsets cq_off;
};

#define IORING_SETUP_CQSIZE 0x00000008

#define IORING_OP_POLL_ADD        6
#define IORING_OP_POLL_REMOVE     7
#define IORING_OP_TIMEOUT        11
#define IORING_OP_TIMEOUT_REMOVE 12

/* relative or absolute, reference clock is CLOCK_MONOTONIC */
struct iouring_kernel_timespec
{
  int64_t tv_sec;
  long long tv_nsec;
};

#define IORING_TIMEOUT_ABS 0x00000001

#define IORING_ENTER_GETEVENTS 0x01

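/* the magic mmap offsets that select which of the three rings to map */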
#define IORING_OFF_SQ_RING 0x00000000ULL
#define IORING_OFF_CQ_RING 0x08000000ULL
#define IORING_OFF_SQES    0x10000000ULL

#define IORING_FEAT_SINGLE_MMAP   0x00000001
#define IORING_FEAT_NODROP        0x00000002
#define IORING_FEAT_SUBMIT_STABLE 0x00000004

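/* libc provides no wrappers for these syscalls, so we invoke them directly */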
inline_size
int
evsys_io_uring_setup (unsigned entries, struct io_uring_params *params)
{
  return ev_syscall2 (SYS_io_uring_setup, entries, params);
}

inline_size
int
evsys_io_uring_enter (int fd, unsigned to_submit, unsigned min_complete, unsigned flags, const sigset_t *sig, size_t sigsz)
{
  return ev_syscall6 (SYS_io_uring_enter, fd, to_submit, min_complete, flags, sig, sigsz);
}

/*****************************************************************************/
/* actual backend implementation */

/* we hope that volatile will make the compiler access these variables only once */
#define EV_SQ_VAR(name) *(volatile unsigned *)((char *)iouring_sq_ring + iouring_sq_ ## name)
#define EV_CQ_VAR(name) *(volatile unsigned *)((char *)iouring_cq_ring + iouring_cq_ ## name)

/* the index array */
#define EV_SQ_ARRAY ((unsigned *)((char *)iouring_sq_ring + iouring_sq_array))

/* the submit/completion queue entries */
#define EV_SQES ((struct io_uring_sqe *) iouring_sqes)
#define EV_CQES ((struct io_uring_cqe *)((char *)iouring_cq_ring + iouring_cq_cqes))

/* TODO: this is not enough, we might have to reap events */
/* TODO: but we can't, as that will re-arm events, causing */
/* TODO: an endless loop in fd_reify */
static int
iouring_enter (EV_P_ ev_tstamp timeout)
{
  int res;

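  /* EV_RELEASE_CB/EV_ACQUIRE_CB drop and reacquire the user-supplied loop
   * lock (if any) around the potentially blocking syscall */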
  EV_RELEASE_CB;

  res = evsys_io_uring_enter (iouring_fd, iouring_to_submit, 1,
                              timeout > EV_TS_CONST (0.) ? IORING_ENTER_GETEVENTS : 0, 0, 0);

  assert (("libev: io_uring_enter did not consume all sqes", (res < 0 || res == iouring_to_submit)));

  iouring_to_submit = 0;

  EV_ACQUIRE_CB;

  return res;
}

static
struct io_uring_sqe *
iouring_sqe_get (EV_P)
{
  unsigned tail = EV_SQ_VAR (tail);

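  /* ring indices increase monotonically and are reduced with ring_mask only
   * on access, so "tail + 1 - head" counts used entries correctly even
   * across unsigned wraparound */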
  while (ecb_expect_false (tail + 1 - EV_SQ_VAR (head) > EV_SQ_VAR (ring_entries)))
    {
      /* queue full, need to flush */

      int res = iouring_enter (EV_A_ EV_TS_CONST (0.));

      /* io_uring_enter might fail with EBUSY and won't submit anything */
      /* unfortunately, we can't handle this at the moment */

      if (res < 0 && errno == EBUSY)
        /* the sane thing might be to resize, but we can't */
        //TODO
        ev_syserr ("(libev) io_uring_enter could not clear sq");
      else
        break;

      /* iouring_poll should have done ECB_MEMORY_FENCE_ACQUIRE */
    }

  /*assert (("libev: io_uring queue full after flush", tail + 1 - EV_SQ_VAR (head) <= EV_SQ_VAR (ring_entries)));*/

  return EV_SQES + (tail & EV_SQ_VAR (ring_mask));
}

inline_size
void
iouring_sqe_submit (EV_P_ struct io_uring_sqe *sqe)
{
  unsigned idx = sqe - EV_SQES;

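  /* the sq array adds one level of indirection between ring slots and
   * sqes; we always use the identity mapping */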
  EV_SQ_ARRAY [idx] = idx;
  ECB_MEMORY_FENCE_RELEASE;
  ++EV_SQ_VAR (tail);
  /*ECB_MEMORY_FENCE_RELEASE;*/ /* for the time being we assume this is not needed */
  ++iouring_to_submit;
}

/*****************************************************************************/

/* when the timerfd expires we simply note the fact,
 * as the purpose of the timerfd is to wake us up, nothing else.
 * the next iteration should re-set it.
 */
static void
iouring_tfd_cb (EV_P_ struct ev_io *w, int revents)
{
  iouring_tfd_to = EV_TSTAMP_HUGE;
}

/* called for full and partial cleanup */
ecb_cold
static void
iouring_internal_destroy (EV_P)
{
  close (iouring_tfd);
  close (iouring_fd);

  if (iouring_sq_ring != MAP_FAILED) munmap (iouring_sq_ring, iouring_sq_ring_size);
  if (iouring_cq_ring != MAP_FAILED) munmap (iouring_cq_ring, iouring_cq_ring_size);
  if (iouring_sqes    != MAP_FAILED) munmap (iouring_sqes   , iouring_sqes_size   );

  if (ev_is_active (&iouring_tfd_w))
    {
      ev_ref (EV_A);
      ev_io_stop (EV_A_ &iouring_tfd_w);
    }
}

ecb_cold
static int
iouring_internal_init (EV_P)
{
  struct io_uring_params params = { 0 };

  iouring_to_submit = 0;

  iouring_tfd     = -1;
  iouring_sq_ring = MAP_FAILED;
  iouring_cq_ring = MAP_FAILED;
  iouring_sqes    = MAP_FAILED;

  if (!have_monotonic) /* cannot really happen, but what if!! */
    return -1;

  for (;;)
    {
      iouring_fd = evsys_io_uring_setup (iouring_entries, &params);

      if (iouring_fd >= 0)
        break; /* yippie */

      if (errno != EINVAL)
        return -1; /* we failed */

#if TODO
      if ((~params.features) & (IORING_FEAT_NODROP | IORING_FEAT_SINGLE_MMAP))
        return -1; /* we require the above features */
#endif

      /* EINVAL: lots of possible reasons, but maybe
       * it is because we hit the unqueryable hardcoded size limit
       */

      /* we hit the limit already, give up */
      if (iouring_max_entries)
        return -1;

      /* first time we hit EINVAL? assume we hit the limit, so go back and retry */
      iouring_entries >>= 1;
      iouring_max_entries = iouring_entries;
    }

  iouring_sq_ring_size = params.sq_off.array + params.sq_entries * sizeof (unsigned);
  iouring_cq_ring_size = params.cq_off.cqes  + params.cq_entries * sizeof (struct io_uring_cqe);
  iouring_sqes_size    =                       params.sq_entries * sizeof (struct io_uring_sqe);

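  /* the sq ring, cq ring and sqe array are separate kernel allocations,
   * selected via the magic mmap offsets above */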
  iouring_sq_ring = mmap (0, iouring_sq_ring_size, PROT_READ | PROT_WRITE,
                          MAP_SHARED | MAP_POPULATE, iouring_fd, IORING_OFF_SQ_RING);
  iouring_cq_ring = mmap (0, iouring_cq_ring_size, PROT_READ | PROT_WRITE,
                          MAP_SHARED | MAP_POPULATE, iouring_fd, IORING_OFF_CQ_RING);
  iouring_sqes    = mmap (0, iouring_sqes_size, PROT_READ | PROT_WRITE,
                          MAP_SHARED | MAP_POPULATE, iouring_fd, IORING_OFF_SQES);

  if (iouring_sq_ring == MAP_FAILED || iouring_cq_ring == MAP_FAILED || iouring_sqes == MAP_FAILED)
    return -1;

  iouring_sq_head         = params.sq_off.head;
  iouring_sq_tail         = params.sq_off.tail;
  iouring_sq_ring_mask    = params.sq_off.ring_mask;
  iouring_sq_ring_entries = params.sq_off.ring_entries;
  iouring_sq_flags        = params.sq_off.flags;
  iouring_sq_dropped      = params.sq_off.dropped;
  iouring_sq_array        = params.sq_off.array;

  iouring_cq_head         = params.cq_off.head;
  iouring_cq_tail         = params.cq_off.tail;
  iouring_cq_ring_mask    = params.cq_off.ring_mask;
  iouring_cq_ring_entries = params.cq_off.ring_entries;
  iouring_cq_overflow     = params.cq_off.overflow;
  iouring_cq_cqes         = params.cq_off.cqes;

  iouring_tfd = timerfd_create (CLOCK_MONOTONIC, TFD_CLOEXEC);

  if (iouring_tfd < 0)
    return iouring_tfd;

  iouring_tfd_to = EV_TSTAMP_HUGE;

  return 0;
}

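/* called after a fork: recreate the kernel state from scratch and re-arm
 * all fds, as the ring must not be shared with the parent */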
ecb_cold
static void
iouring_fork (EV_P)
{
  iouring_internal_destroy (EV_A);

  while (iouring_internal_init (EV_A) < 0)
    ev_syserr ("(libev) io_uring_setup");

  fd_rearm_all (EV_A);

  ev_io_stop  (EV_A_ &iouring_tfd_w);
  ev_io_set   (EV_A_ &iouring_tfd_w, iouring_tfd, EV_READ);
  ev_io_start (EV_A_ &iouring_tfd_w);
}

/*****************************************************************************/

static void
iouring_modify (EV_P_ int fd, int oev, int nev)
{
  if (oev)
    {
      /* we assume the sqe's are all "properly" initialised */
      struct io_uring_sqe *sqe = iouring_sqe_get (EV_A);
      sqe->opcode = IORING_OP_POLL_REMOVE;
      sqe->fd = fd;
      /* Jens Axboe notified me that user_data is not what is documented, but is
       * some kind of unique ID that has to match, otherwise the request cannot
       * be removed. Since we don't *really* have that, we pass in the old
       * generation counter - if that fails, too bad, it will hopefully be removed
       * at close time and then be ignored. */
      sqe->addr = (uint32_t)fd | ((__u64)(uint32_t)anfds [fd].egen << 32);
      sqe->user_data = (uint64_t)-1;
      iouring_sqe_submit (EV_A_ sqe);

      /* increment generation counter to avoid handling old events */
      ++anfds [fd].egen;
    }

  if (nev)
    {
      struct io_uring_sqe *sqe = iouring_sqe_get (EV_A);
      sqe->opcode = IORING_OP_POLL_ADD;
      sqe->fd = fd;
      sqe->user_data = (uint32_t)fd | ((__u64)(uint32_t)anfds [fd].egen << 32);
      sqe->poll_events =
        (nev & EV_READ ? POLLIN : 0)
        | (nev & EV_WRITE ? POLLOUT : 0);
      iouring_sqe_submit (EV_A_ sqe);
    }
}

inline_size
void
iouring_tfd_update (EV_P_ ev_tstamp timeout)
{
  ev_tstamp tfd_to = mn_now + timeout;

  /* we assume there will be many iterations per timer change, so
   * we only re-set the timerfd when we have to because its expiry
   * is too late.
   */
  if (ecb_expect_false (tfd_to < iouring_tfd_to))
    {
      struct itimerspec its;

      iouring_tfd_to = tfd_to;
      EV_TS_SET (its.it_interval, 0.);
      EV_TS_SET (its.it_value, tfd_to);

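      /* tfd_to is an absolute CLOCK_MONOTONIC timestamp (mn_now-based),
       * hence TFD_TIMER_ABSTIME */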
      if (timerfd_settime (iouring_tfd, TFD_TIMER_ABSTIME, &its, 0) < 0)
        assert (("libev: iouring timerfd_settime failed", 0));
    }
}

inline_size
void
iouring_process_cqe (EV_P_ struct io_uring_cqe *cqe)
{
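  /* user_data was packed as fd | (egen << 32) in iouring_modify */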
  int fd = cqe->user_data & 0xffffffffU;
  uint32_t gen = cqe->user_data >> 32;
  int res = cqe->res;

  /* user_data -1 is a remove that we are not atm. interested in */
  if (cqe->user_data == (uint64_t)-1)
    return;

  assert (("libev: io_uring fd must be in-bounds", fd >= 0 && fd < anfdmax));

  /* documentation lies, of course. the result value is NOT like
   * normal syscalls, but like linux raw syscalls, i.e. negative
   * error numbers. fortunate, as otherwise there would be no way
   * to get error codes at all. still, why not document this?
   */

  /* ignore event if generation doesn't match */
  /* other than skipping removal events, */
  /* this should actually be very rare */
  if (ecb_expect_false (gen != (uint32_t)anfds [fd].egen))
    return;

  if (ecb_expect_false (res < 0))
    {
      /*TODO: EINVAL handling (was something failed with this fd)*/
      /*TODO: EBUSY happens when?*/

      if (res == -EBADF)
        {
          assert (("libev: event loop rejected bad fd", res != -EBADF));
          fd_kill (EV_A_ fd);
        }
      else
        {
          errno = -res;
          ev_syserr ("(libev) IORING_OP_POLL_ADD");
        }

      return;
    }

  /* feed events, we do not expect or handle POLLNVAL */
  fd_event (
    EV_A_
    fd,
    (res & (POLLOUT | POLLERR | POLLHUP) ? EV_WRITE : 0)
    | (res & (POLLIN | POLLERR | POLLHUP) ? EV_READ : 0)
  );

  /* io_uring is oneshot, so we need to re-arm the fd next iteration */
  /* this also means we usually have to do at least one syscall per iteration */
  anfds [fd].events = 0;
  fd_change (EV_A_ fd, EV_ANFD_REIFY);
}

/* called when the event queue overflows */
ecb_cold
static void
iouring_overflow (EV_P)
{
  /* we have two options: resize the queue (by tearing down
   * everything and recreating it), or live with it
   * and poll.
   * we implement this by resizing the queue, and, if that fails,
   * we just recreate the state on every failure, which
   * kind of is a very inefficient poll.
   * one danger is, due to the bias toward lower fds,
   * we will only really get events for those, so
   * maybe we need a poll() fallback, after all.
   */
  /*EV_CQ_VAR (overflow) = 0;*/ /* need to do this if we keep the state and poll manually */

  fd_rearm_all (EV_A);

  /* we double the size until we hit the hard-to-probe maximum */
  if (!iouring_max_entries)
    {
      iouring_entries <<= 1;
      iouring_fork (EV_A);
    }
  else
    {
      /* we hit the kernel limit, we should fall back to something else.
       * we can either poll() a few times and hope for the best,
       * poll always, or switch to epoll.
       * TODO: is this necessary with newer kernels?
       */

      iouring_internal_destroy (EV_A);

      /* this should make it so that on return, we don't call any uring functions */
      iouring_to_submit = 0;

      for (;;)
        {
          backend = epoll_init (EV_A_ 0);

          if (backend)
            break;

          ev_syserr ("(libev) iouring switch to epoll");
        }
    }
}

/* handle any events in the completion queue, return true if there were any */
static int
iouring_handle_cq (EV_P)
{
  unsigned head, tail, mask;

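  /* the acquire fence pairs with the kernel's release store to the cq tail,
   * making sure the cqes themselves are visible before we read them */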
  head = EV_CQ_VAR (head);
  ECB_MEMORY_FENCE_ACQUIRE;
  tail = EV_CQ_VAR (tail);

  if (head == tail)
    return 0;

  /* it can only overflow if we have events, yes, yes? */
  if (ecb_expect_false (EV_CQ_VAR (overflow)))
    {
      iouring_overflow (EV_A);
      return 1;
    }

  mask = EV_CQ_VAR (ring_mask);

  do
    iouring_process_cqe (EV_A_ &EV_CQES [head++ & mask]);
  while (head != tail);

  EV_CQ_VAR (head) = head;
  ECB_MEMORY_FENCE_RELEASE;

  return 1;
}

static void
iouring_poll (EV_P_ ev_tstamp timeout)
{
  /* if we have events, no need for extra syscalls, but we might have to queue events */
  if (iouring_handle_cq (EV_A))
    timeout = EV_TS_CONST (0.);
  else
    /* no events, so maybe wait for some */
    iouring_tfd_update (EV_A_ timeout);

  /* only enter the kernel if we have something to submit, or we need to wait */
  if (timeout || iouring_to_submit)
    {
      int res = iouring_enter (EV_A_ timeout);

      if (ecb_expect_false (res < 0))
        if (errno == EINTR)
          /* ignore */;
        else if (errno == EBUSY)
          /* cq full, cannot submit - should be rare because we flush the cq first, so simply ignore */;
        else
          ev_syserr ("(libev) iouring setup");
      else
        iouring_handle_cq (EV_A);
    }
}

inline_size
int
iouring_init (EV_P_ int flags)
{
  iouring_entries     = IOURING_INIT_ENTRIES;
  iouring_max_entries = 0;

  if (iouring_internal_init (EV_A) < 0)
    {
      iouring_internal_destroy (EV_A);
      return 0;
    }

  ev_io_init (&iouring_tfd_w, iouring_tfd_cb, iouring_tfd, EV_READ);
  ev_set_priority (&iouring_tfd_w, EV_MINPRI);
  ev_io_start (EV_A_ &iouring_tfd_w);
  ev_unref (EV_A); /* watcher should not keep loop alive */

  backend_modify = iouring_modify;
  backend_poll   = iouring_poll;

  return EVBACKEND_IOURING;
}

inline_size
void
iouring_destroy (EV_P)
{
  iouring_internal_destroy (EV_A);
}