/cvs/libev/ev_iouring.c
Revision: 1.18
Committed: Sat Dec 28 07:57:48 2019 UTC by root
Content type: text/plain
Branch: MAIN
Changes since 1.17: +17 -21 lines
Log Message:
*** empty log message ***

File Contents

/*
 * libev linux io_uring fd activity backend
 *
 * Copyright (c) 2019 Marc Alexander Lehmann <libev@schmorp.de>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without modifica-
 * tion, are permitted provided that the following conditions are met:
 *
 *   1.  Redistributions of source code must retain the above copyright notice,
 *       this list of conditions and the following disclaimer.
 *
 *   2.  Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in the
 *       documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
 * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MER-
 * CHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
 * EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPE-
 * CIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
 * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTH-
 * ERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
 * OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Alternatively, the contents of this file may be used under the terms of
 * the GNU General Public License ("GPL") version 2 or any later version,
 * in which case the provisions of the GPL are applicable instead of
 * the above. If you wish to allow the use of your version of this file
 * only under the terms of the GPL and not to allow others to use your
 * version of this file under the BSD license, indicate your decision
 * by deleting the provisions above and replace them with the notice
 * and other provisions required by the GPL. If you do not delete the
 * provisions above, a recipient may use your version of this file under
 * either the BSD or the GPL.
 */

/*
 * general notes about linux io_uring:
 *
 * a) it's the best interface I have seen so far. on linux.
 * b) best is not necessarily very good.
 * c) it's better than the aio mess, doesn't suffer from the fork problems
 *    of linux aio or epoll, and so on and so on. and you could do event stuff
 *    without any syscalls. what's not to like?
 * d) ok, it's vastly more complex, but that's ok, really.
 * e) why two mmaps instead of one? one would be more space-efficient,
 *    and I can't see what benefit two would have (other than being
 *    somehow resizable/relocatable, but that's apparently not possible).
 * f) hmm, it's practically undebuggable (gdb can't access the memory, and
 *    the bizarre way structure offsets are communicated makes it hard to
 *    just print the ring buffer heads, even *iff* the memory were visible
 *    in gdb). but then, that's also ok, really.
 * g) well, you cannot specify a timeout when waiting for events. no,
 *    seriously, the interface doesn't support a timeout. never seen _that_
 *    before. sure, you can use a timerfd, but that's another syscall
 *    you could have avoided. overall, this bizarre omission smells
 *    like a µ-optimisation by the io_uring author for his personal
 *    applications, to the detriment of everybody else who just wants
 *    an event loop. but, umm, ok, if that's all, it could be worse.
 *    (from what I gather from Jens Axboe, it simply didn't occur to him,
 *    and he made good on it by adding an unlimited number of timeouts
 *    later :).
 * h) initially there was a hardcoded limit of 4096 outstanding events.
 *    later versions not only bump this to 32k, but also can handle
 *    an unlimited amount of events, so this only affects the batch size.
 * i) unlike linux aio, you *can* register more than the limit
 *    of fd events. while early versions of io_uring signalled an overflow
 *    and you ended up getting wet, 5.5+ does not do this anymore.
 * j) but, oh my! it had exactly the same bugs as the linux aio backend,
 *    where some undocumented poll combinations just fail. fortunately,
 *    after finally reaching the author, he was more than willing to fix
 *    this, probably in 5.6+.
 * k) overall, the *API* itself is, I dare to say, not a total trainwreck.
 *    once the bugs are fixed (probably in 5.6+), it will be without
 *    competition.
 */

/* TODO: use internal TIMEOUT */
/* TODO: take advantage of single mmap, NODROP etc. */
/* TODO: resize cq/sq size independently */

#include <sys/timerfd.h>
#include <sys/mman.h>
#include <poll.h>
#include <stdint.h>

#define IOURING_INIT_ENTRIES 32

/*****************************************************************************/
/* syscall wrapdadoop - this section has the raw api/abi definitions */

#include <linux/fs.h>
#include <linux/types.h>

/* mostly directly taken from the kernel or documentation */

struct io_uring_sqe
{
  __u8 opcode;
  __u8 flags;
  __u16 ioprio;
  __s32 fd;
  union {
    __u64 off;
    __u64 addr2;
  };
  __u64 addr;
  __u32 len;
  union {
    __kernel_rwf_t rw_flags;
    __u32 fsync_flags;
    __u16 poll_events;
    __u32 sync_range_flags;
    __u32 msg_flags;
    __u32 timeout_flags;
    __u32 accept_flags;
    __u32 cancel_flags;
    __u32 open_flags;
    __u32 statx_flags;
  };
  __u64 user_data;
  union {
    __u16 buf_index;
    __u64 __pad2[3];
  };
};

struct io_uring_cqe
{
  __u64 user_data;
  __s32 res;
  __u32 flags;
};

struct io_sqring_offsets
{
  __u32 head;
  __u32 tail;
  __u32 ring_mask;
  __u32 ring_entries;
  __u32 flags;
  __u32 dropped;
  __u32 array;
  __u32 resv1;
  __u64 resv2;
};

struct io_cqring_offsets
{
  __u32 head;
  __u32 tail;
  __u32 ring_mask;
  __u32 ring_entries;
  __u32 overflow;
  __u32 cqes;
  __u64 resv[2];
};

struct io_uring_params
{
  __u32 sq_entries;
  __u32 cq_entries;
  __u32 flags;
  __u32 sq_thread_cpu;
  __u32 sq_thread_idle;
  __u32 features;
  __u32 resv[4];
  struct io_sqring_offsets sq_off;
  struct io_cqring_offsets cq_off;
};

#define IORING_SETUP_CQSIZE 0x00000008

#define IORING_OP_POLL_ADD        6
#define IORING_OP_POLL_REMOVE     7
#define IORING_OP_TIMEOUT        11
#define IORING_OP_TIMEOUT_REMOVE 12

/* relative or absolute, reference clock is CLOCK_MONOTONIC */
struct iouring_kernel_timespec
{
  int64_t tv_sec;
  long long tv_nsec;
};
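
/* not used by this backend yet (see the TODO above): with IORING_OP_TIMEOUT,
 * the sqe's addr field would, as far as I can tell, point at one of these,
 * which mirrors the kernel's __kernel_timespec */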

#define IORING_TIMEOUT_ABS 0x00000001

#define IORING_ENTER_GETEVENTS 0x01

#define IORING_OFF_SQ_RING 0x00000000ULL
#define IORING_OFF_CQ_RING 0x08000000ULL
#define IORING_OFF_SQES    0x10000000ULL

#define IORING_FEAT_SINGLE_MMAP   0x00000001
#define IORING_FEAT_NODROP        0x00000002
#define IORING_FEAT_SUBMIT_STABLE 0x00000004
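
/* these feature bits are filled in by the kernel in params.features on
 * io_uring_setup; the (currently disabled) check in iouring_internal_init
 * below would require all three of them */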

inline_size
int
evsys_io_uring_setup (unsigned entries, struct io_uring_params *params)
{
  return ev_syscall2 (SYS_io_uring_setup, entries, params);
}

inline_size
int
evsys_io_uring_enter (int fd, unsigned to_submit, unsigned min_complete, unsigned flags, const sigset_t *sig, size_t sigsz)
{
  return ev_syscall6 (SYS_io_uring_enter, fd, to_submit, min_complete, flags, sig, sigsz);
}
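
/* rough usage sketch, matching what iouring_internal_init below does on
 * kernels without IORING_FEAT_SINGLE_MMAP: io_uring_setup returns an fd and
 * fills in the ring offsets, then the sq ring, cq ring and sqe array are
 * mmap ()ed on that fd at the fixed IORING_OFF_* offsets, and finally
 * io_uring_enter is used both to submit sqes and to wait for cqes */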

/*****************************************************************************/
/* actual backend implementation */

/* we hope that volatile will make the compiler access these variables only once */
#define EV_SQ_VAR(name) *(volatile unsigned *)((char *)iouring_sq_ring + iouring_sq_ ## name)
#define EV_CQ_VAR(name) *(volatile unsigned *)((char *)iouring_cq_ring + iouring_cq_ ## name)

/* the index array */
#define EV_SQ_ARRAY ((unsigned *)((char *)iouring_sq_ring + iouring_sq_array))

/* the submit/completion queue entries */
#define EV_SQES ((struct io_uring_sqe *) iouring_sqes)
#define EV_CQES ((struct io_uring_cqe *)((char *)iouring_cq_ring + iouring_cq_cqes))
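
/* head and tail are free-running counters - the kernel and we only ever
 * increment them - so a slot is addressed as (counter & ring_mask) and
 * (tail - head) is the number of entries currently in the ring */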

inline_speed
int
iouring_enter (EV_P_ ev_tstamp timeout)
{
  int res;

  EV_RELEASE_CB;

  res = evsys_io_uring_enter (iouring_fd, iouring_to_submit, 1,
                              timeout > EV_TS_CONST (0.) ? IORING_ENTER_GETEVENTS : 0, 0, 0);

  assert (("libev: io_uring_enter did not consume all sqes", (res < 0 || res == iouring_to_submit)));

  iouring_to_submit = 0;

  EV_ACQUIRE_CB;

  return res;
}

/* TODO: can we move things around so we don't need this forward-reference? */
static void
iouring_poll (EV_P_ ev_tstamp timeout);

static
struct io_uring_sqe *
iouring_sqe_get (EV_P)
{
  unsigned tail;

  for (;;)
    {
      tail = EV_SQ_VAR (tail);

      if (ecb_expect_true (tail + 1 - EV_SQ_VAR (head) <= EV_SQ_VAR (ring_entries)))
        break; /* what's the problem, we have free sqes */

      /* queue full, need to flush and possibly handle some events */

#if EV_FEATURE_CODE
      /* first we ask the kernel nicely, most often this frees up some sqes */
      int res = iouring_enter (EV_A_ EV_TS_CONST (0.));

      ECB_MEMORY_FENCE_ACQUIRE; /* better safe than sorry */

      if (res >= 0)
        continue; /* yes, it worked, try again */
#endif

      /* some problem, possibly EBUSY - do the full poll and let it handle any issues */

      iouring_poll (EV_A_ EV_TS_CONST (0.));
      /* iouring_poll should have done ECB_MEMORY_FENCE_ACQUIRE for us */
    }

  /*assert (("libev: io_uring queue full after flush", tail + 1 - EV_SQ_VAR (head) <= EV_SQ_VAR (ring_entries)));*/

  return EV_SQES + (tail & EV_SQ_VAR (ring_mask));
}

inline_size
void
iouring_sqe_submit (EV_P_ struct io_uring_sqe *sqe)
{
  unsigned idx = sqe - EV_SQES;

  EV_SQ_ARRAY [idx] = idx;
  ECB_MEMORY_FENCE_RELEASE;
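  /* the release fence above publishes the index entry before the kernel
   * can observe the tail increment below */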
  ++EV_SQ_VAR (tail);
  /*ECB_MEMORY_FENCE_RELEASE;*/ /* for the time being we assume this is not needed */
  ++iouring_to_submit;
}

/*****************************************************************************/

/* when the timerfd expires we simply note the fact,
 * as the purpose of the timerfd is to wake us up, nothing else.
 * the next iteration should re-set it.
 */
static void
iouring_tfd_cb (EV_P_ struct ev_io *w, int revents)
{
  iouring_tfd_to = EV_TSTAMP_HUGE;
}

/* called for full and partial cleanup */
ecb_cold
static void
iouring_internal_destroy (EV_P)
{
  close (iouring_tfd);
  close (iouring_fd);

  if (iouring_sq_ring != MAP_FAILED) munmap (iouring_sq_ring, iouring_sq_ring_size);
  if (iouring_cq_ring != MAP_FAILED) munmap (iouring_cq_ring, iouring_cq_ring_size);
  if (iouring_sqes    != MAP_FAILED) munmap (iouring_sqes   , iouring_sqes_size   );

  if (ev_is_active (&iouring_tfd_w))
    {
      ev_ref (EV_A);
      ev_io_stop (EV_A_ &iouring_tfd_w);
    }
}

ecb_cold
static int
iouring_internal_init (EV_P)
{
  struct io_uring_params params = { 0 };

  iouring_to_submit = 0;

  iouring_tfd     = -1;
  iouring_sq_ring = MAP_FAILED;
  iouring_cq_ring = MAP_FAILED;
  iouring_sqes    = MAP_FAILED;

  if (!have_monotonic) /* cannot really happen, but what if?! */
    return -1;

  for (;;)
    {
      iouring_fd = evsys_io_uring_setup (iouring_entries, &params);

      if (iouring_fd >= 0)
        break; /* yippie */

      if (errno != EINVAL)
        return -1; /* we failed */

#if TODO
      if ((~params.features) & (IORING_FEAT_NODROP | IORING_FEAT_SINGLE_MMAP | IORING_FEAT_SUBMIT_STABLE))
        return -1; /* we require the above features */
#endif

      /* EINVAL: lots of possible reasons, but maybe
       * it is because we hit the unqueryable hardcoded size limit
       */

      /* we hit the limit already, give up */
      if (iouring_max_entries)
        return -1;

      /* first time we hit EINVAL? assume we hit the limit, so go back and retry */
      iouring_entries >>= 1;
      iouring_max_entries = iouring_entries;
    }

  iouring_sq_ring_size = params.sq_off.array + params.sq_entries * sizeof (unsigned);
  iouring_cq_ring_size = params.cq_off.cqes  + params.cq_entries * sizeof (struct io_uring_cqe);
  iouring_sqes_size    =                       params.sq_entries * sizeof (struct io_uring_sqe);

  iouring_sq_ring = mmap (0, iouring_sq_ring_size, PROT_READ | PROT_WRITE,
                          MAP_SHARED | MAP_POPULATE, iouring_fd, IORING_OFF_SQ_RING);
  iouring_cq_ring = mmap (0, iouring_cq_ring_size, PROT_READ | PROT_WRITE,
                          MAP_SHARED | MAP_POPULATE, iouring_fd, IORING_OFF_CQ_RING);
  iouring_sqes    = mmap (0, iouring_sqes_size, PROT_READ | PROT_WRITE,
                          MAP_SHARED | MAP_POPULATE, iouring_fd, IORING_OFF_SQES);

  if (iouring_sq_ring == MAP_FAILED || iouring_cq_ring == MAP_FAILED || iouring_sqes == MAP_FAILED)
    return -1;

  iouring_sq_head         = params.sq_off.head;
  iouring_sq_tail         = params.sq_off.tail;
  iouring_sq_ring_mask    = params.sq_off.ring_mask;
  iouring_sq_ring_entries = params.sq_off.ring_entries;
  iouring_sq_flags        = params.sq_off.flags;
  iouring_sq_dropped      = params.sq_off.dropped;
  iouring_sq_array        = params.sq_off.array;

  iouring_cq_head         = params.cq_off.head;
  iouring_cq_tail         = params.cq_off.tail;
  iouring_cq_ring_mask    = params.cq_off.ring_mask;
  iouring_cq_ring_entries = params.cq_off.ring_entries;
  iouring_cq_overflow     = params.cq_off.overflow;
  iouring_cq_cqes         = params.cq_off.cqes;

  iouring_tfd = timerfd_create (CLOCK_MONOTONIC, TFD_CLOEXEC);

  if (iouring_tfd < 0)
    return iouring_tfd;

  iouring_tfd_to = EV_TSTAMP_HUGE;

  return 0;
}

ecb_cold
static void
iouring_fork (EV_P)
{
  iouring_internal_destroy (EV_A);

  while (iouring_internal_init (EV_A) < 0)
    ev_syserr ("(libev) io_uring_setup");

  fd_rearm_all (EV_A);

  ev_io_stop  (EV_A_ &iouring_tfd_w);
  ev_io_set   (EV_A_ &iouring_tfd_w, iouring_tfd, EV_READ);
  ev_io_start (EV_A_ &iouring_tfd_w);
}

/*****************************************************************************/

static void
iouring_modify (EV_P_ int fd, int oev, int nev)
{
  if (oev)
    {
      /* we assume the sqe's are all "properly" initialised */
      struct io_uring_sqe *sqe = iouring_sqe_get (EV_A);
      sqe->opcode = IORING_OP_POLL_REMOVE;
      sqe->fd     = fd;
      /* Jens Axboe notified me that user_data is not what is documented, but is
       * some kind of unique ID that has to match, otherwise the request cannot
       * be removed. Since we don't *really* have that, we pass in the old
       * generation counter - if that fails, too bad, it will hopefully be removed
       * at close time and then be ignored. */
      sqe->addr      = (uint32_t)fd | ((__u64)(uint32_t)anfds [fd].egen << 32);
      sqe->user_data = (uint64_t)-1;
      iouring_sqe_submit (EV_A_ sqe);

      /* increment generation counter to avoid handling old events */
      ++anfds [fd].egen;
    }

  if (nev)
    {
      struct io_uring_sqe *sqe = iouring_sqe_get (EV_A);
      sqe->opcode    = IORING_OP_POLL_ADD;
      sqe->fd        = fd;
      sqe->user_data = (uint32_t)fd | ((__u64)(uint32_t)anfds [fd].egen << 32);
      sqe->poll_events =
          (nev & EV_READ  ? POLLIN  : 0)
        | (nev & EV_WRITE ? POLLOUT : 0);
      iouring_sqe_submit (EV_A_ sqe);
    }
}
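
/* the 64 bit user_data in each sqe/cqe packs the fd and its generation
 * counter:
 *
 *   user_data = (uint32_t)fd | ((__u64)(uint32_t)anfds [fd].egen << 32)
 *
 * iouring_process_cqe below unpacks both halves again and drops completions
 * whose generation no longer matches, i.e. stale events from an earlier
 * watcher on the same fd */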

inline_size
void
iouring_tfd_update (EV_P_ ev_tstamp timeout)
{
  ev_tstamp tfd_to = mn_now + timeout;

  /* we assume there will be many iterations per timer change, so
   * we only re-set the timerfd when we have to because its expiry
   * is too late.
   */
  if (ecb_expect_false (tfd_to < iouring_tfd_to))
    {
      struct itimerspec its;

      iouring_tfd_to = tfd_to;
      EV_TS_SET (its.it_interval, 0.);
      EV_TS_SET (its.it_value, tfd_to);

      if (timerfd_settime (iouring_tfd, TFD_TIMER_ABSTIME, &its, 0) < 0)
        assert (("libev: iouring timerfd_settime failed", 0));
    }
}

inline_size
void
iouring_process_cqe (EV_P_ struct io_uring_cqe *cqe)
{
  int fd       = cqe->user_data & 0xffffffffU;
  uint32_t gen = cqe->user_data >> 32;
  int res      = cqe->res;

  /* user_data -1 is a remove that we are not atm. interested in */
  if (cqe->user_data == (uint64_t)-1)
    return;

  assert (("libev: io_uring fd must be in-bounds", fd >= 0 && fd < anfdmax));

  /* documentation lies, of course. the result value is NOT like
   * normal syscalls, but like linux raw syscalls, i.e. negative
   * error numbers. fortunate, as otherwise there would be no way
   * to get error codes at all. still, why not document this?
   */

  /* ignore event if generation doesn't match */
  /* other than skipping removal events, */
  /* this should actually be very rare */
  if (ecb_expect_false (gen != (uint32_t)anfds [fd].egen))
    return;

  if (ecb_expect_false (res < 0))
    {
      /*TODO: EINVAL handling (was something failed with this fd)*/
      /*TODO: EBUSY happens when?*/

      if (res == -EBADF)
        {
          assert (("libev: event loop rejected bad fd", res != -EBADF));
          fd_kill (EV_A_ fd);
        }
      else
        {
          errno = -res;
          ev_syserr ("(libev) IORING_OP_POLL_ADD");
        }

      return;
    }

  /* feed events, we do not expect or handle POLLNVAL */
  fd_event (
    EV_A_
    fd,
    (res & (POLLOUT | POLLERR | POLLHUP) ? EV_WRITE : 0)
    | (res & (POLLIN | POLLERR | POLLHUP) ? EV_READ : 0)
  );

  /* io_uring is oneshot, so we need to re-arm the fd next iteration */
  /* this also means we usually have to do at least one syscall per iteration */
  anfds [fd].events = 0;
  fd_change (EV_A_ fd, EV_ANFD_REIFY);
}

/* called when the event queue overflows */
ecb_cold
static void
iouring_overflow (EV_P)
{
  /* we have two options: resize the queue (by tearing down
   * everything and recreating it), or living with it
   * and polling.
   * we implement this by resizing the queue, and, if that fails,
   * we just recreate the state on every failure, which
   * kind of is a very inefficient poll.
   * one danger is, due to the bias toward lower fds,
   * we will only really get events for those, so
   * maybe we need a poll() fallback, after all.
   */
  /*EV_CQ_VAR (overflow) = 0;*/ /* need to do this if we keep the state and poll manually */

  fd_rearm_all (EV_A);

  /* we double the size until we hit the hard-to-probe maximum */
  if (!iouring_max_entries)
    {
      iouring_entries <<= 1;
      iouring_fork (EV_A);
    }
  else
    {
      /* we hit the kernel limit, we should fall back to something else.
       * we can either poll() a few times and hope for the best,
       * poll always, or switch to epoll.
       * TODO: is this necessary with newer kernels?
       */

      iouring_internal_destroy (EV_A);

      /* this should make it so that on return, we don't call any uring functions */
      iouring_to_submit = 0;

      for (;;)
        {
          backend = epoll_init (EV_A_ 0);

          if (backend)
            break;

          ev_syserr ("(libev) iouring switch to epoll");
        }
    }
}

/* handle any events in the completion queue, return true if there were any */
static int
iouring_handle_cq (EV_P)
{
  unsigned head, tail, mask;

  head = EV_CQ_VAR (head);
  ECB_MEMORY_FENCE_ACQUIRE;
  tail = EV_CQ_VAR (tail);

  if (head == tail)
    return 0;

  /* it can only overflow if we have events, yes, yes? */
  if (ecb_expect_false (EV_CQ_VAR (overflow)))
    {
      iouring_overflow (EV_A);
      return 1;
    }

  mask = EV_CQ_VAR (ring_mask);

  do
    iouring_process_cqe (EV_A_ &EV_CQES [head++ & mask]);
  while (head != tail);

  EV_CQ_VAR (head) = head;
  ECB_MEMORY_FENCE_RELEASE;

  return 1;
}
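
/* note the fence pairing in iouring_handle_cq above: the acquire fence makes
 * the kernel's cqe stores visible before we process them, and the release
 * fence after the head update publishes the freed slots back to the kernel */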

static void
iouring_poll (EV_P_ ev_tstamp timeout)
{
  /* if we have events, no need for extra syscalls, but we might have to queue events */
  /* we also clear the timeout if there are outstanding fdchanges */
  /* the latter should only happen if both the sq and cq are full, most likely */
  /* because we have a lot of event sources that immediately complete */
  /* TODO: fdchangecnt is always 0 because fd_reify does not have two buffers yet */
  if (iouring_handle_cq (EV_A) || fdchangecnt)
    timeout = EV_TS_CONST (0.);
  else
    /* no events, so maybe wait for some */
    iouring_tfd_update (EV_A_ timeout);

  /* only enter the kernel if we have something to submit, or we need to wait */
  if (timeout || iouring_to_submit)
    {
      int res = iouring_enter (EV_A_ timeout);

      if (ecb_expect_false (res < 0))
        if (errno == EINTR)
          /* ignore */;
        else if (errno == EBUSY)
          /* cq full, cannot submit - should be rare because we flush the cq first, so simply ignore */;
        else
          ev_syserr ("(libev) iouring setup");
      else
        iouring_handle_cq (EV_A);
    }
}

inline_size
int
iouring_init (EV_P_ int flags)
{
  iouring_entries     = IOURING_INIT_ENTRIES;
  iouring_max_entries = 0;

  if (iouring_internal_init (EV_A) < 0)
    {
      iouring_internal_destroy (EV_A);
      return 0;
    }

  ev_io_init (&iouring_tfd_w, iouring_tfd_cb, iouring_tfd, EV_READ);
  ev_set_priority (&iouring_tfd_w, EV_MINPRI);
  ev_io_start (EV_A_ &iouring_tfd_w);
  ev_unref (EV_A); /* watcher should not keep loop alive */

  backend_modify = iouring_modify;
  backend_poll   = iouring_poll;

  return EVBACKEND_IOURING;
}

inline_size
void
iouring_destroy (EV_P)
{
  iouring_internal_destroy (EV_A);
}