/*
 * libev linux io_uring fd activity backend
 *
 * Copyright (c) 2019 Marc Alexander Lehmann <libev@schmorp.de>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without modifica-
 * tion, are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright notice,
 *    this list of conditions and the following disclaimer.
 *
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
 * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MER-
 * CHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
 * EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPE-
 * CIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
 * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTH-
 * ERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
 * OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Alternatively, the contents of this file may be used under the terms of
 * the GNU General Public License ("GPL") version 2 or any later version,
 * in which case the provisions of the GPL are applicable instead of
 * the above. If you wish to allow the use of your version of this file
 * only under the terms of the GPL and not to allow others to use your
 * version of this file under the BSD license, indicate your decision
 * by deleting the provisions above and replace them with the notice
 * and other provisions required by the GPL. If you do not delete the
 * provisions above, a recipient may use your version of this file under
 * either the BSD or the GPL.
 */

/*
 * general notes about linux io_uring:
 *
 * a) it's the best interface I have seen so far. on linux.
 * b) best is not necessarily very good.
 * c) it's better than the aio mess, doesn't suffer from the fork problems
 *    of linux aio or epoll, and so on and so on. and you could do event stuff
 *    without any syscalls. what's not to like?
 * d) ok, it's vastly more complex, but that's ok, really.
 * e) why 3 mmaps instead of one? one would be more space-efficient,
 *    and I can't see what benefit three would have (other than being
 *    somehow resizable/relocatable, but that's apparently not possible).
 * f) hmm, it's practically undebuggable (gdb can't access the memory, and
 *    the bizarre way structure offsets are communicated makes it hard to
 *    just print the ring buffer heads, even *if* the memory were visible
 *    in gdb). but then, that's also ok, really.
 * g) well, you cannot specify a timeout when waiting for events. no,
 *    seriously, the interface doesn't support a timeout. never seen _that_
 *    before. sure, you can use a timerfd, but that's another syscall
 *    you could have avoided. overall, this bizarre omission smells
 *    like a µ-optimisation by the io_uring author for his personal
 *    applications, to the detriment of everybody else who just wants
 *    an event loop. but, umm, ok, if that's all, it could be worse.
 * h) there is a hardcoded limit of 4096 outstanding events. okay,
 *    at least there is no arbitrarily low system-wide limit...
 * i) unlike linux aio, you *can* register more than the limit
 *    of fd events, and the kernel will "gracefully" signal an
 *    overflow, after which you could destroy and recreate the kernel
 *    state, a bit bigger, or fall back to e.g. poll. that's not
 *    totally insane, but kind of questions the point of a high-
 *    performance I/O framework when it doesn't really work
 *    under stress.
 * j) but, oh my! it has exactly the same bugs as the linux aio backend,
 *    where some undocumented poll combinations just fail.
 *    so we need epoll AGAIN as a fallback. AGAIN! epoll!! and of course,
 *    this is completely undocumented, have I mentioned this already?
 * k) overall, the *API* itself is, I dare to say, not a total trainwreck.
 *    the big issues with it are the bugs requiring epoll, which might
 *    or might not get fixed (do I hold my breath?).
 */

#include <sys/timerfd.h>
#include <sys/mman.h>
#include <poll.h>

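/* initial number of sq/cq entries: on cq overflow, iouring_overflow () below
 * doubles this until the unqueryable kernel maximum is hit, after which we
 * fall back to epoll */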
#define IOURING_INIT_ENTRIES 32

/*****************************************************************************/
/* syscall wrapdadoop - this section has the raw api/abi definitions */

#include <linux/fs.h>
#include <linux/types.h>

/* mostly directly taken from the kernel or documentation */

struct io_uring_sqe
{
  __u8 opcode;
  __u8 flags;
  __u16 ioprio;
  __s32 fd;
  __u64 off;
  __u64 addr;
  __u32 len;
  union {
    __kernel_rwf_t rw_flags;
    __u32 fsync_flags;
    __u16 poll_events;
    __u32 sync_range_flags;
    __u32 msg_flags;
  };
  __u64 user_data;
  union {
    __u16 buf_index;
    __u64 __pad2[3];
  };
};

struct io_uring_cqe
{
  __u64 user_data;
  __s32 res;
  __u32 flags;
};

struct io_sqring_offsets
{
  __u32 head;
  __u32 tail;
  __u32 ring_mask;
  __u32 ring_entries;
  __u32 flags;
  __u32 dropped;
  __u32 array;
  __u32 resv1;
  __u64 resv2;
};

struct io_cqring_offsets
{
  __u32 head;
  __u32 tail;
  __u32 ring_mask;
  __u32 ring_entries;
  __u32 overflow;
  __u32 cqes;
  __u64 resv[2];
};

struct io_uring_params
{
  __u32 sq_entries;
  __u32 cq_entries;
  __u32 flags;
  __u32 sq_thread_cpu;
  __u32 sq_thread_idle;
  __u32 resv[5];
  struct io_sqring_offsets sq_off;
  struct io_cqring_offsets cq_off;
};

#define IORING_OP_POLL_ADD     6
#define IORING_OP_POLL_REMOVE  7

#define IORING_ENTER_GETEVENTS 0x01

#define IORING_OFF_SQ_RING 0x00000000ULL
#define IORING_OFF_CQ_RING 0x08000000ULL
#define IORING_OFF_SQES    0x10000000ULL

inline_size
int
evsys_io_uring_setup (unsigned entries, struct io_uring_params *params)
{
  return ev_syscall2 (SYS_io_uring_setup, entries, params);
}

inline_size
int
evsys_io_uring_enter (int fd, unsigned to_submit, unsigned min_complete, unsigned flags, const sigset_t *sig, size_t sigsz)
{
  return ev_syscall6 (SYS_io_uring_enter, fd, to_submit, min_complete, flags, sig, sigsz);
}
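
/* SYS_io_uring_setup and SYS_io_uring_enter are expected to come from
 * <sys/syscall.h>, and the ev_syscallN helpers are libev-internal wrappers
 * around the raw syscall mechanism, so no liburing (or glibc wrapper)
 * support is needed */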

/*****************************************************************************/
/* actual backend implementation */

/* we hope that volatile will make the compiler access these variables only once */
#define EV_SQ_VAR(name) *(volatile unsigned *)((char *)iouring_sq_ring + iouring_sq_ ## name)
#define EV_CQ_VAR(name) *(volatile unsigned *)((char *)iouring_cq_ring + iouring_cq_ ## name)

/* the index array */
#define EV_SQ_ARRAY ((unsigned *)((char *)iouring_sq_ring + iouring_sq_array))

/* the submit/completion queue entries */
#define EV_SQES ((struct io_uring_sqe *) iouring_sqes)
#define EV_CQES ((struct io_uring_cqe *)((char *)iouring_cq_ring + iouring_cq_cqes))

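/* as an illustration, EV_SQ_VAR (tail) expands to
 *   *(volatile unsigned *)((char *)iouring_sq_ring + iouring_sq_tail)
 * i.e. an access to the "tail" field of the mmapped sq ring, at the offset
 * the kernel communicated in params.sq_off.tail below */
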
static
struct io_uring_sqe *
iouring_sqe_get (EV_P)
{
  unsigned tail = EV_SQ_VAR (tail);

  if (tail + 1 - EV_SQ_VAR (head) > EV_SQ_VAR (ring_entries))
    {
      /* queue full, flush */
      evsys_io_uring_enter (iouring_fd, iouring_to_submit, 0, 0, 0, 0);
      iouring_to_submit = 0;
    }

  assert (("libev: io_uring queue full after flush", tail + 1 - EV_SQ_VAR (head) <= EV_SQ_VAR (ring_entries)));

  return EV_SQES + (tail & EV_SQ_VAR (ring_mask));
}
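
/* a note on the fullness check above: head and tail only ever increase and
 * are reduced modulo the ring size on access, so tail + 1 - head is the
 * number of entries in flight if one more were added, and the comparison
 * stays correct across unsigned wrap-around. e.g. with ring_entries == 32,
 * head == 30 and tail == 62: 62 + 1 - 30 == 33 > 32, so flush first. */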

inline_size
void
iouring_sqe_submit (EV_P_ struct io_uring_sqe *sqe)
{
  unsigned idx = sqe - EV_SQES;

  EV_SQ_ARRAY [idx] = idx;
  ECB_MEMORY_FENCE_RELEASE;
  ++EV_SQ_VAR (tail);
  /*ECB_MEMORY_FENCE_RELEASE; for the time being we assume this is not needed */
  ++iouring_to_submit;
}

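/* the intended usage pattern, as in iouring_modify below: fetch a free sqe
 * with iouring_sqe_get, fill in opcode/fd/user_data etc., then hand it back
 * via iouring_sqe_submit, which publishes its index and advances the tail */
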
/*****************************************************************************/

/* when the timerfd expires we simply note the fact,
 * as the purpose of the timerfd is to wake us up, nothing else.
 * the next iteration should re-set it.
 */
static void
iouring_tfd_cb (EV_P_ struct ev_io *w, int revents)
{
  iouring_tfd_to = EV_TSTAMP_HUGE;
}

static void
iouring_epoll_cb (EV_P_ struct ev_io *w, int revents)
{
  epoll_poll (EV_A_ 0);
}

/* called for full and partial cleanup */
ecb_cold
static void
iouring_internal_destroy (EV_P)
{
  close (iouring_tfd);
  close (iouring_fd);

  if (iouring_sq_ring != MAP_FAILED) munmap (iouring_sq_ring, iouring_sq_ring_size);
  if (iouring_cq_ring != MAP_FAILED) munmap (iouring_cq_ring, iouring_cq_ring_size);
  if (iouring_sqes    != MAP_FAILED) munmap (iouring_sqes   , iouring_sqes_size   );

  /* the stop is unconditional on purpose; only the extra ref is dropped conditionally */
  if (ev_is_active (&iouring_epoll_w)) ev_ref (EV_A); ev_io_stop (EV_A_ &iouring_epoll_w);
  if (ev_is_active (&iouring_tfd_w  )) ev_ref (EV_A); ev_io_stop (EV_A_ &iouring_tfd_w  );
}

ecb_cold
static int
iouring_internal_init (EV_P)
{
  struct io_uring_params params = { 0 };

  iouring_to_submit = 0;

  iouring_tfd     = -1;
  iouring_sq_ring = MAP_FAILED;
  iouring_cq_ring = MAP_FAILED;
  iouring_sqes    = MAP_FAILED;

  for (;;)
    {
      iouring_fd = evsys_io_uring_setup (iouring_entries, &params);

      if (iouring_fd >= 0)
        break; /* yippie */

      if (errno != EINVAL)
        return -1; /* we failed */

      /* EINVAL: lots of possible reasons, but maybe
       * it is because we hit the unqueryable hardcoded size limit
       */

      /* we hit the limit already, give up */
      if (iouring_max_entries)
        return -1;

      /* first time we hit EINVAL? assume we hit the limit, so halve and retry */
      iouring_entries >>= 1;
      iouring_max_entries = iouring_entries;
    }

  iouring_sq_ring_size = params.sq_off.array + params.sq_entries * sizeof (unsigned);
  iouring_cq_ring_size = params.cq_off.cqes  + params.cq_entries * sizeof (struct io_uring_cqe);
  iouring_sqes_size    =                       params.sq_entries * sizeof (struct io_uring_sqe);

  iouring_sq_ring = mmap (0, iouring_sq_ring_size, PROT_READ | PROT_WRITE,
                          MAP_SHARED | MAP_POPULATE, iouring_fd, IORING_OFF_SQ_RING);
  iouring_cq_ring = mmap (0, iouring_cq_ring_size, PROT_READ | PROT_WRITE,
                          MAP_SHARED | MAP_POPULATE, iouring_fd, IORING_OFF_CQ_RING);
  iouring_sqes    = mmap (0, iouring_sqes_size, PROT_READ | PROT_WRITE,
                          MAP_SHARED | MAP_POPULATE, iouring_fd, IORING_OFF_SQES);

  if (iouring_sq_ring == MAP_FAILED || iouring_cq_ring == MAP_FAILED || iouring_sqes == MAP_FAILED)
    return -1;

  iouring_sq_head         = params.sq_off.head;
  iouring_sq_tail         = params.sq_off.tail;
  iouring_sq_ring_mask    = params.sq_off.ring_mask;
  iouring_sq_ring_entries = params.sq_off.ring_entries;
  iouring_sq_flags        = params.sq_off.flags;
  iouring_sq_dropped      = params.sq_off.dropped;
  iouring_sq_array        = params.sq_off.array;

  iouring_cq_head         = params.cq_off.head;
  iouring_cq_tail         = params.cq_off.tail;
  iouring_cq_ring_mask    = params.cq_off.ring_mask;
  iouring_cq_ring_entries = params.cq_off.ring_entries;
  iouring_cq_overflow     = params.cq_off.overflow;
  iouring_cq_cqes         = params.cq_off.cqes;

  iouring_tfd = timerfd_create (CLOCK_MONOTONIC, TFD_CLOEXEC);

  if (iouring_tfd < 0)
    return iouring_tfd;

  iouring_tfd_to = EV_TSTAMP_HUGE;

  return 0;
}

ecb_cold
static void
iouring_fork (EV_P)
{
  iouring_internal_destroy (EV_A);

  while (iouring_internal_init (EV_A) < 0)
    ev_syserr ("(libev) io_uring_setup");

  /* forking epoll should also effectively unregister all fds from the backend */
  epoll_fork (EV_A);
  /* epoll_fork already did this. hopefully */
  /*fd_rearm_all (EV_A);*/

  ev_io_stop (EV_A_ &iouring_epoll_w);
  ev_io_set (EV_A_ &iouring_epoll_w, backend_fd, EV_READ);
  ev_io_start (EV_A_ &iouring_epoll_w);

  ev_io_stop (EV_A_ &iouring_tfd_w);
  ev_io_set (EV_A_ &iouring_tfd_w, iouring_tfd, EV_READ);
  ev_io_start (EV_A_ &iouring_tfd_w);
}

/*****************************************************************************/

static void
iouring_modify (EV_P_ int fd, int oev, int nev)
{
  if (ecb_expect_false (anfds [fd].eflags))
    {
      /* we handed this fd over to epoll, so undo this first */
      /* we do it manually because the optimisations on epoll_modify won't do us any good */
      epoll_ctl (backend_fd, EPOLL_CTL_DEL, fd, 0);
      anfds [fd].eflags = 0;
      oev = 0;
    }

  if (oev)
    {
      /* we assume the sqe's are all "properly" initialised */
      struct io_uring_sqe *sqe = iouring_sqe_get (EV_A);
      sqe->opcode    = IORING_OP_POLL_REMOVE;
      sqe->fd        = fd;
      sqe->user_data = -1;
      iouring_sqe_submit (EV_A_ sqe);

      /* increment generation counter to avoid handling old events */
      ++anfds [fd].egen;
    }

  if (nev)
    {
      struct io_uring_sqe *sqe = iouring_sqe_get (EV_A);
      sqe->opcode      = IORING_OP_POLL_ADD;
      sqe->fd          = fd;
      sqe->user_data   = (uint32_t)fd | ((__u64)(uint32_t)anfds [fd].egen << 32);
      sqe->poll_events =
          (nev & EV_READ  ? POLLIN  : 0)
        | (nev & EV_WRITE ? POLLOUT : 0);
      iouring_sqe_submit (EV_A_ sqe);
    }
}
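
/* to illustrate the user_data encoding used above: the fd is stored in the
 * low 32 bits and the per-fd generation counter in the high 32 bits, so the
 * completion handler can recover both and discard completions belonging to
 * an earlier registration of the same fd:
 *
 *   int      fd  = cqe->user_data & 0xffffffffU;
 *   uint32_t gen = cqe->user_data >> 32;
 */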

inline_size
void
iouring_tfd_update (EV_P_ ev_tstamp timeout)
{
  ev_tstamp tfd_to = mn_now + timeout;

  /* we assume there will be many iterations per timer change, so
   * we only re-set the timerfd when we have to because its expiry
   * is too late.
   */
  if (ecb_expect_false (tfd_to < iouring_tfd_to))
    {
      struct itimerspec its;

      iouring_tfd_to = tfd_to;
      EV_TS_SET (its.it_interval, 0.);
      EV_TS_SET (its.it_value, tfd_to);

      if (timerfd_settime (iouring_tfd, TFD_TIMER_ABSTIME, &its, 0) < 0)
        assert (("libev: iouring timerfd_settime failed", 0));
    }
}

inline_size
void
iouring_process_cqe (EV_P_ struct io_uring_cqe *cqe)
{
  int      fd  = cqe->user_data & 0xffffffffU;
  uint32_t gen = cqe->user_data >> 32;
  int      res = cqe->res;

  /* ignore fd removal events, if there are any. TODO: verify */
  if (cqe->user_data == (__u64)-1)
    abort (); //D

  assert (("libev: io_uring fd must be in-bounds", fd >= 0 && fd < anfdmax));

  /* documentation lies, of course. the result value is NOT like
   * normal syscalls, but like linux raw syscalls, i.e. negative
   * error numbers. fortunately, as otherwise there would be no way
   * to get error codes at all. still, why not document this?
   */

  /* ignore event if generation doesn't match */
  /* this should actually be very rare */
  if (ecb_expect_false (gen != (uint32_t)anfds [fd].egen))
    return;

  if (ecb_expect_false (res < 0))
    {
      if (res == -EINVAL)
        {
          /* we assume this error code means the fd/poll combination is buggy
           * and fall back to epoll.
           * this error code might also indicate a bug, but the kernel doesn't
           * distinguish between those two conditions, so... sigh...
           */

          epoll_modify (EV_A_ fd, 0, anfds [fd].events);
        }
      else if (res == -EBADF)
        {
          assert (("libev: event loop rejected bad fd", res != -EBADF));
          fd_kill (EV_A_ fd);
        }
      else
        {
          errno = -res;
          ev_syserr ("(libev) IORING_OP_POLL_ADD");
        }

      return;
    }

  /* feed events, we do not expect or handle POLLNVAL */
  fd_event (
    EV_A_
    fd,
    (res & (POLLOUT | POLLERR | POLLHUP) ? EV_WRITE : 0)
    | (res & (POLLIN | POLLERR | POLLHUP) ? EV_READ : 0)
  );

  /* io_uring is oneshot, so we need to re-arm the fd next iteration */
  /* this also means we usually have to do at least one syscall per iteration */
  anfds [fd].events = 0;
  fd_change (EV_A_ fd, EV_ANFD_REIFY);
}

/* called when the event queue overflows */
ecb_cold
static void
iouring_overflow (EV_P)
{
  /* we have two options: resize the queue (by tearing down
   * everything and recreating it), or live with it
   * and poll.
   * we implement this by resizing the queue, and, if that fails,
   * we just recreate the state on every failure, which
   * kind of is a very inefficient poll.
   * one danger is, due to the bias toward lower fds,
   * we will only really get events for those, so
   * maybe we need a poll() fallback, after all.
   */
  /*EV_CQ_VAR (overflow) = 0;*/ /* need to do this if we keep the state and poll manually */

  fd_rearm_all (EV_A);

  /* we double the size until we hit the hard-to-probe maximum */
  if (!iouring_max_entries)
    {
      iouring_entries <<= 1;
      iouring_fork (EV_A);
    }
  else
    {
      /* we hit the kernel limit, we should fall back to something else.
       * we can either poll() a few times and hope for the best,
       * poll always, or switch to epoll.
       * since we use epoll anyways, go epoll.
       */

      iouring_internal_destroy (EV_A);

      /* this should make it so that on return, we don't call any uring functions */
      iouring_to_submit = 0;

      for (;;)
        {
          backend = epoll_init (EV_A_ 0);

          if (backend)
            break;

          ev_syserr ("(libev) iouring switch to epoll");
        }
    }
}
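
/* a note on the resize path above: there is no in-place ring resize, so
 * growing reuses iouring_fork, which tears all kernel state down and
 * recreates it with the doubled iouring_entries, re-arming all fds */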

/* handle any events in the completion queue, return true if there were any */
static int
iouring_handle_cq (EV_P)
{
  unsigned head, tail, mask;

  head = EV_CQ_VAR (head);
  ECB_MEMORY_FENCE_ACQUIRE;
  tail = EV_CQ_VAR (tail);

  if (head == tail)
    return 0;

  /* it can only overflow if we have events, yes, yes? */
  if (ecb_expect_false (EV_CQ_VAR (overflow)))
    {
      iouring_overflow (EV_A);
      return 1;
    }

  mask = EV_CQ_VAR (ring_mask);

  do
    iouring_process_cqe (EV_A_ &EV_CQES [head++ & mask]);
  while (head != tail);

  EV_CQ_VAR (head) = head;
  ECB_MEMORY_FENCE_RELEASE;

  return 1;
}
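
/* for the curious: the acquire fence above pairs with the kernel publishing
 * new cqes before it updates the tail, and the release fence orders our cqe
 * reads before the head store that hands the entries back to the kernel */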

static void
iouring_poll (EV_P_ ev_tstamp timeout)
{
  /* if we have events, no need for extra syscalls, but we might have to queue events */
  if (iouring_handle_cq (EV_A))
    timeout = EV_TS_CONST (0.);
  else
    /* no events, so maybe wait for some */
    iouring_tfd_update (EV_A_ timeout);

  /* only enter the kernel if we have something to submit, or we need to wait */
  if (timeout || iouring_to_submit)
    {
      int res;

      EV_RELEASE_CB;

      res = evsys_io_uring_enter (iouring_fd, iouring_to_submit, 1,
                                  timeout > EV_TS_CONST (0.) ? IORING_ENTER_GETEVENTS : 0, 0, 0);
      iouring_to_submit = 0;

      EV_ACQUIRE_CB;

      if (ecb_expect_false (res < 0))
        if (errno == EINTR)
          /* ignore */;
        else
          ev_syserr ("(libev) io_uring_enter");
      else
        iouring_handle_cq (EV_A);
    }
}
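
/* a note on waiting: io_uring_enter has no timeout argument, so the actual
 * timeout is implemented via the timerfd: iouring_tfd_w is a normal ev_io
 * watcher and thus polled through the ring itself, and its completion at
 * mn_now + timeout (see iouring_tfd_update) wakes the enter call above */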

inline_size
int
iouring_init (EV_P_ int flags)
{
  if (!epoll_init (EV_A_ 0))
    return 0;

  iouring_entries     = IOURING_INIT_ENTRIES;
  iouring_max_entries = 0;

  if (iouring_internal_init (EV_A) < 0)
    {
      iouring_internal_destroy (EV_A);
      return 0;
    }

  /* initialise the watchers only after their fds exist: backend_fd is the
   * epoll fd from epoll_init, iouring_tfd is created by iouring_internal_init */
  ev_io_init (&iouring_epoll_w, iouring_epoll_cb, backend_fd, EV_READ);
  ev_set_priority (&iouring_epoll_w, EV_MAXPRI);

  ev_io_init (&iouring_tfd_w, iouring_tfd_cb, iouring_tfd, EV_READ);
  ev_set_priority (&iouring_tfd_w, EV_MAXPRI);

  ev_io_start (EV_A_ &iouring_epoll_w);
  ev_unref (EV_A); /* watcher should not keep loop alive */

  ev_io_start (EV_A_ &iouring_tfd_w);
  ev_unref (EV_A); /* watcher should not keep loop alive */

  backend_modify = iouring_modify;
  backend_poll   = iouring_poll;

  return EVBACKEND_IOURING;
}

inline_size
void
iouring_destroy (EV_P)
{
  iouring_internal_destroy (EV_A);
  epoll_destroy (EV_A);
}