/cvs/libev/ev_iouring.c
Revision: 1.8
Committed: Fri Dec 27 16:08:24 2019 UTC by root
Content type: text/plain
Branch: MAIN
Changes since 1.7: +25 -49 lines
Log Message:
rip out epoll from iouring

/*
 * libev linux io_uring fd activity backend
 *
 * Copyright (c) 2019 Marc Alexander Lehmann <libev@schmorp.de>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without modifica-
 * tion, are permitted provided that the following conditions are met:
 *
 *   1. Redistributions of source code must retain the above copyright notice,
 *      this list of conditions and the following disclaimer.
 *
 *   2. Redistributions in binary form must reproduce the above copyright
 *      notice, this list of conditions and the following disclaimer in the
 *      documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
 * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MER-
 * CHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
 * EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPE-
 * CIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
 * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTH-
 * ERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
 * OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Alternatively, the contents of this file may be used under the terms of
 * the GNU General Public License ("GPL") version 2 or any later version,
 * in which case the provisions of the GPL are applicable instead of
 * the above. If you wish to allow the use of your version of this file
 * only under the terms of the GPL and not to allow others to use your
 * version of this file under the BSD license, indicate your decision
 * by deleting the provisions above and replace them with the notice
 * and other provisions required by the GPL. If you do not delete the
 * provisions above, a recipient may use your version of this file under
 * either the BSD or the GPL.
 */

/*
 * general notes about linux io_uring:
 *
 * a) it's the best interface I have seen so far. on linux.
 * b) best is not necessarily very good.
 * c) it's better than the aio mess, doesn't suffer from the fork problems
 *    of linux aio or epoll and so on and so on. and you could do event stuff
 *    without any syscalls. what's not to like?
 * d) ok, it's vastly more complex, but that's ok, really.
 * e) why 3 mmaps instead of one? one would be more space-efficient,
 *    and I can't see what benefit three would have (other than being
 *    somehow resizable/relocatable, but that's apparently not possible).
 *    (FIXME: newer kernels can use 2 mmaps only, need to look into this).
 * f) hmm, it's practically undebuggable (gdb can't access the memory, and
 *    the bizarre way structure offsets are communicated makes it hard to
 *    just print the ring buffer heads, even *if* the memory were visible
 *    in gdb. but then, that's also ok, really.
 * g) well, you cannot specify a timeout when waiting for events. no,
 *    seriously, the interface doesn't support a timeout. never seen _that_
 *    before. sure, you can use a timerfd, but that's another syscall
 *    you could have avoided. overall, this bizarre omission smells
 *    like a µ-optimisation by the io_uring author for his personal
 *    applications, to the detriment of everybody else who just wants
 *    an event loop. but, umm, ok, if that's all, it could be worse.
 *    (FIXME: jens mentioned timeout commands, need to investigate)
 * h) there is a hardcoded limit of 4096 outstanding events. okay,
 *    at least there is no arbitrarily low system-wide limit...
 *    (FIXME: apparently, this was increased to 32768 in later kernels)
 * i) unlike linux aio, you *can* register more than the limit
 *    of fd events, and the kernel will "gracefully" signal an
 *    overflow, after which you could destroy and recreate the kernel
 *    state, a bit bigger, or fall back to e.g. poll. that's not
 *    totally insane, but kind of questions the point of a high-
 *    performance I/O framework when it doesn't really work
 *    under stress.
 *    (FIXME: iouring should no longer drop events, need to investigate)
 * j) but, oh my! it has exactly the same bugs as the linux aio backend,
 *    where some undocumented poll combinations just fail.
 *    so we need epoll AGAIN as a fallback. AGAIN! epoll!! and of course,
 *    this is completely undocumented, have I mentioned this already?
 * k) overall, the *API* itself is, I dare to say, not a total trainwreck.
 *    the big issues with it are the bugs requiring epoll, which might
 *    or might not get fixed (do I hold my breath?).
 */
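
/* a minimal sketch, for illustration only, of the three mappings from
 * note (e) above; the offsets and sizes come from the io_uring_params
 * struct that the kernel fills in at setup time. iouring_internal_init
 * below does the same thing, plus MAP_POPULATE and error handling:
 *
 *   struct io_uring_params p = { 0 };
 *   int fd = evsys_io_uring_setup (32, &p);
 *   char *sq_ring = mmap (0, p.sq_off.array + p.sq_entries * sizeof (unsigned),
 *                         PROT_READ | PROT_WRITE, MAP_SHARED, fd, IORING_OFF_SQ_RING);
 *   char *cq_ring = mmap (0, p.cq_off.cqes + p.cq_entries * sizeof (struct io_uring_cqe),
 *                         PROT_READ | PROT_WRITE, MAP_SHARED, fd, IORING_OFF_CQ_RING);
 *   char *sqes    = mmap (0, p.sq_entries * sizeof (struct io_uring_sqe),
 *                         PROT_READ | PROT_WRITE, MAP_SHARED, fd, IORING_OFF_SQES);
 */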

#include <sys/timerfd.h>
#include <sys/mman.h>
#include <poll.h>

#define IOURING_INIT_ENTRIES 32

/*****************************************************************************/
/* syscall wrapdadoop - this section has the raw api/abi definitions */

#include <linux/fs.h>
#include <linux/types.h>

/* mostly directly taken from the kernel or documentation */

struct io_uring_sqe
{
  __u8 opcode;
  __u8 flags;
  __u16 ioprio;
  __s32 fd;
  union {
    __u64 off;
    __u64 addr2;
  };
  __u64 addr;
  __u32 len;
  union {
    __kernel_rwf_t rw_flags;
    __u32 fsync_flags;
    __u16 poll_events;
    __u32 sync_range_flags;
    __u32 msg_flags;
    __u32 timeout_flags;
    __u32 accept_flags;
    __u32 cancel_flags;
    __u32 open_flags;
    __u32 statx_flags;
  };
  __u64 user_data;
  union {
    __u16 buf_index;
    __u64 __pad2[3];
  };
};

struct io_uring_cqe
{
  __u64 user_data;
  __s32 res;
  __u32 flags;
};

struct io_sqring_offsets
{
  __u32 head;
  __u32 tail;
  __u32 ring_mask;
  __u32 ring_entries;
  __u32 flags;
  __u32 dropped;
  __u32 array;
  __u32 resv1;
  __u64 resv2;
};

struct io_cqring_offsets
{
  __u32 head;
  __u32 tail;
  __u32 ring_mask;
  __u32 ring_entries;
  __u32 overflow;
  __u32 cqes;
  __u64 resv[2];
};

struct io_uring_params
{
  __u32 sq_entries;
  __u32 cq_entries;
  __u32 flags;
  __u32 sq_thread_cpu;
  __u32 sq_thread_idle;
  __u32 features;
  __u32 resv[4];
  struct io_sqring_offsets sq_off;
  struct io_cqring_offsets cq_off;
};

#define IORING_OP_POLL_ADD    6
#define IORING_OP_POLL_REMOVE 7

#define IORING_ENTER_GETEVENTS 0x01

#define IORING_OFF_SQ_RING 0x00000000ULL
#define IORING_OFF_CQ_RING 0x08000000ULL
#define IORING_OFF_SQES    0x10000000ULL

#define IORING_FEAT_SINGLE_MMAP   0x1
#define IORING_FEAT_NODROP        0x2
#define IORING_FEAT_SUBMIT_STABLE 0x4
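
/* the feature bits speak to some of the FIXMEs in the notes above:
 * IORING_FEAT_SINGLE_MMAP means sq and cq ring can share one mapping (note e),
 * IORING_FEAT_NODROP means completions are no longer dropped on overflow (note i),
 * and IORING_FEAT_SUBMIT_STABLE means sqe data is stable once submitted */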

inline_size
int
evsys_io_uring_setup (unsigned entries, struct io_uring_params *params)
{
  return ev_syscall2 (SYS_io_uring_setup, entries, params);
}

inline_size
int
evsys_io_uring_enter (int fd, unsigned to_submit, unsigned min_complete, unsigned flags, const sigset_t *sig, size_t sigsz)
{
  return ev_syscall6 (SYS_io_uring_enter, fd, to_submit, min_complete, flags, sig, sigsz);
}
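
/* a hypothetical stand-alone use of the two wrappers above, not part of
 * the backend: create a small ring and block until at least one
 * completion arrives:
 *
 *   struct io_uring_params params = { 0 };
 *   int ringfd = evsys_io_uring_setup (IOURING_INIT_ENTRIES, &params);
 *
 *   if (ringfd >= 0)
 *     evsys_io_uring_enter (ringfd, 0, 1, IORING_ENTER_GETEVENTS, 0, 0);
 */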

/*****************************************************************************/
/* actual backend implementation */

/* we hope that volatile will make the compiler access these variables only once */
#define EV_SQ_VAR(name) *(volatile unsigned *)((char *)iouring_sq_ring + iouring_sq_ ## name)
#define EV_CQ_VAR(name) *(volatile unsigned *)((char *)iouring_cq_ring + iouring_cq_ ## name)

/* the index array */
#define EV_SQ_ARRAY ((unsigned *)((char *)iouring_sq_ring + iouring_sq_array))

/* the submit/completion queue entries */
#define EV_SQES ((struct io_uring_sqe *) iouring_sqes)
#define EV_CQES ((struct io_uring_cqe *)((char *)iouring_cq_ring + iouring_cq_cqes))
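
/* e.g. EV_SQ_VAR (tail) expands to
 *   *(volatile unsigned *)((char *)iouring_sq_ring + iouring_sq_tail)
 * where iouring_sq_tail holds the byte offset the kernel reported in
 * io_uring_params.sq_off.tail, so every ring field is addressed
 * relative to the start of its mapping */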

static
struct io_uring_sqe *
iouring_sqe_get (EV_P)
{
  unsigned tail = EV_SQ_VAR (tail);

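  /* the unsigned head/tail arithmetic below is wraparound-safe: e.g. with
   * ring_entries == 32, head == 7 and tail == 39 the ring already holds
   * 32 entries, and 39 + 1 - 7 == 33 > 32 forces a flush first */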
  if (tail + 1 - EV_SQ_VAR (head) > EV_SQ_VAR (ring_entries))
    {
      /* queue full, flush */
      evsys_io_uring_enter (iouring_fd, iouring_to_submit, 0, 0, 0, 0);
      iouring_to_submit = 0;
    }

  assert (("libev: io_uring queue full after flush", tail + 1 - EV_SQ_VAR (head) <= EV_SQ_VAR (ring_entries)));

  return EV_SQES + (tail & EV_SQ_VAR (ring_mask));
}

inline_size
void
iouring_sqe_submit (EV_P_ struct io_uring_sqe *sqe)
{
  unsigned idx = sqe - EV_SQES;

  EV_SQ_ARRAY [idx] = idx;
  ECB_MEMORY_FENCE_RELEASE;
  ++EV_SQ_VAR (tail);
  /*ECB_MEMORY_FENCE_RELEASE; for the time being we assume this is not needed */
  ++iouring_to_submit;
}
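
/* sqes are used pairwise: obtain one with iouring_sqe_get, fill it in,
 * then hand it back, e.g. (as iouring_modify below does):
 *
 *   struct io_uring_sqe *sqe = iouring_sqe_get (EV_A);
 *   sqe->opcode = IORING_OP_POLL_REMOVE;
 *   sqe->fd = fd;
 *   sqe->user_data = -1;
 *   iouring_sqe_submit (EV_A_ sqe);
 */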

/*****************************************************************************/

/* when the timerfd expires we simply note the fact,
 * as the purpose of the timerfd is to wake us up, nothing else.
 * the next iteration should re-set it.
 */
static void
iouring_tfd_cb (EV_P_ struct ev_io *w, int revents)
{
  iouring_tfd_to = EV_TSTAMP_HUGE;
}

/* called for full and partial cleanup */
ecb_cold
static void
iouring_internal_destroy (EV_P)
{
  close (iouring_tfd);
  close (iouring_fd);

  if (iouring_sq_ring != MAP_FAILED) munmap (iouring_sq_ring, iouring_sq_ring_size);
  if (iouring_cq_ring != MAP_FAILED) munmap (iouring_cq_ring, iouring_cq_ring_size);
  if (iouring_sqes    != MAP_FAILED) munmap (iouring_sqes   , iouring_sqes_size   );

  if (ev_is_active (&iouring_tfd_w))
    {
      ev_ref (EV_A);
      ev_io_stop (EV_A_ &iouring_tfd_w);
    }
}

ecb_cold
static int
iouring_internal_init (EV_P)
{
  struct io_uring_params params = { 0 };

  iouring_to_submit = 0;

  iouring_tfd     = -1;
  iouring_sq_ring = MAP_FAILED;
  iouring_cq_ring = MAP_FAILED;
  iouring_sqes    = MAP_FAILED;

  for (;;)
    {
      iouring_fd = evsys_io_uring_setup (iouring_entries, &params);

      if (iouring_fd >= 0)
        break; /* yippie */

      if (errno != EINVAL)
        return -1; /* we failed */

      /* EINVAL: lots of possible reasons, but maybe
       * it is because we hit the unqueryable hardcoded size limit
       */

      /* we hit the limit already, give up */
      if (iouring_max_entries)
        return -1;

      /* first time we hit EINVAL? assume we hit the limit, so go back and retry */
      iouring_entries >>= 1;
      iouring_max_entries = iouring_entries;
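      /* e.g. if iouring_overflow below has doubled us up to 65536 entries
       * and the kernel rejects that with EINVAL, we retry with 32768 and
       * remember it as the ceiling, so the next EINVAL gives up instead
       * of shrinking forever */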
    }

  iouring_sq_ring_size = params.sq_off.array + params.sq_entries * sizeof (unsigned);
  iouring_cq_ring_size = params.cq_off.cqes  + params.cq_entries * sizeof (struct io_uring_cqe);
  iouring_sqes_size    =                       params.sq_entries * sizeof (struct io_uring_sqe);

  iouring_sq_ring = mmap (0, iouring_sq_ring_size, PROT_READ | PROT_WRITE,
                          MAP_SHARED | MAP_POPULATE, iouring_fd, IORING_OFF_SQ_RING);
  iouring_cq_ring = mmap (0, iouring_cq_ring_size, PROT_READ | PROT_WRITE,
                          MAP_SHARED | MAP_POPULATE, iouring_fd, IORING_OFF_CQ_RING);
  iouring_sqes    = mmap (0, iouring_sqes_size, PROT_READ | PROT_WRITE,
                          MAP_SHARED | MAP_POPULATE, iouring_fd, IORING_OFF_SQES);

  if (iouring_sq_ring == MAP_FAILED || iouring_cq_ring == MAP_FAILED || iouring_sqes == MAP_FAILED)
    return -1;

  iouring_sq_head         = params.sq_off.head;
  iouring_sq_tail         = params.sq_off.tail;
  iouring_sq_ring_mask    = params.sq_off.ring_mask;
  iouring_sq_ring_entries = params.sq_off.ring_entries;
  iouring_sq_flags        = params.sq_off.flags;
  iouring_sq_dropped      = params.sq_off.dropped;
  iouring_sq_array        = params.sq_off.array;

  iouring_cq_head         = params.cq_off.head;
  iouring_cq_tail         = params.cq_off.tail;
  iouring_cq_ring_mask    = params.cq_off.ring_mask;
  iouring_cq_ring_entries = params.cq_off.ring_entries;
  iouring_cq_overflow     = params.cq_off.overflow;
  iouring_cq_cqes         = params.cq_off.cqes;

  iouring_tfd = timerfd_create (CLOCK_MONOTONIC, TFD_CLOEXEC);

  if (iouring_tfd < 0)
    return iouring_tfd;

  iouring_tfd_to = EV_TSTAMP_HUGE;

  return 0;
}

ecb_cold
static void
iouring_fork (EV_P)
{
  iouring_internal_destroy (EV_A);

  while (iouring_internal_init (EV_A) < 0)
    ev_syserr ("(libev) io_uring_setup");

  fd_rearm_all (EV_A);

  ev_io_stop  (EV_A_ &iouring_tfd_w);
  ev_io_set   (EV_A_ &iouring_tfd_w, iouring_tfd, EV_READ);
  ev_io_start (EV_A_ &iouring_tfd_w);
}

/*****************************************************************************/

static void
iouring_modify (EV_P_ int fd, int oev, int nev)
{
  if (oev)
    {
      /* we assume the sqe's are all "properly" initialised */
      struct io_uring_sqe *sqe = iouring_sqe_get (EV_A);
      sqe->opcode    = IORING_OP_POLL_REMOVE;
      sqe->fd        = fd;
      sqe->user_data = -1;
      iouring_sqe_submit (EV_A_ sqe);

      /* increment generation counter to avoid handling old events */
      ++anfds [fd].egen;
    }

  if (nev)
    {
      struct io_uring_sqe *sqe = iouring_sqe_get (EV_A);
      sqe->opcode      = IORING_OP_POLL_ADD;
      sqe->fd          = fd;
      sqe->user_data   = (uint32_t)fd | ((__u64)(uint32_t)anfds [fd].egen << 32);
      sqe->poll_events =
        (nev & EV_READ ? POLLIN : 0)
        | (nev & EV_WRITE ? POLLOUT : 0);
      iouring_sqe_submit (EV_A_ sqe);
    }
}
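
/* the user_data encoding above puts the fd in the low 32 bits and the
 * generation counter in the high 32 bits, e.g. fd 5 at generation 3
 * yields 0x0000000300000005ULL; iouring_process_cqe unpacks it the same
 * way and discards completions whose generation no longer matches */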

inline_size
void
iouring_tfd_update (EV_P_ ev_tstamp timeout)
{
  ev_tstamp tfd_to = mn_now + timeout;

  /* we assume there will be many iterations per timer change, so
   * we only re-set the timerfd when we have to because its expiry
   * is too late.
   */
  if (ecb_expect_false (tfd_to < iouring_tfd_to))
    {
      struct itimerspec its;

      iouring_tfd_to = tfd_to;
      EV_TS_SET (its.it_interval, 0.);
      EV_TS_SET (its.it_value, tfd_to);

      if (timerfd_settime (iouring_tfd, TFD_TIMER_ABSTIME, &its, 0) < 0)
        assert (("libev: iouring timerfd_settime failed", 0));
    }
}
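
/* e.g. with mn_now == 100. and a timeout of 2.5 the timerfd is armed for
 * the absolute time 102.5 (TFD_TIMER_ABSTIME); as long as an earlier
 * expiry is already armed, no syscall is made at all */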

inline_size
void
iouring_process_cqe (EV_P_ struct io_uring_cqe *cqe)
{
  int fd = cqe->user_data & 0xffffffffU;
  uint32_t gen = cqe->user_data >> 32;
  int res = cqe->res;

  /* ignore fd removal events, if there are any. TODO: verify */
  if (cqe->user_data == (__u64)-1)
    return;

  assert (("libev: io_uring fd must be in-bounds", fd >= 0 && fd < anfdmax));

  /* documentation lies, of course. the result value is NOT like
   * normal syscalls, but like linux raw syscalls, i.e. negative
   * error numbers. fortunate, as otherwise there would be no way
   * to get error codes at all. still, why not document this?
   */
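
  /* e.g. a failed poll shows up here as res == -EBADF, not as
   * res == -1 with errno set to EBADF */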

  /* ignore event if generation doesn't match */
  /* this should actually be very rare */
  if (ecb_expect_false (gen != (uint32_t)anfds [fd].egen))
    return;

  if (ecb_expect_false (res < 0))
    {
      //TODO: EINVAL handling (was something failed with this fd)
      //TODO: EBUSY happens when?

      if (res == -EBADF)
        {
          assert (("libev: event loop rejected bad fd", res != -EBADF));
          fd_kill (EV_A_ fd);
        }
      else
        {
          errno = -res;
          ev_syserr ("(libev) IORING_OP_POLL_ADD");
        }

      return;
    }

  /* feed events, we do not expect or handle POLLNVAL */
  fd_event (
    EV_A_
    fd,
    (res & (POLLOUT | POLLERR | POLLHUP) ? EV_WRITE : 0)
    | (res & (POLLIN | POLLERR | POLLHUP) ? EV_READ : 0)
  );

  /* io_uring is oneshot, so we need to re-arm the fd next iteration */
  /* this also means we usually have to do at least one syscall per iteration */
  anfds [fd].events = 0;
  fd_change (EV_A_ fd, EV_ANFD_REIFY);
}

/* called when the event queue overflows */
ecb_cold
static void
iouring_overflow (EV_P)
{
  /* we have two options: resize the queue (by tearing down
   * everything and recreating it), or live with it
   * and poll.
   * we implement this by resizing the queue, and, if that fails,
   * we just recreate the state on every failure, which
   * kind of is a very inefficient poll.
   * one danger is, due to the bias toward lower fds,
   * we will only really get events for those, so
   * maybe we need a poll() fallback, after all.
   */
  /*EV_CQ_VAR (overflow) = 0;*/ /* need to do this if we keep the state and poll manually */

  fd_rearm_all (EV_A);

  /* we double the size until we hit the hard-to-probe maximum */
  if (!iouring_max_entries)
    {
      iouring_entries <<= 1;
      iouring_fork (EV_A);
    }
  else
    {
      /* we hit the kernel limit, we should fall back to something else.
       * we can either poll() a few times and hope for the best,
       * poll always, or switch to epoll.
       * since we use epoll anyways, go epoll.
       */

      iouring_internal_destroy (EV_A);

      /* this should make it so that on return, we don't call any uring functions */
      iouring_to_submit = 0;

      for (;;)
        {
          backend = epoll_init (EV_A_ 0);

          if (backend)
            break;

          ev_syserr ("(libev) iouring switch to epoll");
        }
    }
}

/* handle any events in the completion queue, return true if there were any */
static int
iouring_handle_cq (EV_P)
{
  unsigned head, tail, mask;

  head = EV_CQ_VAR (head);
  ECB_MEMORY_FENCE_ACQUIRE;
  tail = EV_CQ_VAR (tail);

  if (head == tail)
    return 0;

  /* it can only overflow if we have events, yes, yes? */
  if (ecb_expect_false (EV_CQ_VAR (overflow)))
    {
      iouring_overflow (EV_A);
      return 1;
    }

  mask = EV_CQ_VAR (ring_mask);

  do
    iouring_process_cqe (EV_A_ &EV_CQES [head++ & mask]);
  while (head != tail);

  EV_CQ_VAR (head) = head;
  ECB_MEMORY_FENCE_RELEASE;

  return 1;
}

static void
iouring_poll (EV_P_ ev_tstamp timeout)
{
  /* if we have events, no need for extra syscalls, but we might have to queue events */
  if (iouring_handle_cq (EV_A))
    timeout = EV_TS_CONST (0.);
  else
    /* no events, so maybe wait for some */
    iouring_tfd_update (EV_A_ timeout);

  /* only enter the kernel if we have something to submit, or we need to wait */
  if (timeout || iouring_to_submit)
    {
      int res;

      EV_RELEASE_CB;

      res = evsys_io_uring_enter (iouring_fd, iouring_to_submit, 1,
                                  timeout > EV_TS_CONST (0.) ? IORING_ENTER_GETEVENTS : 0, 0, 0);
      iouring_to_submit = 0;

      EV_ACQUIRE_CB;

      if (ecb_expect_false (res < 0))
        if (errno == EINTR)
          /* ignore */;
        else
          ev_syserr ("(libev) iouring setup");
      else
        iouring_handle_cq (EV_A);
    }
}

inline_size
int
iouring_init (EV_P_ int flags)
{
  iouring_entries     = IOURING_INIT_ENTRIES;
  iouring_max_entries = 0;

  if (iouring_internal_init (EV_A) < 0)
    {
      iouring_internal_destroy (EV_A);
      return 0;
    }

  ev_io_init (&iouring_tfd_w, iouring_tfd_cb, iouring_tfd, EV_READ);
  ev_set_priority (&iouring_tfd_w, EV_MINPRI);
  ev_io_start (EV_A_ &iouring_tfd_w);
  ev_unref (EV_A); /* watcher should not keep loop alive */

  backend_modify = iouring_modify;
  backend_poll   = iouring_poll;

  return EVBACKEND_IOURING;
}

inline_size
void
iouring_destroy (EV_P)
{
  iouring_internal_destroy (EV_A);
}