/*
 * libev linux io_uring fd activity backend
 *
 * Copyright (c) 2019-2020 Marc Alexander Lehmann <libev@schmorp.de>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without modifica-
 * tion, are permitted provided that the following conditions are met:
 *
…
 * b) best is not necessarily very good.
 * c) it's better than the aio mess, doesn't suffer from the fork problems
 *    of linux aio or epoll and so on and so on. and you could do event stuff
 *    without any syscalls. what's not to like?
 * d) ok, it's vastly more complex, but that's ok, really.
 * e) why two mmaps instead of one? one would be more space-efficient,
 *    and I can't see what benefit two would have (other than being
 *    somehow resizable/relocatable, but that's apparently not possible).
 * f) hmm, it's practically undebuggable (gdb can't access the memory, and
 *    the bizarre way structure offsets are communicated makes it hard to
 *    just print the ring buffer heads, even *iff* the memory were visible
 *    in gdb. but then, that's also ok, really.
 * g) well, you cannot specify a timeout when waiting for events. no,
 *    seriously, the interface doesn't support a timeout. never seen _that_
 *    before. sure, you can use a timerfd, but that's another syscall
 *    you could have avoided. overall, this bizarre omission smells
 *    like a µ-optimisation by the io_uring author for his personal
 *    applications, to the detriment of everybody else who just wants
 *    an event loop. but, umm, ok, if that's all, it could be worse.
 *    (from what I gather from the author Jens Axboe, it simply didn't
 *    occur to him, and he made good on it by adding an unlimited number
 *    of timeouts later :).
 * h) initially there was a hardcoded limit of 4096 outstanding events.
 *    later versions not only bump this to 32k, but also can handle
 *    an unlimited amount of events, so this only affects the batch size.
 * i) unlike linux aio, you *can* register more than the limit
 *    of fd events. early versions of io_uring signalled an overflow
 *    and you ended up getting wet; 5.5+ does not do this anymore.
 * j) but, oh my! it had exactly the same bugs as the linux aio backend,
 *    where some undocumented poll combinations just fail. fortunately,
 *    after finally reaching the author, he was more than willing to fix
 *    this, probably in 5.6+.
 * k) overall, the *API* itself is, I dare to say, not a total trainwreck.
 *    once the bugs are fixed (probably in 5.6+), it will be without
 *    competition.
 */

/* TODO: use internal TIMEOUT */
/* TODO: take advantage of single mmap, NODROP etc. */
/* TODO: resize cq/sq size independently */

#include <sys/timerfd.h>
#include <sys/mman.h>
#include <poll.h>
#include <stdint.h>

#define IOURING_INIT_ENTRIES 32

/*****************************************************************************/
/* syscall wrapdadoop - this section has the raw api/abi definitions */
…
{
  __u8 opcode;
  __u8 flags;
  __u16 ioprio;
  __s32 fd;
  union {
    __u64 off;
    __u64 addr2;
  };
  __u64 addr;
  __u32 len;
  union {
    __kernel_rwf_t rw_flags;
    __u32 fsync_flags;
    __u16 poll_events;
    __u32 sync_range_flags;
    __u32 msg_flags;
    __u32 timeout_flags;
    __u32 accept_flags;
    __u32 cancel_flags;
    __u32 open_flags;
    __u32 statx_flags;
    __u32 fadvise_advice;
  };
  __u64 user_data;
  union {
    __u16 buf_index;
    __u16 personality;
    __u64 __pad2[3];
  };
};

struct io_uring_cqe
…
  __u32 sq_entries;
  __u32 cq_entries;
  __u32 flags;
  __u32 sq_thread_cpu;
  __u32 sq_thread_idle;
  __u32 features;
  __u32 resv[4];
  struct io_sqring_offsets sq_off;
  struct io_cqring_offsets cq_off;
};

#define IORING_FEAT_SINGLE_MMAP   0x00000001
#define IORING_FEAT_NODROP        0x00000002
#define IORING_FEAT_SUBMIT_STABLE 0x00000004

#define IORING_SETUP_CQSIZE 0x00000008
#define IORING_SETUP_CLAMP  0x00000010

#define IORING_OP_POLL_ADD        6
#define IORING_OP_POLL_REMOVE     7
#define IORING_OP_TIMEOUT        11
#define IORING_OP_TIMEOUT_REMOVE 12

#define IORING_REGISTER_EVENTFD       4
#define IORING_REGISTER_EVENTFD_ASYNC 7
#define IORING_REGISTER_PROBE         8

#define IO_URING_OP_SUPPORTED 1

struct io_uring_probe_op {
  __u8 op;
  __u8 resv;
  __u16 flags;
  __u32 resv2;
};

struct io_uring_probe
{
  __u8 last_op;
  __u8 ops_len;
  __u16 resv;
  __u32 resv2[3];
  struct io_uring_probe_op ops[0];
};

/* relative or absolute, reference clock is CLOCK_MONOTONIC */
struct iouring_kernel_timespec
{
  int64_t tv_sec;
  long long tv_nsec;
};

#define IORING_TIMEOUT_ABS 0x00000001
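
/* Illustration only, not compiled in: a rough sketch of what the "use
 * internal TIMEOUT" TODO above might look like - filling an sqe with a
 * relative IORING_OP_TIMEOUT instead of arming the timerfd. The helper
 * name is hypothetical, and the timespec must stay valid while the sqe
 * is in flight. */
#if 0
static void
iouring_sqe_fill_timeout (struct io_uring_sqe *sqe, ev_tstamp timeout)
{
  static struct iouring_kernel_timespec ts; /* must outlive the submission */

  ts.tv_sec  = (int64_t)timeout;
  ts.tv_nsec = (long long)((timeout - (ev_tstamp)ts.tv_sec) * 1e9);

  sqe->opcode        = IORING_OP_TIMEOUT;
  sqe->fd            = -1;
  sqe->addr          = (__u64)(uintptr_t)&ts; /* pointer to the timespec */
  sqe->len           = 1;                     /* exactly one timespec */
  sqe->off           = 0;                     /* wait for the timeout only */
  sqe->timeout_flags = 0;                     /* relative; IORING_TIMEOUT_ABS for absolute */
  sqe->user_data     = (__u64)-1;             /* marker, same convention as poll removal */
}
#endif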

#define IORING_ENTER_GETEVENTS 0x01

#define IORING_OFF_SQ_RING 0x00000000ULL
#define IORING_OFF_SQES    0x10000000ULL

inline_size
int
evsys_io_uring_setup (unsigned entries, struct io_uring_params *params)
{
…
evsys_io_uring_enter (int fd, unsigned to_submit, unsigned min_complete, unsigned flags, const sigset_t *sig, size_t sigsz)
{
  return ev_syscall6 (SYS_io_uring_enter, fd, to_submit, min_complete, flags, sig, sigsz);
}

inline_size
int
evsys_io_uring_register (unsigned int fd, unsigned int opcode, void *arg, unsigned int nr_args)
{
  return ev_syscall4 (SYS_io_uring_register, fd, opcode, arg, nr_args);
}
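
/* Illustration only, not compiled in: a sketch of how evsys_io_uring_register
 * together with the probe structures above could ask the kernel (5.6+) which
 * opcodes it supports. The function name and the fixed 256-op buffer are
 * assumptions, not something libev currently does. */
#if 0
#include <string.h>

static int
iouring_probe_op_supported (int ring_fd, unsigned op)
{
  char mem[sizeof (struct io_uring_probe) + 256 * sizeof (struct io_uring_probe_op)];
  struct io_uring_probe *probe = (struct io_uring_probe *)mem;

  memset (mem, 0, sizeof (mem));

  if (evsys_io_uring_register (ring_fd, IORING_REGISTER_PROBE, probe, 256) < 0)
    return 0; /* kernel too old to even probe */

  return op <= probe->last_op
         && (probe->ops [op].flags & IO_URING_OP_SUPPORTED);
}
#endif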

/*****************************************************************************/
/* actual backend implementation */

/* we hope that volatile will make the compiler access these variables only once */
#define EV_SQ_VAR(name) *(volatile unsigned *)((char *)iouring_ring + iouring_sq_ ## name)
#define EV_CQ_VAR(name) *(volatile unsigned *)((char *)iouring_ring + iouring_cq_ ## name)

/* the index array */
#define EV_SQ_ARRAY ((unsigned *)((char *)iouring_ring + iouring_sq_array))

/* the submit/completion queue entries */
#define EV_SQES ((struct io_uring_sqe *) iouring_sqes)
#define EV_CQES ((struct io_uring_cqe *)((char *)iouring_ring + iouring_cq_cqes))
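
/* Illustration only, not compiled in: the macros above implement the usual
 * io_uring ring protocol - head/tail indices increase monotonically and are
 * masked into the array only on access. A sketch of draining the completion
 * ring, mirroring what iouring_handle_cq does (memory fences omitted for
 * brevity): */
#if 0
  unsigned head = EV_CQ_VAR (head); /* consumer index, advanced by us */
  unsigned tail = EV_CQ_VAR (tail); /* producer index, advanced by the kernel */

  while (head != tail)
    {
      struct io_uring_cqe *cqe = &EV_CQES [head & EV_CQ_VAR (ring_mask)];

      /* ... process *cqe ... */

      ++head;
    }

  EV_CQ_VAR (head) = head; /* hand the consumed entries back to the kernel */
#endif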

inline_speed
int
iouring_enter (EV_P_ ev_tstamp timeout)
{
  int res;

  EV_RELEASE_CB;

  res = evsys_io_uring_enter (iouring_fd, iouring_to_submit, 1,
                              timeout > EV_TS_CONST (0.) ? IORING_ENTER_GETEVENTS : 0, 0, 0);

  assert (("libev: io_uring_enter did not consume all sqes", (res < 0 || res == iouring_to_submit)));

  iouring_to_submit = 0;

  EV_ACQUIRE_CB;

  return res;
}

/* TODO: can we move things around so we don't need this forward-reference? */
static void
iouring_poll (EV_P_ ev_tstamp timeout);

static
struct io_uring_sqe *
iouring_sqe_get (EV_P)
{
  unsigned tail;

  for (;;)
    {
      tail = EV_SQ_VAR (tail);

      if (ecb_expect_true (tail + 1 - EV_SQ_VAR (head) <= EV_SQ_VAR (ring_entries)))
        break; /* what's the problem, we have free sqes */

      /* queue full, need to flush and possibly handle some events */

#if EV_FEATURE_CODE
      /* first we ask the kernel nicely, most often this frees up some sqes */
      int res = iouring_enter (EV_A_ EV_TS_CONST (0.));

      ECB_MEMORY_FENCE_ACQUIRE; /* better safe than sorry */

      if (res >= 0)
        continue; /* yes, it worked, try again */
#endif

      /* some problem, possibly EBUSY - do the full poll and let it handle any issues */

      iouring_poll (EV_A_ EV_TS_CONST (0.));
      /* iouring_poll should have done ECB_MEMORY_FENCE_ACQUIRE for us */
    }

  /*assert (("libev: io_uring queue full after flush", tail + 1 - EV_SQ_VAR (head) <= EV_SQ_VAR (ring_entries)));*/

  return EV_SQES + (tail & EV_SQ_VAR (ring_mask));
}
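
/* Illustration only, not compiled in: how the two helpers are meant to be
 * paired - get an sqe, fill it in, submit it. This mirrors what
 * iouring_modify does further below; fd is a hypothetical descriptor being
 * watched. */
#if 0
  struct io_uring_sqe *sqe = iouring_sqe_get (EV_A);

  sqe->opcode      = IORING_OP_POLL_ADD;
  sqe->fd          = fd;
  sqe->poll_events = POLLIN;
  /* fd in the low 32 bits, generation counter in the high 32 bits, so
   * stale completions can be detected and dropped in the cqe handler */
  sqe->user_data   = (uint32_t)fd | ((__u64)(uint32_t)anfds [fd].egen << 32);

  iouring_sqe_submit (EV_A_ sqe);
#endif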

inline_size
struct io_uring_sqe *
iouring_sqe_submit (EV_P_ struct io_uring_sqe *sqe)
{
  unsigned idx = sqe - EV_SQES;

  printf ("submit idx %u, op %d, fd %d, user %llx, poll %d\n", idx, sqe->opcode, sqe->fd, (unsigned long long)sqe->user_data, sqe->poll_events);//D

  EV_SQ_ARRAY [idx] = idx;
  ECB_MEMORY_FENCE_RELEASE;
  ++EV_SQ_VAR (tail);
  /*ECB_MEMORY_FENCE_RELEASE; /* for the time being we assume this is not needed */
…
iouring_tfd_cb (EV_P_ struct ev_io *w, int revents)
{
  iouring_tfd_to = EV_TSTAMP_HUGE;
}

/* called for full and partial cleanup */
ecb_cold
static void
iouring_internal_destroy (EV_P)
{
  close (iouring_tfd);
  close (iouring_fd);

  if (iouring_ring != MAP_FAILED) munmap (iouring_ring, iouring_ring_size);
  if (iouring_sqes != MAP_FAILED) munmap (iouring_sqes, iouring_sqes_size);

  if (ev_is_active (&iouring_tfd_w))
    {
      ev_ref (EV_A);
      ev_io_stop (EV_A_ &iouring_tfd_w);
    }
}

ecb_cold
static int
iouring_internal_init (EV_P)
{
  struct io_uring_params params = { 0 };
  uint32_t sq_size, cq_size;

  params.flags = IORING_SETUP_CLAMP;

  iouring_to_submit = 0;

  iouring_tfd = -1;
  iouring_ring = MAP_FAILED;
  iouring_sqes = MAP_FAILED;

  if (!have_monotonic) /* cannot really happen, but what if11 */
    return -1;

  iouring_fd = evsys_io_uring_setup (iouring_entries, &params);

  if (iouring_fd < 0)
    return -1;

  if ((~params.features) & (IORING_FEAT_NODROP | IORING_FEAT_SINGLE_MMAP | IORING_FEAT_SUBMIT_STABLE))
    return -1; /* we require the above features */

  /* TODO: remember somehow whether our queue size has been clamped */

  sq_size = params.sq_off.array + params.sq_entries * sizeof (unsigned);
  cq_size = params.cq_off.cqes + params.cq_entries * sizeof (struct io_uring_cqe);

  iouring_ring_size = sq_size > cq_size ? sq_size : cq_size;
  iouring_sqes_size = params.sq_entries * sizeof (struct io_uring_sqe);

  iouring_ring = mmap (0, iouring_ring_size, PROT_READ | PROT_WRITE,
                       MAP_SHARED | MAP_POPULATE, iouring_fd, IORING_OFF_SQ_RING);
  iouring_sqes = mmap (0, iouring_sqes_size, PROT_READ | PROT_WRITE,
                       MAP_SHARED | MAP_POPULATE, iouring_fd, IORING_OFF_SQES);

  if (iouring_ring == MAP_FAILED || iouring_sqes == MAP_FAILED)
    return -1;

  iouring_sq_head = params.sq_off.head;
  iouring_sq_tail = params.sq_off.tail;
  iouring_sq_ring_mask = params.sq_off.ring_mask;
…
  iouring_cq_ring_mask = params.cq_off.ring_mask;
  iouring_cq_ring_entries = params.cq_off.ring_entries;
  iouring_cq_overflow = params.cq_off.overflow;
  iouring_cq_cqes = params.cq_off.cqes;

  iouring_tfd_to = EV_TSTAMP_HUGE;

  iouring_tfd = timerfd_create (CLOCK_MONOTONIC, TFD_CLOEXEC);

  if (iouring_tfd < 0)
    return -1;

  return 0;
}

ecb_cold
…
  iouring_internal_destroy (EV_A);

  while (iouring_internal_init (EV_A) < 0)
    ev_syserr ("(libev) io_uring_setup");

  fd_rearm_all (EV_A);

  ev_io_stop (EV_A_ &iouring_tfd_w);
  ev_io_set (EV_A_ &iouring_tfd_w, iouring_tfd, EV_READ);
  ev_io_start (EV_A_ &iouring_tfd_w);
}
…
/*****************************************************************************/

static void
iouring_modify (EV_P_ int fd, int oev, int nev)
{
  if (oev)
    {
      /* we assume the sqe's are all "properly" initialised */
      struct io_uring_sqe *sqe = iouring_sqe_get (EV_A);
      sqe->opcode = IORING_OP_POLL_REMOVE;
      sqe->fd = fd;
      /* Jens Axboe notified me that user_data is not what is documented, but is
       * some kind of unique ID that has to match, otherwise the request cannot
       * be removed. Since we don't *really* have that, we pass in the old
       * generation counter - if that fails, too bad, it will hopefully be removed
       * at close time and then be ignored. */
      sqe->addr = (uint32_t)fd | ((__u64)(uint32_t)anfds [fd].egen << 32);
      sqe->user_data = (uint64_t)-1;
      iouring_sqe_submit (EV_A_ sqe);

      /* increment generation counter to avoid handling old events */
      ++anfds [fd].egen;
    }
…
  if (nev)
    {
      struct io_uring_sqe *sqe = iouring_sqe_get (EV_A);
      sqe->opcode = IORING_OP_POLL_ADD;
      sqe->fd = fd;
      sqe->addr = 0;
      sqe->user_data = (uint32_t)fd | ((__u64)(uint32_t)anfds [fd].egen << 32);
      sqe->poll_events =
        (nev & EV_READ ? POLLIN : 0)
        | (nev & EV_WRITE ? POLLOUT : 0);
      iouring_sqe_submit (EV_A_ sqe);
…
{
  int fd = cqe->user_data & 0xffffffffU;
  uint32_t gen = cqe->user_data >> 32;
  int res = cqe->res;

  /* user_data -1 is a remove that we are not atm. interested in */
  if (cqe->user_data == (uint64_t)-1)
    return;

  assert (("libev: io_uring fd must be in-bounds", fd >= 0 && fd < anfdmax));

  /* documentation lies, of course. the result value is NOT like
   * normal syscalls, but like linux raw syscalls, i.e. negative
   * error numbers. fortunate, as otherwise there would be no way
   * to get error codes at all. still, why not document this?
   */

  /* ignore event if generation doesn't match */
  /* other than skipping removal events, */
  /* this should actually be very rare */
  if (ecb_expect_false (gen != (uint32_t)anfds [fd].egen))
    return;

  if (ecb_expect_false (res < 0))
    {
      /*TODO: EINVAL handling (was something failed with this fd)*/

      if (res == -EBADF)
        {
          assert (("libev: event loop rejected bad fd", res != -EBADF));
          fd_kill (EV_A_ fd);
        }
      else
…
        }

      return;
    }

  /* feed events, we do not expect or handle POLLNVAL */
  fd_event (
    EV_A_
    fd,
    (res & (POLLOUT | POLLERR | POLLHUP) ? EV_WRITE : 0)
…
iouring_overflow (EV_P)
{
  /* we have two options, resize the queue (by tearing down
   * everything and recreating it), or living with it
   * and polling.
   * we implement this by resizing the queue, and, if that fails,
   * we just recreate the state on every failure, which
   * kind of is a very inefficient poll.
   * one danger is, due to the bias toward lower fds,
   * we will only really get events for those, so
   * maybe we need a poll() fallback, after all.
…
  else
    {
      /* we hit the kernel limit, we should fall back to something else.
       * we can either poll() a few times and hope for the best,
       * poll always, or switch to epoll.
       * TODO: is this necessary with newer kernels?
       */

      iouring_internal_destroy (EV_A);

      /* this should make it so that on return, we don't call any uring functions */
      iouring_to_submit = 0;

      for (;;)
        {
          backend = epoll_init (EV_A_ 0);
…

static void
iouring_poll (EV_P_ ev_tstamp timeout)
{
  /* if we have events, no need for extra syscalls, but we might have to queue events */
  /* we also clear the timeout if there are outstanding fdchanges */
  /* the latter should only happen if both the sq and cq are full, most likely */
  /* because we have a lot of event sources that immediately complete */
  /* TODO: fdchangecnt is always 0 because fd_reify does not have two buffers yet */
  if (iouring_handle_cq (EV_A) || fdchangecnt)
    timeout = EV_TS_CONST (0.);
  else
    /* no events, so maybe wait for some */
    iouring_tfd_update (EV_A_ timeout);

  /* only enter the kernel if we have something to submit, or we need to wait */
  if (timeout || iouring_to_submit)
    {
      int res = iouring_enter (EV_A_ timeout);

      if (ecb_expect_false (res < 0))
        if (errno == EINTR)
          /* ignore */;
        else if (errno == EBUSY)
          /* cq full, cannot submit - should be rare because we flush the cq first, so simply ignore */;
        else
          ev_syserr ("(libev) iouring setup");
      else
        iouring_handle_cq (EV_A);
    }
…

inline_size
int
iouring_init (EV_P_ int flags)
{
  iouring_entries = IOURING_INIT_ENTRIES;
  iouring_max_entries = 0;

  if (iouring_internal_init (EV_A) < 0)
    {
      iouring_internal_destroy (EV_A);
      return 0;
    }

  ev_io_init (&iouring_tfd_w, iouring_tfd_cb, iouring_tfd, EV_READ);
  ev_set_priority (&iouring_tfd_w, EV_MINPRI);
  ev_io_start (EV_A_ &iouring_tfd_w);
  ev_unref (EV_A); /* watcher should not keep loop alive */

  backend_modify = iouring_modify;
  backend_poll = iouring_poll;
…
inline_size
void
iouring_destroy (EV_P)
{
  iouring_internal_destroy (EV_A);
}
