…
 * and other provisions required by the GPL. If you do not delete the
 * provisions above, a recipient may use your version of this file under
 * either the BSD or the GPL.
 */

/*
 * general notes about linux aio:
 *
 * a) at first, the linux aio IOCB_CMD_POLL functionality introduced in
 *    4.18 looks too good to be true: both watchers and events can be
 *    batched, and events can even be handled in userspace using
 *    a ring buffer shared with the kernel. watchers can be canceled
 *    regardless of whether the fd has been closed. no problems with fork.
 *    ok, the ring buffer is 200% undocumented (there isn't even a
 *    header file), but otherwise, it's pure bliss!
 * b) ok, watchers are one-shot, so you have to re-arm active ones
 *    on every iteration. so much for syscall-less event handling,
 *    but at least these re-arms can be batched, no big deal, right?
 * c) well, linux as usual: the documentation lies to you: io_submit
 *    sometimes returns EINVAL because the kernel doesn't feel like
 *    handling your poll mask - ttys can be polled for POLLOUT,
 *    POLLOUT|POLLIN, but polling for POLLIN fails. just great,
 *    so we have to fall back to something else (hello, epoll),
 *    but at least the fallback can be slow, because these are
 *    exceptional cases, right?
 * d) hmm, you have to tell the kernel the maximum number of watchers
 *    you want to queue when initialising the aio context. but of
 *    course the real limit is magically calculated in the kernel, and
 *    is often higher than we asked for. so we just have to destroy
 *    the aio context and re-create it a bit larger if we hit the limit.
 *    (starts to remind you of epoll? well, it's a bit more deterministic
 *    and less gambling, but still ugly as hell).
 * e) that's when you find out you can also hit an arbitrary system-wide
 *    limit. or the kernel simply doesn't want to handle your watchers.
 *    what the fuck do we do then? you guessed it, in the middle
 *    of event handling we have to switch to 100% epoll polling. and
 *    that better is as fast as normal epoll polling, so you practically
 *    have to use the normal epoll backend with all its quirks.
 * f) end result of this train wreck: it inherits all the disadvantages
 *    from epoll, while adding a number on its own. why even bother to use
 *    it? because if conditions are right and your fds are supported and you
 *    don't hit a limit, this backend is actually faster, doesn't gamble with
 *    your fds, batches watchers and events and doesn't require costly state
 *    recreates. well, until it does.
 * g) all of this makes this backend use almost twice as much code as epoll.
 *    which in turn uses twice as much code as poll. and that's not counting
 *    the fact that this backend also depends on the epoll backend, making
 *    it three times as much code as poll, or kqueue.
 * h) bleah. why can't linux just do kqueue. sure kqueue is ugly, but by now
 *    it's clear that whatever linux comes up with is far, far, far worse.
 */

#include <sys/time.h> /* actually linux/time.h, but we must assume they are compatible */
#include <poll.h>
#include <linux/aio_abi.h>

/*****************************************************************************/
/* syscall wrapdadoop - this section has the raw api/abi definitions */

#include <sys/syscall.h> /* no glibc wrappers */

/* aio_abi.h is not versioned in any way, so we cannot test for its existence */
#define IOCB_CMD_POLL 5

/* taken from linux/fs/aio.c. yup, that's a .c file.
 * not only is this totally undocumented, not even the source code
 * can tell you what the future semantics of compat_features and
 * incompat_features are, or what header_length actually is for.
 */
#define AIO_RING_MAGIC                0xa10a10a1
#define EV_AIO_RING_INCOMPAT_FEATURES 0
struct aio_ring
{
  unsigned id;   /* kernel internal index number */
  unsigned nr;   /* number of io_events */
  unsigned head; /* Written to by userland or by kernel. */
  unsigned tail;

  unsigned magic;
  unsigned compat_features;
  unsigned incompat_features;
  unsigned header_length; /* size of aio_ring */

  struct io_event io_events[0];
};

inline_size
int
evsys_io_setup (unsigned nr_events, aio_context_t *ctx_idp)
{
  return ev_syscall2 (SYS_io_setup, nr_events, ctx_idp);
}

inline_size
int
evsys_io_destroy (aio_context_t ctx_id)
{
  return ev_syscall1 (SYS_io_destroy, ctx_id);
}

inline_size
int
evsys_io_submit (aio_context_t ctx_id, long nr, struct iocb *cbp[])
{
  return ev_syscall3 (SYS_io_submit, ctx_id, nr, cbp);
}

inline_size
int
evsys_io_cancel (aio_context_t ctx_id, struct iocb *cbp, struct io_event *result)
{
  return ev_syscall3 (SYS_io_cancel, ctx_id, cbp, result);
}

inline_size
int
evsys_io_getevents (aio_context_t ctx_id, long min_nr, long nr, struct io_event *events, struct timespec *timeout)
{
  return ev_syscall5 (SYS_io_getevents, ctx_id, min_nr, nr, events, timeout);
}

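/* the wrappers above are all there is to the kernel interface we use.
 * as a self-contained illustration - a hedged sketch, not part of libev,
 * with no error recovery, assuming a 4.18+ kernel and stdin as the fd to
 * watch - this is roughly how IOCB_CMD_POLL is driven directly:
 */
#if 0
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <poll.h>
#include <sys/syscall.h>
#include <linux/aio_abi.h>

int
main (void)
{
  aio_context_t ctx = 0;
  struct iocb io;
  struct iocb *iop = &io;
  struct io_event ev;

  /* create an aio context able to hold 32 in-flight requests */
  if (syscall (SYS_io_setup, 32, &ctx) < 0)
    return 1;

  memset (&io, 0, sizeof (io));
  io.aio_lio_opcode = IOCB_CMD_POLL;
  io.aio_fildes     = 0;      /* watch stdin... */
  io.aio_buf        = POLLIN; /* ...for readability */

  if (syscall (SYS_io_submit, ctx, 1L, &iop) != 1)
    return 1;

  /* blocks until stdin becomes readable; ev.res then contains the poll mask */
  if (syscall (SYS_io_getevents, ctx, 1L, 1L, &ev, (struct timespec *)0) == 1)
    printf ("poll mask %ld\n", (long)ev.res);

  syscall (SYS_io_destroy, ctx);
  return 0;
}
#endif
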
/*****************************************************************************/
/* actual backend implementation */

ecb_cold
static int
linuxaio_nr_events (EV_P)
{
  /* we start with 16 iocbs and increase from there
   * that's tiny, but the kernel has a rather low system-wide
   * limit that can be reached quickly, so let's be parsimonious
   * with this resource.
   * Rest assured, the kernel generously rounds up small and big numbers
   * in different ways (but doesn't seem to charge you for it).
   * The 15 here is because the kernel usually has a power of two as aio-max-nr,
   * and this helps to take advantage of that limit.
   */

  /* we try to fill 4kB pages exactly.
   * the ring buffer header is 32 bytes, every io event is 32 bytes.
   * the kernel takes the io requests number, doubles it, adds 2
   * and adds the ring buffer.
   * the way we use this is by starting low, and then roughly doubling the
   * size each time we hit a limit.
   */

  int requests   = 15 << linuxaio_iteration;
  int one_page   =  (4096
                    / sizeof (struct io_event)    ) / 2; /* how many fit into one page */
  int first_page = ((4096 - sizeof (struct aio_ring))
                    / sizeof (struct io_event) - 2) / 2; /* how many fit into the first page */

  /* if everything fits into one page, use count exactly */
  if (requests > first_page)
    /* otherwise, round down to full pages and add the first page */
    requests = requests / one_page * one_page + first_page;

  return requests;
}

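/* a worked example of the sizing above (illustrative only, using the 32 octet
 * io_event and 32 octet ring header sizes documented above):
 * one_page = (4096 / 32) / 2 = 64, first_page = ((4096 - 32) / 32 - 2) / 2 = 62.
 * at iteration 0, requests = 15 <= 62 and is used as-is; at iteration 3,
 * requests = 15 << 3 = 120 > 62, so it becomes 120 / 64 * 64 + 62 = 126,
 * for which the kernel needs 2 * 126 + 2 = 254 io_events, i.e.
 * 254 * 32 + 32 = 8160 octets - just within two 4kB pages.
 */
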
/* we use our own wrapper structure in case we ever want to do something "clever" */
typedef struct aniocb
{
  struct iocb io;
  /*int inuse;*/
} *ANIOCBP;

inline_size
void
linuxaio_array_needsize_iocbp (ANIOCBP *base, int offset, int count)
{
  while (count--)
    {
      /* TODO: quite the overhead to allocate every iocb separately, maybe use our own allocator? */
      ANIOCBP iocb = (ANIOCBP)ev_malloc (sizeof (*iocb));

      /* full zero initialise is probably not required at the moment, but
       * this is not well documented, so we better do it.
       */
      memset (iocb, 0, sizeof (*iocb));

      iocb->io.aio_lio_opcode = IOCB_CMD_POLL;
      iocb->io.aio_data       = offset;
      iocb->io.aio_fildes     = offset;

      base [offset++] = iocb;
    }
}

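/* for illustration (hypothetical call): linuxaio_array_needsize_iocbp (base, 4, 3)
 * fills slots 4..6 with freshly allocated, zeroed iocbs whose aio_data and
 * aio_fildes are preset to their own fd number, so neither field needs to be
 * touched again when the fd is later armed in linuxaio_modify.
 */
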
ecb_cold
static void
linuxaio_free_iocbp (EV_P)
{
  while (linuxaio_iocbpmax--)
    ev_free (linuxaio_iocbps [linuxaio_iocbpmax]);

  linuxaio_iocbpmax = 0; /* next resize will completely reallocate the array, at some overhead */
}

static void
linuxaio_modify (EV_P_ int fd, int oev, int nev)
{
  array_needsize (ANIOCBP, linuxaio_iocbps, linuxaio_iocbpmax, fd + 1, linuxaio_array_needsize_iocbp);
  ANIOCBP iocb = linuxaio_iocbps [fd];

  if (ecb_expect_false (iocb->io.aio_reqprio < 0))
    {
      /* we handed this fd over to epoll, so undo this first */
      /* we do it manually because the optimisations on epoll_modify won't do us any good */
      epoll_ctl (backend_fd, EPOLL_CTL_DEL, fd, 0);
      anfds [fd].emask = 0;
      iocb->io.aio_reqprio = 0;
    }
  else if (ecb_expect_false (iocb->io.aio_buf))
    {
      /* iocb active, so cancel it first before resubmit */
      for (;;)
        {
          /* on all relevant kernels, io_cancel fails with EINPROGRESS on "success" */
          if (ecb_expect_false (evsys_io_cancel (linuxaio_ctx, &iocb->io, (struct io_event *)0) == 0))
            break;

          if (ecb_expect_true (errno == EINPROGRESS))
            break;

          /* the EINPROGRESS test is for nicer error message. clumsy. */
          if (errno != EINTR)
            {
              assert (("libev: linuxaio unexpected io_cancel failed", errno != EINTR && errno != EINPROGRESS));
              break;
            }
        }
    }

  iocb->io.aio_buf =
      (nev & EV_READ  ? POLLIN  : 0)
    | (nev & EV_WRITE ? POLLOUT : 0);

  if (nev)
    {
      /* queue iocb up for io_submit */
      /* this assumes we only ever get one call per fd per loop iteration */
      ++linuxaio_submitcnt;
      array_needsize (struct iocb *, linuxaio_submits, linuxaio_submitmax, linuxaio_submitcnt, array_needsize_noinit);
      linuxaio_submits [linuxaio_submitcnt - 1] = &iocb->io;
    }
}

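/* for illustration (hypothetical call): after
 * linuxaio_modify (EV_A_ fd, 0, EV_READ | EV_WRITE), the iocb for fd
 * carries aio_buf == (POLLIN | POLLOUT) and its address is queued in
 * linuxaio_submits, to be handed to io_submit by the next linuxaio_poll.
 */
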
static void
linuxaio_epoll_cb (EV_P_ struct ev_io *w, int revents)
{
  epoll_poll (EV_A_ 0);
}

inline_speed
void
linuxaio_fd_rearm (EV_P_ int fd)
{
  anfds [fd].events = 0;
  linuxaio_iocbps [fd]->io.aio_buf = 0;
  fd_change (EV_A_ fd, EV_ANFD_REIFY);
}

static void
linuxaio_parse_events (EV_P_ struct io_event *ev, int nr)
{
  while (nr)
    {
      int fd  = ev->data;
      int res = ev->res;

      assert (("libev: iocb fd must be in-bounds", fd >= 0 && fd < anfdmax));

      /* feed events, we do not expect or handle POLLNVAL */
      fd_event (
        EV_A_
        fd,
        (res & (POLLOUT | POLLERR | POLLHUP) ? EV_WRITE : 0)
        | (res & (POLLIN | POLLERR | POLLHUP) ? EV_READ : 0)
      );

      /* linux aio is oneshot: rearm fd. TODO: this does more work than strictly needed */
      linuxaio_fd_rearm (EV_A_ fd);

      --nr;
      ++ev;
    }
}

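/* example of the mask mapping above (illustrative): a hangup delivered as
 * res == POLLHUP feeds both EV_READ and EV_WRITE, mirroring how the other
 * backends report error/eof conditions to both reader and writer watchers.
 */
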
/* get any events from ring buffer, return true if any were handled */
static int
linuxaio_get_events_from_ring (EV_P)
{
  struct aio_ring *ring = (struct aio_ring *)linuxaio_ctx;
  unsigned head, tail;

  /* the kernel reads and writes both of these variables, */
  /* as a C extension, we assume that volatile use here */
  /* both makes reads atomic and once-only */
  head = *(volatile unsigned *)&ring->head;
  ECB_MEMORY_FENCE_ACQUIRE;
  tail = *(volatile unsigned *)&ring->tail;

  if (head == tail)
    return 0;

  /* parse all available events, but only once, to avoid starvation */
  if (ecb_expect_true (tail > head)) /* normal case around */
    linuxaio_parse_events (EV_A_ ring->io_events + head, tail - head);
  else /* wrapped around */
    {
      linuxaio_parse_events (EV_A_ ring->io_events + head, ring->nr - head);
      linuxaio_parse_events (EV_A_ ring->io_events, tail);
    }

  ECB_MEMORY_FENCE_RELEASE;
  /* as an extension to C, we hope that the volatile will make this atomic and once-only */
  *(volatile unsigned *)&ring->head = tail;

  return 1;
}

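/* worked example (illustrative): with ring->nr == 128, head == 120 and
 * tail == 5, the batch has wrapped around: the code above first parses
 * io_events [120 .. 127] (ring->nr - head == 8 events), then
 * io_events [0 .. 4] (tail == 5 events), and finally publishes head = 5.
 */
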
inline_size
int
linuxaio_ringbuf_valid (EV_P)
{
  struct aio_ring *ring = (struct aio_ring *)linuxaio_ctx;

  return ecb_expect_true (ring->magic == AIO_RING_MAGIC)
                      && ring->incompat_features == EV_AIO_RING_INCOMPAT_FEATURES
                      && ring->header_length == sizeof (struct aio_ring); /* TODO: or use it to find io_event[0]? */
}

/* read at least one event from kernel, or timeout */
inline_size
void
linuxaio_get_events (EV_P_ ev_tstamp timeout)
{
  struct timespec ts;
  struct io_event ioev[8]; /* 256 octet stack space */
  int want = 1; /* how many events to request */
  int ringbuf_valid = linuxaio_ringbuf_valid (EV_A);

  if (ecb_expect_true (ringbuf_valid))
    {
      /* if the ring buffer has any events, we don't wait or call the kernel at all */
      if (linuxaio_get_events_from_ring (EV_A))
        return;

      /* if the ring buffer is empty, and we don't have a timeout, then don't call the kernel */
      if (!timeout)
        return;
    }
  else
    /* no ring buffer, request slightly larger batch */
    want = sizeof (ioev) / sizeof (ioev [0]);

  /* no events, so wait for some
   * for fairness reasons, we do this in a loop, to fetch all events
   */
  for (;;)
    {
      int res;

      EV_RELEASE_CB;

      EV_TS_SET (ts, timeout);
      res = evsys_io_getevents (linuxaio_ctx, 1, want, ioev, &ts);

      EV_ACQUIRE_CB;

      if (res < 0)
        if (errno == EINTR)
          /* ignored, retry */;
        else
          ev_syserr ("(libev) linuxaio io_getevents");
      else if (res)
        {
          /* at least one event available, handle them */
          linuxaio_parse_events (EV_A_ ioev, res);

          if (ecb_expect_true (ringbuf_valid))
            {
              /* if we have a ring buffer, handle any remaining events in it */
              linuxaio_get_events_from_ring (EV_A);

              /* at this point, we should have handled all outstanding events */
              break;
            }
          else if (res < want)
            /* otherwise, if there were fewer events than we wanted, we assume there are no more */
            break;
        }
      else
        break; /* no events from the kernel, we are done */

      timeout = 0; /* only wait in the first iteration */
    }
}

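/* worked example (illustrative): without a usable ring buffer and 20 events
 * pending, the first evsys_io_getevents call returns want == 8 events, the
 * loop repeats with timeout 0 and fetches 8 more, and the third call returns
 * 4 < want, so we assume the kernel has been drained and break out.
 */
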
inline_size
int
linuxaio_io_setup (EV_P)
{
  linuxaio_ctx = 0;
  return evsys_io_setup (linuxaio_nr_events (EV_A), &linuxaio_ctx);
}

static void
linuxaio_poll (EV_P_ ev_tstamp timeout)
{
  int submitted;

  /* first phase: submit new iocbs */

  /* io_submit might return less than the requested number of iocbs */
  /* this is, afaics, only because of errors, but we go by the book and use a loop, */
  /* which allows us to pinpoint the erroneous iocb */
  for (submitted = 0; submitted < linuxaio_submitcnt; )
    {
      int res = evsys_io_submit (linuxaio_ctx, linuxaio_submitcnt - submitted, linuxaio_submits + submitted);

      if (ecb_expect_false (res < 0))
        if (errno == EINVAL)
          {
            /* This happens for unsupported fds, officially, but in my testing,
             * also randomly happens for supported fds. We fall back to good old
             * poll() here, under the assumption that this is a very rare case.
             * See https://lore.kernel.org/patchwork/patch/1047453/ to see
             * discussion about such a case (ttys) where polling for POLLIN
             * fails but POLLIN|POLLOUT works.
             */
            struct iocb *iocb = linuxaio_submits [submitted];
            epoll_modify (EV_A_ iocb->aio_fildes, 0, anfds [iocb->aio_fildes].events);
            iocb->aio_reqprio = -1; /* mark iocb as epoll */

            res = 1; /* skip this iocb - another iocb, another chance */
          }
        else if (errno == EAGAIN)
          {
            /* This happens when the ring buffer is full, or some other shit we
             * don't know and isn't documented. Most likely because we have too
             * many requests and linux aio can't be assed to handle them.
             * In this case, we try to allocate a larger ring buffer, freeing
             * ours first. This might fail, in which case we have to fall back to 100%
             * epoll.
             * God, how I hate linux not getting its act together. Ever.
             */
            evsys_io_destroy (linuxaio_ctx);
            linuxaio_submitcnt = 0;

            /* rearm all fds with active iocbs */
            {
              int fd;
              for (fd = 0; fd < linuxaio_iocbpmax; ++fd)
                if (linuxaio_iocbps [fd]->io.aio_buf)
                  linuxaio_fd_rearm (EV_A_ fd);
            }

            ++linuxaio_iteration;
            if (linuxaio_io_setup (EV_A) < 0)
              {
                /* TODO: rearm all and recreate epoll backend from scratch */
                /* TODO: might be more prudent? */

                /* too bad, we can't get a new aio context, go 100% epoll */
                linuxaio_free_iocbp (EV_A);
                ev_io_stop (EV_A_ &linuxaio_epoll_w);
                ev_ref (EV_A);
                linuxaio_ctx = 0;

                backend        = EVBACKEND_EPOLL;
                backend_modify = epoll_modify;
                backend_poll   = epoll_poll;
              }

            timeout = 0;
            /* it's easiest to handle this mess in another iteration */
            return;
          }
        else if (errno == EBADF)
          {
            assert (("libev: event loop rejected bad fd", errno != EBADF));
            fd_kill (EV_A_ linuxaio_submits [submitted]->aio_fildes);

            res = 1; /* skip this iocb */
          }
        else if (errno == EINTR) /* not seen in reality, not documented */
          res = 0; /* silently ignore and retry */
        else
          {
            ev_syserr ("(libev) linuxaio io_submit");
            res = 0;
          }

      submitted += res;
    }

  linuxaio_submitcnt = 0;

  /* second phase: fetch and parse events */

  linuxaio_get_events (EV_A_ timeout);
}

inline_size
int
linuxaio_init (EV_P_ int flags)
{
  /* would be great to have a nice test for IOCB_CMD_POLL instead */
  /* also: test some semi-common fd types, such as files and ttys in recommended_backends */
  /* 4.18 introduced IOCB_CMD_POLL, 4.19 made epoll work, and we need that */
  if (ev_linux_version () < 0x041300)
    return 0;

  if (!epoll_init (EV_A_ 0))
    return 0;

  linuxaio_iteration = 0;

  if (linuxaio_io_setup (EV_A) < 0)
    {
      epoll_destroy (EV_A);
      return 0;
    }

  ev_io_init (&linuxaio_epoll_w, linuxaio_epoll_cb, backend_fd, EV_READ);
  ev_set_priority (&linuxaio_epoll_w, EV_MAXPRI);
  ev_io_start (EV_A_ &linuxaio_epoll_w);
  ev_unref (EV_A); /* watcher should not keep loop alive */

  backend_modify = linuxaio_modify;
  backend_poll   = linuxaio_poll;

  linuxaio_iocbpmax = 0;
  linuxaio_iocbps = 0;

  linuxaio_submits = 0;
  linuxaio_submitcnt = 0;
  linuxaio_submitmax = 0;

  return EVBACKEND_LINUXAIO;
}

inline_size
void
linuxaio_destroy (EV_P)
{
  epoll_destroy (EV_A);
  linuxaio_free_iocbp (EV_A);
  evsys_io_destroy (linuxaio_ctx); /* fails in child, aio context is destroyed */
}

ecb_cold
static void
linuxaio_fork (EV_P)
{
  linuxaio_submitcnt = 0; /* all pointers were invalidated */
  linuxaio_free_iocbp (EV_A); /* this frees all iocbs, which is very heavy-handed */
  evsys_io_destroy (linuxaio_ctx); /* fails in child, aio context is destroyed */

  linuxaio_iteration = 0; /* we start over in the child */

  while (linuxaio_io_setup (EV_A) < 0)
    ev_syserr ("(libev) linuxaio io_setup");

  /* forking epoll should also effectively unregister all fds from the backend */
  epoll_fork (EV_A);
  /* epoll_fork already did this. hopefully */
  /*fd_rearm_all (EV_A);*/

  ev_io_stop (EV_A_ &linuxaio_epoll_w);
  ev_io_set (&linuxaio_epoll_w, backend_fd, EV_READ);
  ev_io_start (EV_A_ &linuxaio_epoll_w);
}