/*
 * libev linux aio fd activity backend
 *
 * Copyright (c) 2019 Marc Alexander Lehmann <libev@schmorp.de>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without modifica-
 * tion, are permitted provided that the following conditions are met:
 *
 *   1.  Redistributions of source code must retain the above copyright notice,
 *       this list of conditions and the following disclaimer.
 *
 *   2.  Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in the
 *       documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
 * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MER-
 * CHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
 * EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPE-
 * CIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
 * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTH-
 * ERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
 * OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Alternatively, the contents of this file may be used under the terms of
 * the GNU General Public License ("GPL") version 2 or any later version,
 * in which case the provisions of the GPL are applicable instead of
 * the above. If you wish to allow the use of your version of this file
 * only under the terms of the GPL and not to allow others to use your
 * version of this file under the BSD license, indicate your decision
 * by deleting the provisions above and replace them with the notice
 * and other provisions required by the GPL. If you do not delete the
 * provisions above, a recipient may use your version of this file under
 * either the BSD or the GPL.
 */

/*
 * general notes about linux aio:
 *
 * a) at first, the linux aio IOCB_CMD_POLL functionality introduced in
 *    4.18 looks too good to be true: both watchers and events can be
 *    batched, and events can even be handled in userspace using
 *    a ring buffer shared with the kernel. watchers can be canceled
 *    regardless of whether the fd has been closed. no problems with fork.
 *    ok, the ring buffer is 200% undocumented (there isn't even a
 *    header file), but otherwise, it's pure bliss!
 * b) ok, watchers are one-shot, so you have to re-arm active ones
 *    on every iteration. so much for syscall-less event handling,
 *    but at least these re-arms can be batched, no big deal, right?
 * c) well, linux as usual: the documentation lies to you: io_submit
 *    sometimes returns EINVAL because the kernel doesn't feel like
 *    handling your poll mask - ttys can be polled for POLLOUT,
 *    POLLOUT|POLLIN, but polling for POLLIN fails. just great,
 *    so we have to fall back to something else (hello, epoll),
 *    but at least the fallback can be slow, because these are
 *    exceptional cases, right?
 * d) hmm, you have to tell the kernel the maximum number of watchers
 *    you want to queue when initialising the aio context. but of
 *    course the real limit is magically calculated in the kernel, and
 *    is often higher than we asked for. so we just have to destroy
 *    the aio context and re-create it a bit larger if we hit the limit.
 *    (starts to remind you of epoll? well, it's a bit more deterministic
 *    and less gambling, but still ugly as hell).
 * e) that's when you find out you can also hit an arbitrary system-wide
 *    limit. or the kernel simply doesn't want to handle your watchers.
 *    what the fuck do we do then? you guessed it, in the middle
 *    of event handling we have to switch to 100% epoll polling. and
 *    that had better be as fast as normal epoll polling, so you practically
 *    have to use the normal epoll backend with all its quirks.
 * f) end result of this train wreck: it inherits all the disadvantages
 *    from epoll, while adding a number of its own. why even bother to use
 *    it? because if conditions are right and your fds are supported and you
 *    don't hit a limit, this backend is actually faster, doesn't gamble with
 *    your fds, batches watchers and events and doesn't require costly state
 *    recreates. well, until it does.
 * g) all of this makes this backend use almost twice as much code as epoll.
 *    which in turn uses twice as much code as poll. and that's not counting
 *    the fact that this backend also depends on the epoll backend, making
 *    it three times as much code as poll, or kqueue.
 * h) bleah. why can't linux just do kqueue. sure kqueue is ugly, but by now
 *    it's clear that whatever linux comes up with is far, far, far worse.
 */
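
/* the resulting strategy, in short (all of it implemented below):
 * each loop iteration batches one IOCB_CMD_POLL iocb per changed fd and
 * submits them with a single io_submit; completions are preferably read
 * straight from the shared ring buffer, falling back to io_getevents when
 * the ring layout is not the one we expect. fds the kernel rejects with
 * EINVAL are handed over to the epoll backend individually, and when
 * io_submit fails with EAGAIN the aio context is destroyed and re-created
 * with a larger size - or, if even that fails, we switch to epoll entirely.
 */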

#include <sys/time.h> /* actually linux/time.h, but we must assume they are compatible */
#include <poll.h>
#include <linux/aio_abi.h>

/*****************************************************************************/
/* syscall wrapdadoop - this section has the raw api/abi definitions */

#include <sys/syscall.h> /* no glibc wrappers */

/* aio_abi.h is not versioned in any way, so we cannot test for its existence */
#define IOCB_CMD_POLL 5

/* taken from linux/fs/aio.c. yup, that's a .c file.
 * not only is this totally undocumented, not even the source code
 * can tell you what the future semantics of compat_features and
 * incompat_features are, or what header_length actually is for.
 */
#define AIO_RING_MAGIC                  0xa10a10a1
#define EV_AIO_RING_INCOMPAT_FEATURES   0
struct aio_ring
{
  unsigned id;    /* kernel internal index number */
  unsigned nr;    /* number of io_events */
  unsigned head;  /* Written to by userland or by kernel. */
  unsigned tail;

  unsigned magic;
  unsigned compat_features;
  unsigned incompat_features;
  unsigned header_length; /* size of aio_ring */

  struct io_event io_events[0];
};
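
/* on current kernels the aio_context_t handle returned by io_setup is, as far
 * as fs/aio.c shows, simply the userspace address of the mmapped ring above -
 * linuxaio_get_events_from_ring below relies on exactly that when it casts
 * linuxaio_ctx to a struct aio_ring pointer.
 */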

inline_size
int
evsys_io_setup (unsigned nr_events, aio_context_t *ctx_idp)
{
  return ev_syscall2 (SYS_io_setup, nr_events, ctx_idp);
}

inline_size
int
evsys_io_destroy (aio_context_t ctx_id)
{
  return ev_syscall1 (SYS_io_destroy, ctx_id);
}

inline_size
int
evsys_io_submit (aio_context_t ctx_id, long nr, struct iocb *cbp[])
{
  return ev_syscall3 (SYS_io_submit, ctx_id, nr, cbp);
}

inline_size
int
evsys_io_cancel (aio_context_t ctx_id, struct iocb *cbp, struct io_event *result)
{
  return ev_syscall3 (SYS_io_cancel, ctx_id, cbp, result);
}

inline_size
int
evsys_io_getevents (aio_context_t ctx_id, long min_nr, long nr, struct io_event *events, struct timespec *timeout)
{
  return ev_syscall5 (SYS_io_getevents, ctx_id, min_nr, nr, events, timeout);
}
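
/* a rough sketch (not code used by libev) of how these raw wrappers fit
 * together for fd polling; error handling is omitted and the values are
 * made up, "fd" being some open file descriptor to watch:
 *
 *   aio_context_t ctx = 0;
 *   struct iocb io = { 0 };
 *   struct iocb *cbs[1] = { &io };
 *   struct io_event ev;
 *
 *   io.aio_lio_opcode = IOCB_CMD_POLL;
 *   io.aio_fildes     = fd;
 *   io.aio_buf        = POLLIN;  // the poll mask goes into aio_buf
 *   io.aio_data       = 42;      // opaque, echoed back in ev.data
 *
 *   evsys_io_setup (8, &ctx);
 *   evsys_io_submit (ctx, 1, cbs);
 *   evsys_io_getevents (ctx, 1, 1, &ev, 0); // ev.res holds the revents mask
 *   evsys_io_destroy (ctx);
 *
 * the backend below does the same thing, but batches submissions, reads
 * completions from the ring buffer when possible, and re-arms each fd after
 * its (one-shot) event.
 */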

/*****************************************************************************/
/* actual backend implementation */

ecb_cold
static int
linuxaio_nr_events (EV_P)
{
  /* we start with 15 iocbs and increase from there.
   * that's tiny, but the kernel has a rather low system-wide
   * limit that can be reached quickly, so let's be parsimonious
   * with this resource.
   * Rest assured, the kernel generously rounds up small and big numbers
   * in different ways (but doesn't seem to charge you for it).
   * The 15 here is because the kernel usually has a power of two as aio-max-nr,
   * and this helps to take advantage of that limit.
   */

  /* we try to fill 4kB pages exactly.
   * the ring buffer header is 32 bytes, every io event is 32 bytes.
   * the kernel takes the io requests number, doubles it, adds 2
   * and adds the ring buffer.
   * the way we use this is by starting low, and then roughly doubling the
   * size each time we hit a limit.
   */

  int requests   = 15 << linuxaio_iteration;
  int one_page   =  (4096
                    / sizeof (struct io_event)    ) / 2; /* how many fit into one page */
  int first_page = ((4096 - sizeof (struct aio_ring))
                    / sizeof (struct io_event) - 2) / 2; /* how many fit into the first page */
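
  /* a quick worked example of the arithmetic above (assuming the usual
   * 32-byte struct io_event and the 32-byte ring header): one_page is
   * (4096 / 32) / 2 = 64 and first_page is ((4096 - 32) / 32 - 2) / 2 = 62.
   * so iteration 0 asks for 15 requests (below first_page, used as-is),
   * while e.g. iteration 3 asks for 120, which gets rounded below to
   * 120 / 64 * 64 + 62 = 126 requests.
   */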

  /* if everything fits into one page, use count exactly */
  if (requests > first_page)
    /* otherwise, round down to full pages and add the first page */
    requests = requests / one_page * one_page + first_page;

  return requests;
}

/* we use our own wrapper structure in case we ever want to do something "clever" */
typedef struct aniocb
{
  struct iocb io;
  /*int inuse;*/
} *ANIOCBP;

inline_size
void
linuxaio_array_needsize_iocbp (ANIOCBP *base, int offset, int count)
{
  while (count--)
    {
      /* TODO: quite the overhead to allocate every iocb separately, maybe use our own allocator? */
      ANIOCBP iocb = (ANIOCBP)ev_malloc (sizeof (*iocb));

      /* full zero initialise is probably not required at the moment, but
       * this is not well documented, so we better do it.
       */
      memset (iocb, 0, sizeof (*iocb));

      iocb->io.aio_lio_opcode = IOCB_CMD_POLL;
      iocb->io.aio_fildes     = offset;

      base [offset++] = iocb;
    }
}

ecb_cold
static void
linuxaio_free_iocbp (EV_P)
{
  while (linuxaio_iocbpmax--)
    ev_free (linuxaio_iocbps [linuxaio_iocbpmax]);

  linuxaio_iocbpmax = 0; /* next resize will completely reallocate the array, at some overhead */
}

static void
linuxaio_modify (EV_P_ int fd, int oev, int nev)
{
  array_needsize (ANIOCBP, linuxaio_iocbps, linuxaio_iocbpmax, fd + 1, linuxaio_array_needsize_iocbp);
  ANIOCBP iocb = linuxaio_iocbps [fd];
  ANFD *anfd = &anfds [fd];

  if (ecb_expect_false (iocb->io.aio_reqprio < 0))
    {
      /* we handed this fd over to epoll, so undo this first */
      /* we do it manually because the optimisations on epoll_modify won't do us any good */
      epoll_ctl (backend_fd, EPOLL_CTL_DEL, fd, 0);
      anfd->emask = 0;
      iocb->io.aio_reqprio = 0;
    }
  else if (ecb_expect_false (iocb->io.aio_buf))
    {
      /* iocb active, so cancel it first before resubmit */
      /* this assumes we only ever get one call per fd per loop iteration */
      for (;;)
        {
          /* on all relevant kernels, io_cancel fails with EINPROGRESS on "success" */
          if (ecb_expect_false (evsys_io_cancel (linuxaio_ctx, &iocb->io, (struct io_event *)0) == 0))
            break;

          if (ecb_expect_true (errno == EINPROGRESS))
            break;

          /* the EINPROGRESS test is for nicer error message. clumsy. */
          if (errno != EINTR)
            {
              assert (("libev: linuxaio unexpected io_cancel failed", errno != EINTR && errno != EINPROGRESS));
              break;
            }
        }

      /* increment generation counter to avoid handling old events */
      ++anfd->egen;
    }

  iocb->io.aio_buf =
      (nev & EV_READ  ? POLLIN  : 0)
    | (nev & EV_WRITE ? POLLOUT : 0);

  if (nev)
    {
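      /* pack the fd into the low 32 bits of aio_data and the current
       * generation counter into the high 32 bits - the kernel echoes this
       * back verbatim, and linuxaio_parse_events only accepts an event whose
       * generation still matches, so completions from iocbs that were
       * canceled or re-armed in the meantime are ignored.
       */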
      iocb->io.aio_data = (uint32_t)fd | ((__u64)(uint32_t)anfd->egen << 32);

      /* queue iocb up for io_submit */
      /* this assumes we only ever get one call per fd per loop iteration */
      ++linuxaio_submitcnt;
      array_needsize (struct iocb *, linuxaio_submits, linuxaio_submitmax, linuxaio_submitcnt, array_needsize_noinit);
      linuxaio_submits [linuxaio_submitcnt - 1] = &iocb->io;
    }
}

static void
linuxaio_epoll_cb (EV_P_ struct ev_io *w, int revents)
{
  epoll_poll (EV_A_ 0);
}

inline_speed
void
linuxaio_fd_rearm (EV_P_ int fd)
{
  anfds [fd].events = 0;
  linuxaio_iocbps [fd]->io.aio_buf = 0;
  fd_change (EV_A_ fd, EV_ANFD_REIFY);
}

static void
linuxaio_parse_events (EV_P_ struct io_event *ev, int nr)
{
  while (nr)
    {
      int fd       = ev->data & 0xffffffff;
      uint32_t gen = ev->data >> 32;
      int res      = ev->res;

      assert (("libev: iocb fd must be in-bounds", fd >= 0 && fd < anfdmax));

      /* only accept events if generation counter matches */
      if (ecb_expect_true (gen == (uint32_t)anfds [fd].egen))
        {
          /* feed events, we do not expect or handle POLLNVAL */
          fd_event (
            EV_A_
            fd,
            (res & (POLLOUT | POLLERR | POLLHUP) ? EV_WRITE : 0)
            | (res & (POLLIN | POLLERR | POLLHUP) ? EV_READ : 0)
          );

          /* linux aio is oneshot: rearm fd. TODO: this does more work than strictly needed */
          linuxaio_fd_rearm (EV_A_ fd);
        }

      --nr;
      ++ev;
    }
}

/* get any events from ring buffer, return true if any were handled */
static int
linuxaio_get_events_from_ring (EV_P)
{
  struct aio_ring *ring = (struct aio_ring *)linuxaio_ctx;
  unsigned head, tail;

  /* the kernel reads and writes both of these variables, */
  /* as a C extension, we assume that volatile use here */
  /* both makes reads atomic and once-only */
  head = *(volatile unsigned *)&ring->head;
  ECB_MEMORY_FENCE_ACQUIRE;
  tail = *(volatile unsigned *)&ring->tail;

  if (head == tail)
    return 0;

  /* parse all available events, but only once, to avoid starvation */
  if (ecb_expect_true (tail > head)) /* normal case, no wrap-around */
    linuxaio_parse_events (EV_A_ ring->io_events + head, tail - head);
  else /* wrapped around */
    {
      linuxaio_parse_events (EV_A_ ring->io_events + head, ring->nr - head);
      linuxaio_parse_events (EV_A_ ring->io_events, tail);
    }

  ECB_MEMORY_FENCE_RELEASE;
  /* as an extension to C, we hope that the volatile will make this atomic and once-only */
  *(volatile unsigned *)&ring->head = tail;

  return 1;
}

inline_size
int
linuxaio_ringbuf_valid (EV_P)
{
  struct aio_ring *ring = (struct aio_ring *)linuxaio_ctx;

  return ecb_expect_true (ring->magic == AIO_RING_MAGIC)
      && ring->incompat_features == EV_AIO_RING_INCOMPAT_FEATURES
      && ring->header_length == sizeof (struct aio_ring); /* TODO: or use it to find io_event[0]? */
}

/* read at least one event from kernel, or timeout */
inline_size
void
linuxaio_get_events (EV_P_ ev_tstamp timeout)
{
  struct timespec ts;
  struct io_event ioev[8]; /* 256 octet stack space */
  int want = 1; /* how many events to request */
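  /* with a usable ring buffer we only need to block for a single event via
   * io_getevents - anything further that has accumulated is drained from the
   * ring afterwards; without a usable ring we ask for a whole batch instead.
   */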
  int ringbuf_valid = linuxaio_ringbuf_valid (EV_A);

  if (ecb_expect_true (ringbuf_valid))
    {
      /* if the ring buffer has any events, we don't wait or call the kernel at all */
      if (linuxaio_get_events_from_ring (EV_A))
        return;

      /* if the ring buffer is empty, and we don't have a timeout, then don't call the kernel */
      if (!timeout)
        return;
    }
  else
    /* no ringbuffer, request slightly larger batch */
    want = sizeof (ioev) / sizeof (ioev [0]);

  /* no events, so wait for some
   * for fairness reasons, we do this in a loop, to fetch all events
   */
  for (;;)
    {
      int res;

      EV_RELEASE_CB;

      EV_TS_SET (ts, timeout);
      res = evsys_io_getevents (linuxaio_ctx, 1, want, ioev, &ts);

      EV_ACQUIRE_CB;

      if (res < 0)
        if (errno == EINTR)
          /* ignored, retry */;
        else
          ev_syserr ("(libev) linuxaio io_getevents");
      else if (res)
        {
          /* at least one event available, handle them */
          linuxaio_parse_events (EV_A_ ioev, res);

          if (ecb_expect_true (ringbuf_valid))
            {
              /* if we have a ring buffer, handle any remaining events in it */
              linuxaio_get_events_from_ring (EV_A);

              /* at this point, we should have handled all outstanding events */
              break;
            }
          else if (res < want)
            /* otherwise, if there were fewer events than we wanted, we assume there are no more */
            break;
        }
      else
        break; /* no events from the kernel, we are done */

      timeout = 0; /* only wait in the first iteration */
    }
}

inline_size
int
linuxaio_io_setup (EV_P)
{
  linuxaio_ctx = 0;
  return evsys_io_setup (linuxaio_nr_events (EV_A), &linuxaio_ctx);
}

static void
linuxaio_poll (EV_P_ ev_tstamp timeout)
{
  int submitted;

  /* first phase: submit new iocbs */

  /* io_submit might return less than the requested number of iocbs */
  /* this is, afaics, only because of errors, but we go by the book and use a loop, */
  /* which allows us to pinpoint the erroneous iocb */
  for (submitted = 0; submitted < linuxaio_submitcnt; )
    {
      int res = evsys_io_submit (linuxaio_ctx, linuxaio_submitcnt - submitted, linuxaio_submits + submitted);

      if (ecb_expect_false (res < 0))
        if (errno == EINVAL)
          {
            /* This happens for unsupported fds, officially, but in my testing,
             * also randomly happens for supported fds. We fall back to good old
             * poll() here, under the assumption that this is a very rare case.
             * See https://lore.kernel.org/patchwork/patch/1047453/ to see
             * discussion about such a case (ttys) where polling for POLLIN
             * fails but POLLIN|POLLOUT works.
             */
            struct iocb *iocb = linuxaio_submits [submitted];
            epoll_modify (EV_A_ iocb->aio_fildes, 0, anfds [iocb->aio_fildes].events);
            iocb->aio_reqprio = -1; /* mark iocb as epoll */

            res = 1; /* skip this iocb - another iocb, another chance */
          }
        else if (errno == EAGAIN)
          {
            /* This happens when the ring buffer is full, or some other shit we
             * don't know and isn't documented. Most likely because we have too
             * many requests and linux aio can't be assed to handle them.
             * In this case, we try to allocate a larger ring buffer, freeing
             * ours first. This might fail, in which case we have to fall back to 100%
             * epoll.
             * God, how I hate linux not getting its act together. Ever.
             */
            evsys_io_destroy (linuxaio_ctx);
            linuxaio_submitcnt = 0;

            /* rearm all fds with active iocbs */
            {
              int fd;
              for (fd = 0; fd < linuxaio_iocbpmax; ++fd)
                if (linuxaio_iocbps [fd]->io.aio_buf)
                  linuxaio_fd_rearm (EV_A_ fd);
            }

            ++linuxaio_iteration;
            if (linuxaio_io_setup (EV_A) < 0)
              {
                /* TODO: rearming all fds and recreating the epoll backend from scratch might be more prudent? */

                /* too bad, we can't get a new aio context, go 100% epoll */
                linuxaio_free_iocbp (EV_A);
                ev_io_stop (EV_A_ &linuxaio_epoll_w);
                ev_ref (EV_A);
                linuxaio_ctx = 0;

                backend        = EVBACKEND_EPOLL;
                backend_modify = epoll_modify;
                backend_poll   = epoll_poll;
              }

            timeout = 0;
            /* it's easiest to handle this mess in another iteration */
            return;
          }
        else if (errno == EBADF)
          {
            assert (("libev: event loop rejected bad fd", errno != EBADF));
            fd_kill (EV_A_ linuxaio_submits [submitted]->aio_fildes);

            res = 1; /* skip this iocb */
          }
        else if (errno == EINTR) /* not seen in reality, not documented */
          res = 0; /* silently ignore and retry */
        else
          {
            ev_syserr ("(libev) linuxaio io_submit");
            res = 0;
          }

      submitted += res;
    }

  linuxaio_submitcnt = 0;

  /* second phase: fetch and parse events */

  linuxaio_get_events (EV_A_ timeout);
}

inline_size
int
linuxaio_init (EV_P_ int flags)
{
  /* would be great to have a nice test for IOCB_CMD_POLL instead */
  /* also: test some semi-common fd types, such as files and ttys in recommended_backends */
  /* 4.18 introduced IOCB_CMD_POLL, 4.19 made epoll work, and we need that */
  if (ev_linux_version () < 0x041300)
    return 0;

  if (!epoll_init (EV_A_ 0))
    return 0;

  linuxaio_iteration = 0;

  if (linuxaio_io_setup (EV_A) < 0)
    {
      epoll_destroy (EV_A);
      return 0;
    }

  ev_io_init  (&linuxaio_epoll_w, linuxaio_epoll_cb, backend_fd, EV_READ);
  ev_set_priority (&linuxaio_epoll_w, EV_MAXPRI);
  ev_io_start (EV_A_ &linuxaio_epoll_w);
  ev_unref (EV_A); /* watcher should not keep loop alive */

  backend_modify = linuxaio_modify;
  backend_poll   = linuxaio_poll;

  linuxaio_iocbpmax = 0;
  linuxaio_iocbps = 0;

  linuxaio_submits = 0;
  linuxaio_submitmax = 0;
  linuxaio_submitcnt = 0;

  return EVBACKEND_LINUXAIO;
}

inline_size
void
linuxaio_destroy (EV_P)
{
  epoll_destroy (EV_A);
  linuxaio_free_iocbp (EV_A);
  evsys_io_destroy (linuxaio_ctx); /* fails in child, aio context is destroyed */
}

ecb_cold
static void
linuxaio_fork (EV_P)
{
  linuxaio_submitcnt = 0; /* all pointers were invalidated */
  linuxaio_free_iocbp (EV_A); /* this frees all iocbs, which is very heavy-handed */
  evsys_io_destroy (linuxaio_ctx); /* fails in child, aio context is destroyed */

  linuxaio_iteration = 0; /* we start over in the child */

  while (linuxaio_io_setup (EV_A) < 0)
    ev_syserr ("(libev) linuxaio io_setup");

  /* forking epoll should also effectively unregister all fds from the backend */
  epoll_fork (EV_A);
  /* epoll_fork already did this. hopefully */
  /*fd_rearm_all (EV_A);*/

  ev_io_stop  (EV_A_ &linuxaio_epoll_w);
  ev_io_set   (&linuxaio_epoll_w, backend_fd, EV_READ);
  ev_io_start (EV_A_ &linuxaio_epoll_w);
}