
Comparing libev/ev_linuxaio.c (file contents):
Revision 1.9 by root, Sat Jun 22 22:29:38 2019 UTC vs.
Revision 1.27 by root, Tue Jun 25 05:17:50 2019 UTC

35 * and other provisions required by the GPL. If you do not delete the 35 * and other provisions required by the GPL. If you do not delete the
36 * provisions above, a recipient may use your version of this file under 36 * provisions above, a recipient may use your version of this file under
37 * either the BSD or the GPL. 37 * either the BSD or the GPL.
38 */ 38 */
39 39
40/*
41 * general notes about linux aio:
42 *
43 * a) at first, the linux aio IOCB_CMD_POLL functionality introduced in
44 * 4.18 looks too good to be true: both watchers and events can be
45 * batched, and events can even be handled in userspace using
46 * a ring buffer shared with the kernel. watchers can be canceled
47 * regardless of whether the fd has been closed. no problems with fork.
48 * ok, the ring buffer is 200% undocumented (there isn't even a
49 * header file), but otherwise, it's pure bliss!
50 * b) ok, watchers are one-shot, so you have to re-arm active ones
51 * on every iteration. so much for syscall-less event handling,
52 * but at least these re-arms can be batched, no big deal, right?
53 * c) well, linux as usual: the documentation lies to you: io_submit
54 * sometimes returns EINVAL because the kernel doesn't feel like
55 * handling your poll mask - ttys can be polled for POLLOUT,
56 * POLLOUT|POLLIN, but polling for POLLIN fails. just great,
57 * so we have to fall back to something else (hello, epoll),
58 * but at least the fallback can be slow, because these are
59 * exceptional cases, right?
60 * d) hmm, you have to tell the kernel the maximum number of watchers
61 * you want to queue when initialising the aio context. but of
62 * course the real limit is magically calculated in the kernel, and
63 * is often higher than we asked for. so we just have to destroy
64 * the aio context and re-create it a bit larger if we hit the limit.
65 * (starts to remind you of epoll? well, it's a bit more deterministic
66 * and less gambling, but still ugly as hell).
67 * e) that's when you find out you can also hit an arbitrary system-wide
68 * limit. or the kernel simply doesn't want to handle your watchers.
69 * what the fuck do we do then? you guessed it, in the middle
70 * of event handling we have to switch to 100% epoll polling. and
71 * that had better be as fast as normal epoll polling, so you practically
72 * have to use the normal epoll backend with all its quirks.
73 * f) end result of this train wreck: it inherits all the disadvantages
74 * from epoll, while adding a number on its own. why even bother to use
75 * it? because if conditions are right and your fds are supported and you
76 * don't hit a limit, this backend is actually faster, doesn't gamble with
77 * your fds, batches watchers and events and doesn't require costly state
78 * recreates. well, until it does.
79 * g) all of this makes this backend use almost twice as much code as epoll.
80 * which in turn uses twice as much code as poll. and that's not counting
81 * the fact that this backend also depends on the epoll backend, making
82 * it three times as much code as poll, or kqueue.
83 * h) bleah. why can't linux just do kqueue. sure kqueue is ugly, but by now
84 * it's clear that whatever linux comes up with is far, far, far worse.
85 */
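
A minimal stand-alone sketch of the round trip these notes describe, assuming a 4.18+ kernel; IOCB_CMD_POLL is defined by hand because older aio_abi.h headers lack it, and all error handling beyond perror is omitted:

#include <poll.h>
#include <stdio.h>
#include <string.h>
#include <sys/syscall.h>
#include <unistd.h>
#include <linux/aio_abi.h>

#ifndef IOCB_CMD_POLL
# define IOCB_CMD_POLL 5 /* not in pre-4.18 headers */
#endif

int
main (void)
{
  aio_context_t ctx = 0;
  struct iocb iocb;
  struct iocb *iocbs[1] = { &iocb };
  struct io_event ev;

  if (syscall (SYS_io_setup, 8, &ctx) < 0)
    return perror ("io_setup"), 1;

  memset (&iocb, 0, sizeof iocb);
  iocb.aio_lio_opcode = IOCB_CMD_POLL;
  iocb.aio_fildes     = 0;      /* watch stdin */
  iocb.aio_buf        = POLLIN; /* the poll mask goes into aio_buf */
  iocb.aio_data       = 0;      /* user cookie, echoed back in ev.data */

  /* may fail with EINVAL for fds whose mask the kernel dislikes, see (c) */
  if (syscall (SYS_io_submit, ctx, 1, iocbs) != 1)
    return perror ("io_submit"), 1;

  /* blocks until stdin is readable. the watcher is now used up and
   * would have to be submitted again - one-shot, see (b) */
  if (syscall (SYS_io_getevents, ctx, 1, 1, &ev, (void *)0) == 1)
    printf ("res=%lld\n", (long long)ev.res);

  syscall (SYS_io_destroy, ctx);
  return 0;
}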
86
40#include <sys/time.h> /* actually linux/time.h, but we must assume they are compatible */ 87#include <sys/time.h> /* actually linux/time.h, but we must assume they are compatible */
41#include <poll.h> 88#include <poll.h>
42#include <linux/aio_abi.h> 89#include <linux/aio_abi.h>
43 90
44/* we try to fill 4kB pages exactly.
45 * the ring buffer header is 32 bytes, every io event is 32 bytes.
46 * the kernel takes the io event number, doubles it, adds 2, adds the ring buffer.
47 * therefore the calculation below will use "exactly" 4kB for the ring buffer
48 */
49#define EV_LINUXAIO_DEPTH (128 / 2 - 2 - 1) /* max. number of io events per batch */
50
51/*****************************************************************************/ 91/*****************************************************************************/
52/* syscall wrapdadoop */ 92/* syscall wrapdadoop - this section has the raw api/abi definitions */
53 93
54#include <sys/syscall.h> /* no glibc wrappers */ 94#include <sys/syscall.h> /* no glibc wrappers */
55 95
56/* aio_abi.h is not versioned in any way, so we cannot test for its existence */ 96/* aio_abi.h is not versioned in any way, so we cannot test for its existence */
57#define IOCB_CMD_POLL 5 97#define IOCB_CMD_POLL 5
58 98
59/* taken from linux/fs/aio.c */ 99/* taken from linux/fs/aio.c. yup, that's a .c file.
100 * not only is this totally undocumented, not even the source code
101 * can tell you what the future semantics of compat_features and
102 * incompat_features are, or what header_length actually is for.
103 */
60#define AIO_RING_MAGIC 0xa10a10a1 104#define AIO_RING_MAGIC 0xa10a10a1
61#define AIO_RING_INCOMPAT_FEATURES 0 105#define AIO_RING_INCOMPAT_FEATURES 0
62struct aio_ring 106struct aio_ring
63{ 107{
64 unsigned id; /* kernel internal index number */ 108 unsigned id; /* kernel internal index number */
74 struct io_event io_events[0]; 118 struct io_event io_events[0];
75}; 119};
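
On the kernels this backend targets, the aio_context_t handed back by io_setup is in fact the user-space address of this mapped ring - that is what the cast in linuxaio_get_events_from_ring below relies on. A sketch of the corresponding sanity check (aio_ring_looks_usable is a hypothetical helper, not part of the file):

static int
aio_ring_looks_usable (aio_context_t ctx_id)
{
  struct aio_ring *ring = (struct aio_ring *)ctx_id;

  /* bail out unless the undocumented layout matches what we compiled against */
  return ring->magic == AIO_RING_MAGIC
      && ring->incompat_features == AIO_RING_INCOMPAT_FEATURES
      && ring->header_length == sizeof (struct aio_ring);
}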
76 120
77inline_size 121inline_size
78int 122int
79ev_io_setup (unsigned nr_events, aio_context_t *ctx_idp) 123evsys_io_setup (unsigned nr_events, aio_context_t *ctx_idp)
80{ 124{
81 return syscall (SYS_io_setup, nr_events, ctx_idp); 125 return syscall (SYS_io_setup, nr_events, ctx_idp);
82} 126}
83 127
84inline_size 128inline_size
85int 129int
86ev_io_destroy (aio_context_t ctx_id) 130evsys_io_destroy (aio_context_t ctx_id)
87{ 131{
88 return syscall (SYS_io_destroy, ctx_id); 132 return syscall (SYS_io_destroy, ctx_id);
89} 133}
90 134
91inline_size 135inline_size
92int 136int
93ev_io_submit (aio_context_t ctx_id, long nr, struct iocb *cbp[]) 137evsys_io_submit (aio_context_t ctx_id, long nr, struct iocb *cbp[])
94{ 138{
95 return syscall (SYS_io_submit, ctx_id, nr, cbp); 139 return syscall (SYS_io_submit, ctx_id, nr, cbp);
96} 140}
97 141
98inline_size 142inline_size
99int 143int
100ev_io_cancel (aio_context_t ctx_id, struct iocb *cbp, struct io_event *result) 144evsys_io_cancel (aio_context_t ctx_id, struct iocb *cbp, struct io_event *result)
101{ 145{
102 return syscall (SYS_io_cancel, ctx_id, cbp, result); 146 return syscall (SYS_io_cancel, ctx_id, cbp, result);
103} 147}
104 148
105inline_size 149inline_size
106int 150int
107ev_io_getevents (aio_context_t ctx_id, long min_nr, long nr, struct io_event *events, struct timespec *timeout) 151evsys_io_getevents (aio_context_t ctx_id, long min_nr, long nr, struct io_event *events, struct timespec *timeout)
108{ 152{
109 return syscall (SYS_io_getevents, ctx_id, min_nr, nr, events, timeout); 153 return syscall (SYS_io_getevents, ctx_id, min_nr, nr, events, timeout);
110} 154}
111 155
112/*****************************************************************************/ 156/*****************************************************************************/
113/* actual backend implementation */ 157/* actual backend implementation */
114 158
159ecb_cold
160static int
161linuxaio_nr_events (EV_P)
162{
163 /* we start with 15 iocbs and increase from there
164 * that's tiny, but the kernel has a rather low system-wide
165 * limit that can be reached quickly, so let's be parsimonious
166 * with this resource.
167 * Rest assured, the kernel generously rounds up small and big numbers
168 * in different ways (but doesn't seem to charge you for it).
169 * The 15 here is because the kernel usually has a power of two as aio-max-nr,
170 * and this helps to take advantage of that limit.
171 */
172
173 /* we try to fill 4kB pages exactly.
174 * the ring buffer header is 32 bytes, every io event is 32 bytes.
175 * the kernel takes the number of io requests, doubles it, adds 2
176 * and adds the ring buffer.
177 * the way we use this is by starting low, and then roughly doubling the
178 * size each time we hit a limit.
179 */
180
181 int requests = 15 << linuxaio_iteration;
182 int one_page = (4096
183 / sizeof (struct io_event) ) / 2; /* how many fit into one page */
184 int first_page = ((4096 - sizeof (struct aio_ring))
185 / sizeof (struct io_event) - 2) / 2; /* how many fit into the first page */
186
187 /* if everything fits into one page, use count exactly */
188 if (requests > first_page)
189 /* otherwise, round down to full pages and add the first page */
190 requests = requests / one_page * one_page + first_page;
191
192 return requests;
193}
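
Worked through with the 32-byte struct io_event and 32-byte ring header from the comment above: one_page = (4096 / 32) / 2 = 64 and first_page = ((4096 - 32) / 32 - 2) / 2 = 62 (integer division). Iteration 0 therefore asks for 15 requests and, since 15 <= 62, uses that count exactly; iteration 3 asks for 15 << 3 = 120, which exceeds first_page and is rounded to 120 / 64 * 64 + 62 = 126.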
194
115/* we use our own wrapper structure in case we ever want to do something "clever" */ 195/* we use our own wrapper structure in case we ever want to do something "clever" */
116typedef struct aniocb 196typedef struct aniocb
117{ 197{
118 struct iocb io; 198 struct iocb io;
119 /*int inuse;*/ 199 /*int inuse;*/
120} *ANIOCBP; 200} *ANIOCBP;
121 201
122inline_size 202inline_size
123void 203void
124linuxaio_array_needsize_iocbp (ANIOCBP *base, int count) 204linuxaio_array_needsize_iocbp (ANIOCBP *base, int offset, int count)
125{ 205{
126 /* TODO: quite the overhead to allocate every iocb separately, maybe use our own allocator? */
127 while (count--) 206 while (count--)
128 { 207 {
208 /* TODO: quite the overhead to allocate every iocb separately, maybe use our own allocator? */
129 *base = (ANIOCBP)ev_malloc (sizeof (**base)); 209 ANIOCBP iocb = (ANIOCBP)ev_malloc (sizeof (*iocb));
130 /* TODO: full zero initialize required? */ 210
211 /* full zero initialise is probably not required at the moment, but
212 * this is not well documented, so we better do it.
213 */
131 memset (*base, 0, sizeof (**base)); 214 memset (iocb, 0, sizeof (*iocb));
132 /* would be nice to initialize fd/data as well, but array_needsize API doesn't support that */ 215
133 (*base)->io.aio_lio_opcode = IOCB_CMD_POLL; 216 iocb->io.aio_lio_opcode = IOCB_CMD_POLL;
134 ++base; 217 iocb->io.aio_data = offset;
218 iocb->io.aio_fildes = offset;
219
220 base [offset++] = iocb;
135 } 221 }
136} 222}
137 223
138ecb_cold 224ecb_cold
139static void 225static void
147 233
148static void 234static void
149linuxaio_modify (EV_P_ int fd, int oev, int nev) 235linuxaio_modify (EV_P_ int fd, int oev, int nev)
150{ 236{
151 array_needsize (ANIOCBP, linuxaio_iocbps, linuxaio_iocbpmax, fd + 1, linuxaio_array_needsize_iocbp); 237 array_needsize (ANIOCBP, linuxaio_iocbps, linuxaio_iocbpmax, fd + 1, linuxaio_array_needsize_iocbp);
152 struct aniocb *iocb = linuxaio_iocbps [fd]; 238 ANIOCBP iocb = linuxaio_iocbps [fd];
239
240 if (iocb->io.aio_reqprio < 0)
241 {
242 /* we handed this fd over to epoll, so undo this first */
243 /* we do it manually because the optimisations on epoll_modify won't do us any good */
244 epoll_ctl (backend_fd, EPOLL_CTL_DEL, fd, 0);
245 iocb->io.aio_reqprio = 0;
246 }
153 247
154 if (iocb->io.aio_buf) 248 if (iocb->io.aio_buf)
155 ev_io_cancel (linuxaio_ctx, &iocb->io, (struct io_event *)0); /* always returns an error on relevant kernels */ 249 /* io_cancel always returns some error on relevant kernels, but works */
250 evsys_io_cancel (linuxaio_ctx, &iocb->io, (struct io_event *)0);
156 251
157 if (nev) 252 if (nev)
158 { 253 {
159 iocb->io.aio_data = fd;
160 iocb->io.aio_fildes = fd;
161 iocb->io.aio_buf = 254 iocb->io.aio_buf =
162 (nev & EV_READ ? POLLIN : 0) 255 (nev & EV_READ ? POLLIN : 0)
163 | (nev & EV_WRITE ? POLLOUT : 0); 256 | (nev & EV_WRITE ? POLLOUT : 0);
164 257
165 /* queue iocb up for io_submit */ 258 /* queue iocb up for io_submit */
166 /* this assumes we only ever get one call per fd per loop iteration */ 259 /* this assumes we only ever get one call per fd per loop iteration */
169 linuxaio_submits [linuxaio_submitcnt - 1] = &iocb->io; 262 linuxaio_submits [linuxaio_submitcnt - 1] = &iocb->io;
170 } 263 }
171} 264}
172 265
173static void 266static void
267linuxaio_epoll_cb (EV_P_ struct ev_io *w, int revents)
268{
269 epoll_poll (EV_A_ 0);
270}
271
272static void
273linuxaio_fd_rearm (EV_P_ int fd)
274{
275 anfds [fd].events = 0;
276 linuxaio_iocbps [fd]->io.aio_buf = 0;
277 fd_change (EV_A_ fd, EV_ANFD_REIFY);
278}
279
280static void
174linuxaio_parse_events (EV_P_ struct io_event *ev, int nr) 281linuxaio_parse_events (EV_P_ struct io_event *ev, int nr)
175{ 282{
176 while (nr) 283 while (nr)
177 { 284 {
178 int fd = ev->data; 285 int fd = ev->data;
179 int res = ev->res; 286 int res = ev->res;
180 287
181 assert (("libev: iocb fd must be in-bounds", fd >= 0 && fd < anfdmax)); 288 assert (("libev: iocb fd must be in-bounds", fd >= 0 && fd < anfdmax));
182 289
183 /* linux aio is oneshot: rearm fd */
184 linuxaio_iocbps [fd]->io.aio_buf = 0;
185 anfds [fd].events = 0;
186 fd_change (EV_A_ fd, 0);
187
188 /* feed events, we do not expect or handle POLLNVAL */ 290 /* feed events, we do not expect or handle POLLNVAL */
189 if (ecb_expect_false (res & POLLNVAL))
190 fd_kill (EV_A_ fd);
191 else
192 fd_event ( 291 fd_event (
193 EV_A_ 292 EV_A_
194 fd, 293 fd,
195 (res & (POLLOUT | POLLERR | POLLHUP) ? EV_WRITE : 0) 294 (res & (POLLOUT | POLLERR | POLLHUP) ? EV_WRITE : 0)
196 | (res & (POLLIN | POLLERR | POLLHUP) ? EV_READ : 0) 295 | (res & (POLLIN | POLLERR | POLLHUP) ? EV_READ : 0)
197 ); 296 );
297
298 /* linux aio is oneshot: rearm fd. TODO: this does more work than needed */
299 linuxaio_fd_rearm (EV_A_ fd);
198 300
199 --nr; 301 --nr;
200 ++ev; 302 ++ev;
201 } 303 }
202} 304}
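
For example, a raw result of just POLLHUP feeds EV_READ | EV_WRITE, so reader and writer watchers alike wake up and can detect the hangup through their normal read/write paths.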
203 305
204/* get any events from ringbuffer, return true if any were handled */ 306/* get any events from ring buffer, return true if any were handled */
205static int 307static int
206linuxaio_get_events_from_ring (EV_P) 308linuxaio_get_events_from_ring (EV_P)
207{ 309{
208 struct aio_ring *ring = (struct aio_ring *)linuxaio_ctx; 310 struct aio_ring *ring = (struct aio_ring *)linuxaio_ctx;
209 311
210 unsigned head = ring->head; 312 /* the kernel reads and writes both of these variables, */
313 /* as a C extension, we assume that volatile use here */
314 /* both makes reads atomic and once-only */
315 unsigned head = *(volatile unsigned *)&ring->head;
211 unsigned tail = *(volatile unsigned *)&ring->tail; 316 unsigned tail = *(volatile unsigned *)&ring->tail;
212 317
213 if (head == tail) 318 if (head == tail)
214 return 0; 319 return 0;
215 320
216 /* bail out if the ring buffer doesn't match the expected layout */ 321 /* bail out if the ring buffer doesn't match the expected layout */
217 if (ecb_expect_false (ring->magic != AIO_RING_MAGIC) 322 if (expect_false (ring->magic != AIO_RING_MAGIC)
218 || ring->incompat_features != AIO_RING_INCOMPAT_FEATURES 323 || ring->incompat_features != AIO_RING_INCOMPAT_FEATURES
219 || ring->header_length != sizeof (struct aio_ring)) /* TODO: or use it to find io_event[0]? */ 324 || ring->header_length != sizeof (struct aio_ring)) /* TODO: or use it to find io_event[0]? */
220 return 0; 325 return 0;
221 326
327 /* make sure the events up to tail are visible */
222 ECB_MEMORY_FENCE_ACQUIRE; 328 ECB_MEMORY_FENCE_ACQUIRE;
223 329
224 /* parse all available events, but only once, to avoid starvation */ 330 /* parse all available events, but only once, to avoid starvation */
225 if (tail > head) /* normal case, no wrap */ 331 if (tail > head) /* normal case, no wrap */
226 linuxaio_parse_events (EV_A_ ring->io_events + head, tail - head); 332 linuxaio_parse_events (EV_A_ ring->io_events + head, tail - head);
228 { 334 {
229 linuxaio_parse_events (EV_A_ ring->io_events + head, ring->nr - head); 335 linuxaio_parse_events (EV_A_ ring->io_events + head, ring->nr - head);
230 linuxaio_parse_events (EV_A_ ring->io_events, tail); 336 linuxaio_parse_events (EV_A_ ring->io_events, tail);
231 } 337 }
232 338
233 ring->head = tail; 339 ECB_MEMORY_FENCE_RELAXED;
340 /* as an extension to C, we hope that the volatile will make this atomic and once-only */
341 *(volatile unsigned *)&ring->head = tail;
342 /* make sure kernel can see our new head value - probably not required */
343 ECB_MEMORY_FENCE_RELEASE;
234 344
235 return 1; 345 return 1;
236} 346}
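
For clarity, here is the same single-consumer head/tail protocol restated with C11 atomics - a sketch only, since the real code must stay portable to pre-C11 compilers and therefore spells it volatile-plus-ECB-fences; struct aio_ring_c11 and ring_consume are hypothetical names:

#include <stdatomic.h>
#include <linux/aio_abi.h> /* struct io_event */

/* hypothetical mirror of the ring header with atomic head/tail */
struct aio_ring_c11
{
  unsigned id, nr;
  _Atomic unsigned head, tail;
  unsigned magic, compat_features, incompat_features, header_length;
  struct io_event io_events[];
};

static int
ring_consume (struct aio_ring_c11 *ring)
{
  unsigned head = atomic_load_explicit (&ring->head, memory_order_relaxed);
  /* acquire pairs with the kernel's store of tail, like ECB_MEMORY_FENCE_ACQUIRE above */
  unsigned tail = atomic_load_explicit (&ring->tail, memory_order_acquire);

  if (head == tail)
    return 0; /* ring empty */

  /* ... parse io_events in [head, tail), wrapping at ring->nr ... */

  /* release publishes the new head so the kernel may reuse the slots */
  atomic_store_explicit (&ring->head, tail, memory_order_release);
  return 1;
}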
237 347
238/* read at least one event from kernel, or timeout */ 348/* read at least one event from kernel, or timeout */
239inline_size 349inline_size
240void 350void
241linuxaio_get_events (EV_P_ ev_tstamp timeout) 351linuxaio_get_events (EV_P_ ev_tstamp timeout)
242{ 352{
243 struct timespec ts; 353 struct timespec ts;
244 struct io_event ioev; 354 struct io_event ioev[1];
245 int res; 355 int res;
246 356
247 if (linuxaio_get_events_from_ring (EV_A)) 357 if (linuxaio_get_events_from_ring (EV_A))
248 return; 358 return;
249 359
250 /* no events, so wait for at least one, then poll ring buffer again */ 360 /* no events, so wait for at least one, then poll ring buffer again */
251 /* this degrades to one event per loop iteration */ 361 /* this degrades to one event per loop iteration */
252 /* if the ring buffer changes layout, but so be it */ 362 /* if the ring buffer changes layout, but so be it */
253 363
364 EV_RELEASE_CB;
365
254 ts.tv_sec = (long)timeout; 366 ts.tv_sec = (long)timeout;
255 ts.tv_nsec = (long)((timeout - ts.tv_sec) * 1e9); 367 ts.tv_nsec = (long)((timeout - ts.tv_sec) * 1e9);
256 368
257 res = ev_io_getevents (linuxaio_ctx, 1, 1, &ioev, &ts); 369 res = evsys_io_getevents (linuxaio_ctx, 1, sizeof (ioev) / sizeof (ioev [0]), ioev, &ts);
370
371 EV_ACQUIRE_CB;
258 372
259 if (res < 0) 373 if (res < 0)
374 if (errno == EINTR)
375 /* ignored */;
376 else
260 ev_syserr ("(libev) linuxaio io_getevents"); 377 ev_syserr ("(libev) linuxaio io_getevents");
261 else if (res) 378 else if (res)
262 { 379 {
263 /* at least one event received, handle it and any remaining ones in the ring buffer */ 380 /* at least one event received, handle it and any remaining ones in the ring buffer */
264 linuxaio_parse_events (EV_A_ &ioev, 1); 381 linuxaio_parse_events (EV_A_ ioev, res);
265 linuxaio_get_events_from_ring (EV_A); 382 linuxaio_get_events_from_ring (EV_A);
266 } 383 }
384}
385
386static int
387linuxaio_io_setup (EV_P)
388{
389 linuxaio_ctx = 0; /* the kernel insists that *ctx_idp is 0 on entry to io_setup */
390 return evsys_io_setup (linuxaio_nr_events (EV_A), &linuxaio_ctx);
267} 391}
268 392
269static void 393static void
270linuxaio_poll (EV_P_ ev_tstamp timeout) 394linuxaio_poll (EV_P_ ev_tstamp timeout)
271{ 395{
273 397
274 /* first phase: submit new iocbs */ 398 /* first phase: submit new iocbs */
275 399
276 /* io_submit might return less than the requested number of iocbs */ 400 /* io_submit might return less than the requested number of iocbs */
277 /* this is, afaics, only because of errors, but we go by the book and use a loop, */ 401 /* this is, afaics, only because of errors, but we go by the book and use a loop, */
278 /* which allows us to pinpoint the errornous iocb */ 402 /* which allows us to pinpoint the erroneous iocb */
279 for (submitted = 0; submitted < linuxaio_submitcnt; ) 403 for (submitted = 0; submitted < linuxaio_submitcnt; )
280 { 404 {
281 int res = ev_io_submit (linuxaio_ctx, linuxaio_submitcnt - submitted, linuxaio_submits + submitted); 405 int res = evsys_io_submit (linuxaio_ctx, linuxaio_submitcnt - submitted, linuxaio_submits + submitted);
282 406
283 if (ecb_expect_false (res < 0)) 407 if (expect_false (res < 0))
284 if (errno == EAGAIN) 408 if (errno == EINVAL)
285 { 409 {
286 /* This happens when the ring buffer is full, at least. I assume this means 410 /* This happens for unsupported fds, officially, but in my testing,
287 * that the event was queued synchronously during io_submit, and thus 411 * also randomly happens for supported fds. We fall back to good old
288 * the buffer overflowed. 412 * poll() here, under the assumption that this is a very rare case.
289 * In this case, we just try next loop iteration. 413 * See https://lore.kernel.org/patchwork/patch/1047453/ to see
290 * This should not result in a few fds taking priority, as the interface 414 * discussion about such a case (ttys) where polling for POLLIN
291 * is one-shot, and we submit iocb's in a round-robin fashion. 415 * fails but POLLIN|POLLOUT works.
292 */ 416 */
293 memmove (linuxaio_submits, linuxaio_submits + submitted, (linuxaio_submitcnt - submitted) * sizeof (*linuxaio_submits)); 417 struct iocb *iocb = linuxaio_submits [submitted];
418 epoll_modify (EV_A_ iocb->aio_fildes, 0, anfds [iocb->aio_fildes].events);
419 iocb->aio_reqprio = -1; /* mark iocb as epoll */
420
421 res = 1; /* skip this iocb - another iocb, another chance */
422 }
423 else if (errno == EAGAIN)
424 {
425 /* This happens when the ring buffer is full, or some other shit we
426 * don't know and isn't documented. Most likely because we have too
427 * many requests and linux aio can't be assed to handle them.
428 * In this case, we try to allocate a larger ring buffer, freeing
429 * ours first. This might fail, in which case we have to fall back to 100%
430 * epoll.
431 * God, how I hate linux not getting its act together. Ever.
432 */
433 evsys_io_destroy (linuxaio_ctx);
294 linuxaio_submitcnt -= submitted; 434 linuxaio_submitcnt = 0;
435
436 /* rearm all fds with active iocbs */
437 {
438 int fd;
439 for (fd = 0; fd < linuxaio_iocbpmax; ++fd)
440 if (linuxaio_iocbps [fd]->io.aio_buf)
441 linuxaio_fd_rearm (EV_A_ fd);
442 }
443
444 ++linuxaio_iteration;
445 if (linuxaio_io_setup (EV_A) < 0)
446 {
447 /* too bad, we can't get a new aio context, go 100% epoll */
448 linuxaio_free_iocbp (EV_A);
449 ev_io_stop (EV_A_ &linuxaio_epoll_w);
450 ev_ref (EV_A);
451 linuxaio_ctx = 0;
452 backend_modify = epoll_modify;
453 backend_poll = epoll_poll;
454 }
455
295 timeout = 0; 456 timeout = 0;
457 /* it's easiest to handle this mess in another iteration */
296 break; 458 return;
459 }
460 else if (errno == EBADF)
461 {
462 fd_kill (EV_A_ linuxaio_submits [submitted]->aio_fildes);
463
464 res = 1; /* skip this iocb */
297 } 465 }
298 else 466 else
299 ev_syserr ("(libev) linuxaio io_submit"); 467 ev_syserr ("(libev) linuxaio io_submit");
300 468
301 submitted += res; 469 submitted += res;
312int 480int
313linuxaio_init (EV_P_ int flags) 481linuxaio_init (EV_P_ int flags)
314{ 482{
315 /* would be great to have a nice test for IOCB_CMD_POLL instead */ 483 /* would be great to have a nice test for IOCB_CMD_POLL instead */
316 /* also: test some semi-common fd types, such as files and ttys in recommended_backends */ 484 /* also: test some semi-common fd types, such as files and ttys in recommended_backends */
317 if (ev_linux_version () < 0x041200) /* 4.18 introduced IOCB_CMD_POLL */ 485 /* 4.18 introduced IOCB_CMD_POLL, 4.19 made epoll work, and we need that */
486 if (ev_linux_version () < 0x041300)
318 return 0; 487 return 0;
319 488
320 linuxaio_ctx = 0; 489 if (!epoll_init (EV_A_ 0))
321 if (ev_io_setup (EV_LINUXAIO_DEPTH, &linuxaio_ctx) < 0)
322 return 0; 490 return 0;
491
492 linuxaio_iteration = 0;
493
494 if (linuxaio_io_setup (EV_A) < 0)
495 {
496 epoll_destroy (EV_A);
497 return 0;
498 }
499
500 ev_io_init (EV_A_ &linuxaio_epoll_w, linuxaio_epoll_cb, backend_fd, EV_READ);
501 ev_set_priority (&linuxaio_epoll_w, EV_MAXPRI);
502 ev_io_start (EV_A_ &linuxaio_epoll_w);
503 ev_unref (EV_A); /* watcher should not keep loop alive */
323 504
324 backend_modify = linuxaio_modify; 505 backend_modify = linuxaio_modify;
325 backend_poll = linuxaio_poll; 506 backend_poll = linuxaio_poll;
326 507
327 linuxaio_iocbpmax = 0; 508 linuxaio_iocbpmax = 0;
336 517
337inline_size 518inline_size
338void 519void
339linuxaio_destroy (EV_P) 520linuxaio_destroy (EV_P)
340{ 521{
522 epoll_destroy (EV_A);
341 linuxaio_free_iocbp (EV_A); 523 linuxaio_free_iocbp (EV_A);
342 ev_io_destroy (linuxaio_ctx); 524 evsys_io_destroy (linuxaio_ctx);
343} 525}
344 526
345inline_size 527inline_size
346void 528void
347linuxaio_fork (EV_P) 529linuxaio_fork (EV_P)
348{ 530{
349 /* this frees all iocbs, which is very heavy-handed */ 531 /* this frees all iocbs, which is very heavy-handed */
350 linuxaio_destroy (EV_A); 532 linuxaio_destroy (EV_A);
351 linuxaio_submitcnt = 0; /* all pointers were invalidated */ 533 linuxaio_submitcnt = 0; /* all pointers were invalidated */
352 534
353 linuxaio_ctx = 0; 535 linuxaio_iteration = 0; /* we start over in the child */
354 while (ev_io_setup (EV_LINUXAIO_DEPTH, &linuxaio_ctx) < 0) 536
537 while (linuxaio_io_setup (EV_A) < 0)
355 ev_syserr ("(libev) linuxaio io_setup"); 538 ev_syserr ("(libev) linuxaio io_setup");
356 539
540 epoll_fork (EV_A);
541
542 ev_io_stop (EV_A_ &linuxaio_epoll_w);
543 ev_io_set (EV_A_ &linuxaio_epoll_w, backend_fd, EV_READ);
544 ev_io_start (EV_A_ &linuxaio_epoll_w);
545
546 /* epoll_fork already did this. hopefully */
357 fd_rearm_all (EV_A); 547 /*fd_rearm_all (EV_A);*/
358} 548}
359 549
