…
  * not only is this totally undocumented, not even the source code
  * can tell you what the future semantics of compat_features and
  * incompat_features are, or what header_length actually is for.
  */
 #define AIO_RING_MAGIC                  0xa10a10a1
-#define AIO_RING_INCOMPAT_FEATURES      0
+#define EV_AIO_RING_INCOMPAT_FEATURES   0
 struct aio_ring
 {
   unsigned id;   /* kernel internal index number */
   unsigned nr;   /* number of io_events */
   unsigned head; /* Written to by userland or by kernel. */
…
   unsigned header_length; /* size of aio_ring */

   struct io_event io_events[0];
 };

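The context handle returned by io_setup is, in reality, a pointer to this ring, mapped into user space, which is what allows completions to be reaped without a syscall. A minimal sketch of such a ring consumer, assuming the (undocumented) layout above holds; memory fences and validation are omitted here for brevity, while the real reader further down verifies magic/incompat_features/header_length first and uses ECB_MEMORY_FENCE_*:

    /* sketch only, not part of libev */
    static int
    ring_pop (aio_context_t ctx, struct io_event *out)
    {
      struct aio_ring *ring = (struct aio_ring *)ctx;
      unsigned head = *(volatile unsigned *)&ring->head;
      unsigned tail = *(volatile unsigned *)&ring->tail;

      if (head == tail)
        return 0;                      /* no completions pending */

      *out = ring->io_events [head];   /* events follow the header */
      *(volatile unsigned *)&ring->head = (head + 1) % ring->nr;
      return 1;
    }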
+/*
+ * define some syscall wrappers for common architectures
+ * this is mostly for nice looks during debugging, not performance.
+ * our syscalls return < 0, not == -1, on error. which is good
+ * enough for linux aio.
+ * TODO: arm is also common nowadays, maybe even mips and x86
+ * TODO: after implementing this, it suddenly looks like overkill, but it's hard to remove...
+ */
+#if __GNUC__ && __linux && ECB_AMD64 && !defined __OPTIMIZE_SIZE__
+  /* the costly errno access probably kills this for size optimisation */
+
+  #define ev_syscall(nr,narg,arg1,arg2,arg3,arg4,arg5)                \
+    ({                                                                \
+        long res;                                                     \
+        register unsigned long r5 __asm__ ("r8" );                    \
+        register unsigned long r4 __asm__ ("r10");                    \
+        register unsigned long r3 __asm__ ("rdx");                    \
+        register unsigned long r2 __asm__ ("rsi");                    \
+        register unsigned long r1 __asm__ ("rdi");                    \
+        if (narg >= 5) r5 = (unsigned long)(arg5);                    \
+        if (narg >= 4) r4 = (unsigned long)(arg4);                    \
+        if (narg >= 3) r3 = (unsigned long)(arg3);                    \
+        if (narg >= 2) r2 = (unsigned long)(arg2);                    \
+        if (narg >= 1) r1 = (unsigned long)(arg1);                    \
+        __asm__ __volatile__ (                                        \
+          "syscall\n\t"                                               \
+          : "=a" (res)                                                \
+          : "0" (nr), "r" (r1), "r" (r2), "r" (r3), "r" (r4), "r" (r5) \
+          : "cc", "r11", "cx", "memory");                             \
+        errno = -res;                                                 \
+        res;                                                          \
+    })
+
+#endif
+
+#ifdef ev_syscall
+  #define ev_syscall0(nr)                           ev_syscall (nr, 0,    0,    0,    0,    0,    0)
+  #define ev_syscall1(nr,arg1)                      ev_syscall (nr, 1, arg1,    0,    0,    0,    0)
+  #define ev_syscall2(nr,arg1,arg2)                 ev_syscall (nr, 2, arg1, arg2,    0,    0,    0)
+  #define ev_syscall3(nr,arg1,arg2,arg3)            ev_syscall (nr, 3, arg1, arg2, arg3,    0,    0)
+  #define ev_syscall4(nr,arg1,arg2,arg3,arg4)       ev_syscall (nr, 4, arg1, arg2, arg3, arg4,    0)
+  #define ev_syscall5(nr,arg1,arg2,arg3,arg4,arg5)  ev_syscall (nr, 5, arg1, arg2, arg3, arg4, arg5)
+#else
+  #define ev_syscall0(nr)                           syscall (nr)
+  #define ev_syscall1(nr,arg1)                      syscall (nr, arg1)
+  #define ev_syscall2(nr,arg1,arg2)                 syscall (nr, arg1, arg2)
+  #define ev_syscall3(nr,arg1,arg2,arg3)            syscall (nr, arg1, arg2, arg3)
+  #define ev_syscall4(nr,arg1,arg2,arg3,arg4)       syscall (nr, arg1, arg2, arg3, arg4)
+  #define ev_syscall5(nr,arg1,arg2,arg3,arg4,arg5)  syscall (nr, arg1, arg2, arg3, arg4, arg5)
+#endif
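Note the error convention these macros establish: the inline-asm path returns the raw (negative) kernel result and unconditionally stores -res into errno, while the syscall () fallback returns -1 with errno set. Callers therefore test for < 0 rather than == -1, which both paths satisfy. A hypothetical caller, mirroring evsys_io_setup below:

    /* works with either the asm path or the syscall () fallback */
    aio_context_t ctx = 0;

    if (ev_syscall2 (SYS_io_setup, 32, &ctx) < 0)   /* < 0, not == -1 */
      perror ("io_setup");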
+
 inline_size
 int
 evsys_io_setup (unsigned nr_events, aio_context_t *ctx_idp)
 {
-  return syscall (SYS_io_setup, nr_events, ctx_idp);
+  return ev_syscall2 (SYS_io_setup, nr_events, ctx_idp);
 }

 inline_size
 int
 evsys_io_destroy (aio_context_t ctx_id)
 {
-  return syscall (SYS_io_destroy, ctx_id);
+  return ev_syscall1 (SYS_io_destroy, ctx_id);
 }

 inline_size
 int
 evsys_io_submit (aio_context_t ctx_id, long nr, struct iocb *cbp[])
 {
-  return syscall (SYS_io_submit, ctx_id, nr, cbp);
+  return ev_syscall3 (SYS_io_submit, ctx_id, nr, cbp);
 }

 inline_size
 int
 evsys_io_cancel (aio_context_t ctx_id, struct iocb *cbp, struct io_event *result)
 {
-  return syscall (SYS_io_cancel, ctx_id, cbp, result);
+  return ev_syscall3 (SYS_io_cancel, ctx_id, cbp, result);
 }

 inline_size
 int
 evsys_io_getevents (aio_context_t ctx_id, long min_nr, long nr, struct io_event *events, struct timespec *timeout)
 {
-  return syscall (SYS_io_getevents, ctx_id, min_nr, nr, events, timeout);
+  return ev_syscall5 (SYS_io_getevents, ctx_id, min_nr, nr, events, timeout);
 }

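These five wrappers are the entire kernel ABI the backend builds on. For illustration, a standalone sketch using the raw syscalls to poll stdin once via IOCB_CMD_POLL; this assumes Linux 4.18+ kernel and headers and is not libev code:

    #include <errno.h>
    #include <poll.h>
    #include <stdio.h>
    #include <string.h>
    #include <unistd.h>
    #include <sys/syscall.h>
    #include <linux/aio_abi.h>

    int
    main (void)
    {
      aio_context_t ctx = 0;
      struct iocb io, *iop = &io;
      struct io_event ev;

      if (syscall (SYS_io_setup, 32, &ctx) < 0)
        return perror ("io_setup"), 1;

      memset (&io, 0, sizeof io);
      io.aio_lio_opcode = IOCB_CMD_POLL;
      io.aio_fildes     = 0;      /* stdin */
      io.aio_buf        = POLLIN; /* the poll event mask goes into aio_buf */

      if (syscall (SYS_io_submit, ctx, 1, &iop) != 1)
        return perror ("io_submit"), 1;

      /* blocks until stdin is readable; ev.res carries the revents mask */
      if (syscall (SYS_io_getevents, ctx, 1, 1, &ev, 0) == 1)
        printf ("revents: %lx\n", (unsigned long)ev.res);

      syscall (SYS_io_destroy, ctx);
      return 0;
    }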
 /*****************************************************************************/
 /* actual backend implementation */

…
 linuxaio_modify (EV_P_ int fd, int oev, int nev)
 {
   array_needsize (ANIOCBP, linuxaio_iocbps, linuxaio_iocbpmax, fd + 1, linuxaio_array_needsize_iocbp);
   ANIOCBP iocb = linuxaio_iocbps [fd];

-  if (iocb->io.aio_reqprio < 0)
+  if (ecb_expect_false (iocb->io.aio_reqprio < 0))
     {
       /* we handed this fd over to epoll, so undo this first */
-      /* we do it manually because the optimisations on epoll_modfy won't do us any good */
+      /* we do it manually because the optimisations on epoll_modify won't do us any good */
       epoll_ctl (backend_fd, EPOLL_CTL_DEL, fd, 0);
+      anfds [fd].emask = 0;
       iocb->io.aio_reqprio = 0;
     }

+  if (ecb_expect_false (iocb->io.aio_buf))
+    {
+      /* iocb active, so cancel it first before resubmit */
+      for (;;)
+        {
+          /* on all relevant kernels, io_cancel fails with EINPROGRESS on "success" */
+          if (ecb_expect_false (evsys_io_cancel (linuxaio_ctx, &iocb->io, (struct io_event *)0) == 0))
+            break;
+
+          if (ecb_expect_true (errno == EINPROGRESS))
+            break;
+
+          /* the EINPROGRESS test is for nicer error message. clumsy. */
+          assert (("libev: linuxaio unexpected io_cancel failed", errno != EINPROGRESS && errno != EINTR));
+        }
+    }
+
-  if (iocb->io.aio_buf)
-    /* io_cancel always returns some error on relevant kernels, but works */
-    evsys_io_cancel (linuxaio_ctx, &iocb->io, (struct io_event *)0);
+  iocb->io.aio_buf =
+      (nev & EV_READ  ? POLLIN  : 0)
+    | (nev & EV_WRITE ? POLLOUT : 0);

   if (nev)
     {
-      iocb->io.aio_buf =
-          (nev & EV_READ  ? POLLIN  : 0)
-        | (nev & EV_WRITE ? POLLOUT : 0);
-
       /* queue iocb up for io_submit */
       /* this assumes we only ever get one call per fd per loop iteration */
       ++linuxaio_submitcnt;
       array_needsize (struct iocb *, linuxaio_submits, linuxaio_submitmax, linuxaio_submitcnt, array_needsize_noinit);
       linuxaio_submits [linuxaio_submitcnt - 1] = &iocb->io;
…
 linuxaio_epoll_cb (EV_P_ struct ev_io *w, int revents)
 {
   epoll_poll (EV_A_ 0);
 }

-static void
+inline_speed
+void
 linuxaio_fd_rearm (EV_P_ int fd)
 {
   anfds [fd].events = 0;
   linuxaio_iocbps [fd]->io.aio_buf = 0;
   fd_change (EV_A_ fd, EV_ANFD_REIFY);
…
     fd,
     (res & (POLLOUT | POLLERR | POLLHUP) ? EV_WRITE : 0)
     | (res & (POLLIN | POLLERR | POLLHUP) ? EV_READ : 0)
   );

-  /* linux aio is oneshot: rearm fd. TODO: this does more work than needed */
+  /* linux aio is oneshot: rearm fd. TODO: this does more work than strictly needed */
   linuxaio_fd_rearm (EV_A_ fd);

   --nr;
   ++ev;
 }
…
   unsigned tail = *(volatile unsigned *)&ring->tail;

   if (head == tail)
     return 0;

-  /* bail out if the ring buffer doesn't match the expected layout */
-  if (expect_false (ring->magic != AIO_RING_MAGIC)
-      || ring->incompat_features != AIO_RING_INCOMPAT_FEATURES
-      || ring->header_length != sizeof (struct aio_ring)) /* TODO: or use it to find io_event[0]? */
-    return 0;
-
   /* make sure the events up to tail are visible */
   ECB_MEMORY_FENCE_ACQUIRE;

   /* parse all available events, but only once, to avoid starvation */
   if (tail > head) /* normal case around */
…
   }

   ECB_MEMORY_FENCE_RELEASE;
   /* as an extension to C, we hope that the volatile will make this atomic and once-only */
   *(volatile unsigned *)&ring->head = tail;
-  /* make sure kernel can see our new head value - probably not required */

   return 1;
+}
+
+inline_size
+int
+linuxaio_ringbuf_valid (EV_P)
+{
+  struct aio_ring *ring = (struct aio_ring *)linuxaio_ctx;
+
+  return ecb_expect_true (ring->magic == AIO_RING_MAGIC)
+      && ring->incompat_features == EV_AIO_RING_INCOMPAT_FEATURES
+      && ring->header_length == sizeof (struct aio_ring); /* TODO: or use it to find io_event[0]? */
 }
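The release fence plus volatile store in the ring reader hand-roll what C11 expresses directly with atomics; libev presumably avoids a hard C11 dependency for portability. The same consumer-side protocol in portable C11, on a hypothetical ring type, as an illustration only:

    #include <stdatomic.h>

    struct ring { _Atomic unsigned head, tail; /* ... events ... */ };

    static int
    consume (struct ring *r)
    {
      unsigned head = atomic_load_explicit (&r->head, memory_order_relaxed);
      unsigned tail = atomic_load_explicit (&r->tail, memory_order_acquire);

      if (head == tail)
        return 0;

      /* ... process events in [head, tail) ... */

      /* release: our reads of the slots happen-before the producer
       * observes the new head and reuses them */
      atomic_store_explicit (&r->head, tail, memory_order_release);
      return 1;
    }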

 /* read at least one event from kernel, or timeout */
 inline_size
 void
 linuxaio_get_events (EV_P_ ev_tstamp timeout)
 {
   struct timespec ts;
-  struct io_event ioev[1];
-  int res;
+  struct io_event ioev[8]; /* 256 octets of stack space */
+  int want = 1; /* how many events to request */
+  int ringbuf_valid = linuxaio_ringbuf_valid (EV_A);

+  if (ecb_expect_true (ringbuf_valid))
+    {
+      /* if the ring buffer has any events, we don't wait or call the kernel at all */
       if (linuxaio_get_events_from_ring (EV_A))
         return;

-  /* no events, so wait for at least one, then poll ring buffer again */
-  /* this degrades to one event per loop iteration */
-  /* if the ring buffer changes layout, but so be it */
+      /* if the ring buffer is empty and we don't have a timeout, then don't call the kernel */
+      if (!timeout)
+        return;
+    }
+  else
+    /* no ring buffer, request a slightly larger batch */
+    want = sizeof (ioev) / sizeof (ioev [0]);

+  /* no events, so wait for some
+   * for fairness reasons, we do this in a loop, to fetch all events
+   */
+  for (;;)
+    {
+      int res;
+
       EV_RELEASE_CB;

       ts.tv_sec  = (long)timeout;
       ts.tv_nsec = (long)((timeout - ts.tv_sec) * 1e9);

-      res = evsys_io_getevents (linuxaio_ctx, 1, sizeof (ioev) / sizeof (ioev [0]), ioev, &ts);
+      res = evsys_io_getevents (linuxaio_ctx, 1, want, ioev, &ts);

       EV_ACQUIRE_CB;

       if (res < 0)
         if (errno == EINTR)
-          /* ignored */;
+          /* ignored, retry */;
         else
           ev_syserr ("(libev) linuxaio io_getevents");
       else if (res)
         {
-          /* at least one event received, handle it and any remaining ones in the ring buffer */
+          /* at least one event available, handle them */
           linuxaio_parse_events (EV_A_ ioev, res);
+
+          if (ecb_expect_true (ringbuf_valid))
+            {
+              /* if we have a ring buffer, handle any remaining events in it */
               linuxaio_get_events_from_ring (EV_A);
+
+              /* at this point, we should have handled all outstanding events */
+              break;
+            }
+          else if (res < want)
+            /* otherwise, if there were fewer events than we wanted, we assume there are no more */
+            break;
         }
+      else
+        break; /* no events from the kernel, we are done */
+
+      timeout = 0; /* only wait in the first iteration */
     }
+}

-static int
+inline_size
+int
 linuxaio_io_setup (EV_P)
 {
   linuxaio_ctx = 0;
   return evsys_io_setup (linuxaio_nr_events (EV_A), &linuxaio_ctx);
 }
…
   /* which allows us to pinpoint the erroneous iocb */
   for (submitted = 0; submitted < linuxaio_submitcnt; )
     {
       int res = evsys_io_submit (linuxaio_ctx, linuxaio_submitcnt - submitted, linuxaio_submits + submitted);

-      if (expect_false (res < 0))
+      if (ecb_expect_false (res < 0))
         if (errno == EINVAL)
           {
             /* This happens for unsupported fds, officially, but in my testing,
              * also randomly happens for supported fds. We fall back to good old
              * poll() here, under the assumption that this is a very rare case.
…
             /* it's easiest to handle this mess in another iteration */
             return;
           }
         else if (errno == EBADF)
           {
+            assert (("libev: event loop rejected bad fd", errno != EBADF));
             fd_kill (EV_A_ linuxaio_submits [submitted]->aio_fildes);

             res = 1; /* skip this iocb */
           }
+        else if (errno == EINTR) /* not seen in reality, not documented */
+          res = 0; /* silently ignore and retry */
         else
           ev_syserr ("(libev) linuxaio io_submit");

       submitted += res;
     }
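The loop above leans on a useful io_submit property: on failure it reports errno for the first unsubmittable iocb, and on success it may submit only a prefix of the batch, returning its length, so submission can resume right behind the problem entry. The same pattern in isolation, as a sketch with error handling reduced to skipping the offending iocb:

    #include <sys/syscall.h>
    #include <unistd.h>
    #include <linux/aio_abi.h>

    static void
    submit_all (aio_context_t ctx, struct iocb **cbs, int cnt)
    {
      int done = 0;

      while (done < cnt)
        {
          int res = syscall (SYS_io_submit, ctx, cnt - done, cbs + done);

          if (res < 0)
            res = 1; /* skip cbs [done]; a real caller inspects errno here */

          done += res;
        }
    }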
…
 void
 linuxaio_destroy (EV_P)
 {
   epoll_destroy (EV_A);
   linuxaio_free_iocbp (EV_A);
-  evsys_io_destroy (linuxaio_ctx);
+  evsys_io_destroy (linuxaio_ctx); /* fails in child, aio context is destroyed */
 }

 inline_size
 void
 linuxaio_fork (EV_P)
…
   linuxaio_iteration = 0; /* we start over in the child */

   while (linuxaio_io_setup (EV_A) < 0)
     ev_syserr ("(libev) linuxaio io_setup");

+  /* forking epoll should also effectively unregister all fds from the backend */
   epoll_fork (EV_A);

   ev_io_stop  (EV_A_ &linuxaio_epoll_w);
   ev_io_set   (EV_A_ &linuxaio_epoll_w, backend_fd, EV_READ);
   ev_io_start (EV_A_ &linuxaio_epoll_w);