…

/*****************************************************************************/
/* syscall wrapdadoop - this section has the raw api/abi definitions */

#include <sys/syscall.h> /* no glibc wrappers */

/* aio_abi.h is not versioned in any way, so we cannot test for its existence */
#define IOCB_CMD_POLL 5
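/* (IOCB_CMD_POLL seems to have been added around linux 4.18 - on older kernels,
 * submitting such an iocb apparently just fails with EINVAL at io_submit time,
 * which is what the epoll fallback path further down relies on.) */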

/* taken from linux/fs/aio.c. yup, that's a .c file.
 * not only is this totally undocumented, not even the source code
 * can tell you what the future semantics of compat_features and
 * incompat_features are, or what header_length actually is for.
 */
#define AIO_RING_MAGIC 0xa10a10a1
#define EV_AIO_RING_INCOMPAT_FEATURES 0
struct aio_ring
{
  unsigned id;    /* kernel internal index number */
  unsigned nr;    /* number of io_events */
  unsigned head;  /* Written to by userland or by kernel. */
…

inline_size
int
evsys_io_setup (unsigned nr_events, aio_context_t *ctx_idp)
{
  return ev_syscall2 (SYS_io_setup, nr_events, ctx_idp);
}

inline_size
int
evsys_io_destroy (aio_context_t ctx_id)
{
  return ev_syscall1 (SYS_io_destroy, ctx_id);
}

inline_size
int
evsys_io_submit (aio_context_t ctx_id, long nr, struct iocb *cbp[])
{
  return ev_syscall3 (SYS_io_submit, ctx_id, nr, cbp);
}

inline_size
int
evsys_io_cancel (aio_context_t ctx_id, struct iocb *cbp, struct io_event *result)
{
  return ev_syscall3 (SYS_io_cancel, ctx_id, cbp, result);
}

inline_size
int
evsys_io_getevents (aio_context_t ctx_id, long min_nr, long nr, struct io_event *events, struct timespec *timeout)
{
  return ev_syscall5 (SYS_io_getevents, ctx_id, min_nr, nr, events, timeout);
}

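/* a rough sketch of how the backend below drives these wrappers - not real
 * control flow, error handling omitted, just for orientation:
 *
 *   aio_context_t ctx = 0;
 *   evsys_io_setup (nr_events, &ctx);               // create context + completion ring
 *   evsys_io_submit (ctx, nr, iocbs);               // submit IOCB_CMD_POLL iocbs
 *   evsys_io_getevents (ctx, 1, want, events, &ts); // or read the ring directly
 *   evsys_io_destroy (ctx);                         // torn down on destroy/fork
 */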

/*****************************************************************************/
/* actual backend implementation */

…
       * this is not well documented, so we better do it.
       */
      memset (iocb, 0, sizeof (*iocb));

      iocb->io.aio_lio_opcode = IOCB_CMD_POLL;
      iocb->io.aio_fildes     = offset;

      base [offset++] = iocb;
    }
}
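/* linuxaio_iocbps is indexed by fd: each freshly allocated slot above gets its
 * aio_fildes preset to its own index, so linuxaio_iocbps [fd] is always the one
 * iocb responsible for polling that fd. */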
…

static void
linuxaio_modify (EV_P_ int fd, int oev, int nev)
{
  array_needsize (ANIOCBP, linuxaio_iocbps, linuxaio_iocbpmax, fd + 1, linuxaio_array_needsize_iocbp);
  ANIOCBP iocb = linuxaio_iocbps [fd];
  ANFD *anfd = &anfds [fd];

  if (ecb_expect_false (iocb->io.aio_reqprio < 0))
    {
      /* we handed this fd over to epoll, so undo this first */
      /* we do it manually because the optimisations on epoll_modify won't do us any good */
      epoll_ctl (backend_fd, EPOLL_CTL_DEL, fd, 0);
      anfd->emask = 0;
      iocb->io.aio_reqprio = 0;
    }
  else if (ecb_expect_false (iocb->io.aio_buf))
    {
      /* iocb active, so cancel it first before resubmit */
      /* this assumes we only ever get one call per fd per loop iteration */
      for (;;)
        {
          /* on all relevant kernels, io_cancel fails with EINPROGRESS on "success" */
          if (ecb_expect_false (evsys_io_cancel (linuxaio_ctx, &iocb->io, (struct io_event *)0) == 0))
            break;

          if (ecb_expect_true (errno == EINPROGRESS))
            break;

          /* the EINPROGRESS test is for nicer error message. clumsy. */
          if (errno != EINTR)
            {
              assert (("libev: linuxaio unexpected io_cancel failed", errno != EINTR && errno != EINPROGRESS));
              break;
            }
        }

      /* increment generation counter to avoid handling old events */
      ++anfd->egen;
    }

  iocb->io.aio_buf = (nev & EV_READ  ? POLLIN  : 0)
                   | (nev & EV_WRITE ? POLLOUT : 0);

  if (nev)
    {
      iocb->io.aio_data = (uint32_t)fd | ((__u64)(uint32_t)anfd->egen << 32);

      /* queue iocb up for io_submit */
      /* this assumes we only ever get one call per fd per loop iteration */
      ++linuxaio_submitcnt;
      array_needsize (struct iocb *, linuxaio_submits, linuxaio_submitmax, linuxaio_submitcnt, array_needsize_noinit);
…
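/* a note on the aio_data value set in linuxaio_modify above: it packs the fd
 * into the low 32 bits and the per-fd generation counter (anfd->egen) into the
 * high 32 bits. completions belonging to an iocb that has since been cancelled
 * and resubmitted carry a stale generation and can simply be dropped, which is
 * what linuxaio_parse_events does when it decodes the value again:
 *
 *   int      fd  = ev->data & 0xffffffff;
 *   uint32_t gen = ev->data >> 32;
 */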
linuxaio_epoll_cb (EV_P_ struct ev_io *w, int revents)
{
  epoll_poll (EV_A_ 0);
}

inline_speed
void
linuxaio_fd_rearm (EV_P_ int fd)
{
  anfds [fd].events = 0;
  linuxaio_iocbps [fd]->io.aio_buf = 0;
  fd_change (EV_A_ fd, EV_ANFD_REIFY);
…

static void
linuxaio_parse_events (EV_P_ struct io_event *ev, int nr)
{
  while (nr)
    {
      int fd       = ev->data & 0xffffffff;
      uint32_t gen = ev->data >> 32;
      int res      = ev->res;

      assert (("libev: iocb fd must be in-bounds", fd >= 0 && fd < anfdmax));

      /* only accept events if generation counter matches */
      if (ecb_expect_true (gen == (uint32_t)anfds [fd].egen))
        {
          /* feed events, we do not expect or handle POLLNVAL */
          fd_event (
            EV_A_
            fd,
            (res & (POLLOUT | POLLERR | POLLHUP) ? EV_WRITE : 0)
            | (res & (POLLIN | POLLERR | POLLHUP) ? EV_READ : 0)
          );

          /* linux aio is oneshot: rearm fd. TODO: this does more work than strictly needed */
          linuxaio_fd_rearm (EV_A_ fd);
        }

      --nr;
      ++ev;
    }
}
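/* (POLLERR and POLLHUP are folded into both EV_READ and EV_WRITE above, which
 * matches what the other libev backends do - whichever watcher happens to be
 * active gets woken up and sees the error on its next read/write attempt.) */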
…

/* get any events from ring buffer, return true if any were handled */
static int
linuxaio_get_events_from_ring (EV_P)
{
  struct aio_ring *ring = (struct aio_ring *)linuxaio_ctx;
  unsigned head, tail;

  /* the kernel reads and writes both of these variables, */
  /* as a C extension, we assume that volatile use here */
  /* both makes reads atomic and once-only */
  head = *(volatile unsigned *)&ring->head;
  ECB_MEMORY_FENCE_ACQUIRE;
  tail = *(volatile unsigned *)&ring->tail;

  if (head == tail)
    return 0;

  /* parse all available events, but only once, to avoid starvation */
  if (ecb_expect_true (tail > head)) /* normal case around */
    linuxaio_parse_events (EV_A_ ring->io_events + head, tail - head);
  else /* wrapped around */
    {
      linuxaio_parse_events (EV_A_ ring->io_events + head, ring->nr - head);
      linuxaio_parse_events (EV_A_ ring->io_events, tail);
    }

  ECB_MEMORY_FENCE_RELEASE;
  /* as an extension to C, we hope that the volatile will make this atomic and once-only */
  *(volatile unsigned *)&ring->head = tail;

  return 1;
}
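/* a short aside on the ring buffer: the aio context id returned by io_setup is
 * effectively the address of an aio_ring the kernel maps into the process,
 * which is why the cast above works. completed events can thus be consumed
 * without any syscall - compare head and tail, parse the io_events in between,
 * then publish the new head. the acquire/release fences are meant to pair with
 * the kernel's own updates of tail and of the events themselves. */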

inline_size
int
linuxaio_ringbuf_valid (EV_P)
{
  struct aio_ring *ring = (struct aio_ring *)linuxaio_ctx;

  return ecb_expect_true (ring->magic == AIO_RING_MAGIC)
      && ring->incompat_features == EV_AIO_RING_INCOMPAT_FEATURES
      && ring->header_length == sizeof (struct aio_ring); /* TODO: or use it to find io_event[0]? */
}

/* read at least one event from kernel, or timeout */
inline_size
void
linuxaio_get_events (EV_P_ ev_tstamp timeout)
{
  struct timespec ts;
  struct io_event ioev[8]; /* 256 octet stack space */
  int want = 1; /* how many events to request */
  int ringbuf_valid = linuxaio_ringbuf_valid (EV_A);

  if (ecb_expect_true (ringbuf_valid))
    {
      /* if the ring buffer has any events, we don't wait or call the kernel at all */
      if (linuxaio_get_events_from_ring (EV_A))
        return;

      /* if the ring buffer is empty, and we don't have a timeout, then don't call the kernel */
      if (!timeout)
        return;
    }
  else
    /* no ringbuffer, request slightly larger batch */
    want = sizeof (ioev) / sizeof (ioev [0]);

  /* no events, so wait for some
   * for fairness reasons, we do this in a loop, to fetch all events
   */
  for (;;)
    {
      int res;

      EV_RELEASE_CB;

      EV_TS_SET (ts, timeout);
      res = evsys_io_getevents (linuxaio_ctx, 1, want, ioev, &ts);

      EV_ACQUIRE_CB;

      if (res < 0)
        if (errno == EINTR)
          /* ignored, retry */;
        else
          ev_syserr ("(libev) linuxaio io_getevents");
      else if (res)
        {
          /* at least one event available, handle them */
          linuxaio_parse_events (EV_A_ ioev, res);

          if (ecb_expect_true (ringbuf_valid))
            {
              /* if we have a ring buffer, handle any remaining events in it */
              linuxaio_get_events_from_ring (EV_A);

              /* at this point, we should have handled all outstanding events */
              break;
            }
          else if (res < want)
            /* otherwise, if there were fewer events than we wanted, we assume there are no more */
            break;
        }
      else
        break; /* no events from the kernel, we are done */

      timeout = EV_TS_CONST (0.); /* only wait in the first iteration */
    }
}
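/* the want/ringbuf_valid combination above works like this: with a usable ring
 * buffer we only ask the kernel for a single event and drain the rest from the
 * ring without further syscalls; without one we request up to 8 events per call
 * and loop until a short result tells us the kernel has no more. */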

inline_size
int
linuxaio_io_setup (EV_P)
{
  linuxaio_ctx = 0;
  return evsys_io_setup (linuxaio_nr_events (EV_A), &linuxaio_ctx);
}
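/* (io_setup (2) requires the context variable to be initialised to 0 before the
 * call - it fails with EINVAL otherwise - hence the explicit reset above.) */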
…

  /* which allows us to pinpoint the erroneous iocb */
  for (submitted = 0; submitted < linuxaio_submitcnt; )
    {
      int res = evsys_io_submit (linuxaio_ctx, linuxaio_submitcnt - submitted, linuxaio_submits + submitted);

      if (ecb_expect_false (res < 0))
        if (errno == EINVAL)
          {
            /* This happens for unsupported fds, officially, but in my testing,
             * also randomly happens for supported fds. We fall back to good old
             * poll() here, under the assumption that this is a very rare case.
…
            }

            ++linuxaio_iteration;
            if (linuxaio_io_setup (EV_A) < 0)
              {
                /* TODO: rearm all and recreate epoll backend from scratch */
                /* TODO: might be more prudent? */

                /* too bad, we can't get a new aio context, go 100% epoll */
                linuxaio_free_iocbp (EV_A);
                ev_io_stop (EV_A_ &linuxaio_epoll_w);
                ev_ref (EV_A);
                linuxaio_ctx = 0;

                backend = EVBACKEND_EPOLL;
                backend_modify = epoll_modify;
                backend_poll = epoll_poll;
              }

            timeout = EV_TS_CONST (0.);
            /* it's easiest to handle this mess in another iteration */
            return;
          }
        else if (errno == EBADF)
          {
            assert (("libev: event loop rejected bad fd", errno != EBADF));
            fd_kill (EV_A_ linuxaio_submits [submitted]->aio_fildes);

            res = 1; /* skip this iocb */
          }
        else if (errno == EINTR) /* not seen in reality, not documented */
          res = 0; /* silently ignore and retry */
        else
          {
            ev_syserr ("(libev) linuxaio io_submit");
            res = 0;
          }

      submitted += res;
    }

  linuxaio_submitcnt = 0;
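  /* (io_submit either accepts a prefix of the array and returns its length, or
   * fails with the offending iocb sitting at linuxaio_submits [submitted], so
   * the loop above can skip just that one entry and retry the rest.) */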
…
    {
      epoll_destroy (EV_A);
      return 0;
    }

  ev_io_init (&linuxaio_epoll_w, linuxaio_epoll_cb, backend_fd, EV_READ);
  ev_set_priority (&linuxaio_epoll_w, EV_MAXPRI);
  ev_io_start (EV_A_ &linuxaio_epoll_w);
  ev_unref (EV_A); /* watcher should not keep loop alive */

  backend_modify = linuxaio_modify;
  backend_poll = linuxaio_poll;

  linuxaio_iocbpmax = 0;
  linuxaio_iocbps = 0;

  linuxaio_submits = 0;
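  /* (the epoll watcher started above is what ties the two backends together:
   * fds that aio poll cannot handle are handed over to epoll, and whenever
   * backend_fd becomes readable, linuxaio_epoll_cb runs epoll_poll to collect
   * those events.) */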
…
void
linuxaio_destroy (EV_P)
{
  epoll_destroy (EV_A);
  linuxaio_free_iocbp (EV_A);
  evsys_io_destroy (linuxaio_ctx); /* fails in child, aio context is destroyed */
}

ecb_cold
static void
linuxaio_fork (EV_P)
{
  linuxaio_submitcnt = 0; /* all pointers were invalidated */
  linuxaio_free_iocbp (EV_A); /* this frees all iocbs, which is very heavy-handed */
  evsys_io_destroy (linuxaio_ctx); /* fails in child, aio context is destroyed */

  linuxaio_iteration = 0; /* we start over in the child */

  while (linuxaio_io_setup (EV_A) < 0)
    ev_syserr ("(libev) linuxaio io_setup");

  /* forking epoll should also effectively unregister all fds from the backend */
  epoll_fork (EV_A);
  /* epoll_fork already did this. hopefully */
  /*fd_rearm_all (EV_A);*/

  ev_io_stop (EV_A_ &linuxaio_epoll_w);
  ev_io_set (EV_A_ &linuxaio_epoll_w, backend_fd, EV_READ);
  ev_io_start (EV_A_ &linuxaio_epoll_w);
}

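/* (the stop/set/start sequence at the end of linuxaio_fork re-binds
 * linuxaio_epoll_w to the new backend_fd that epoll_fork creates for the
 * child.) */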