…
 * not only is this totally undocumented, not even the source code
 * can tell you what the future semantics of compat_features and
 * incompat_features are, or what header_length actually is for.
 */
#define AIO_RING_MAGIC 0xa10a10a1
#define EV_AIO_RING_INCOMPAT_FEATURES 0
struct aio_ring
{
  unsigned id;   /* kernel internal index number */
  unsigned nr;   /* number of io_events */
  unsigned head; /* Written to by userland or by kernel. */
…
  unsigned incompat_features;
  unsigned header_length; /* size of aio_ring */

  struct io_event io_events[0];
};
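
/*
 * rough usage sketch, as done in linuxaio_get_events_from_ring below:
 * events pending for userspace live in io_events [head] .. io_events [tail - 1]
 * (indices wrapping modulo nr); after parsing them we store tail into head
 * to hand the slots back to the kernel.
 */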

/*
 * define some syscall wrappers for common architectures
 * this is mostly for nice looks during debugging, not performance.
 * our syscalls return < 0, not == -1, on error. which is good
 * enough for linux aio.
 * TODO: arm is also common nowadays, maybe even mips and x86
 * TODO: after implementing this, it suddenly looks like overkill, but it's hard to remove...
 */
#if __GNUC__ && __linux && ECB_AMD64

#define ev_syscall(nr,narg,arg1,arg2,arg3,arg4,arg5)                    \
  ({                                                                    \
    long res;                                                           \
    register unsigned long r5 __asm__ ("r8" );                          \
    register unsigned long r4 __asm__ ("r10");                          \
    register unsigned long r3 __asm__ ("rdx");                          \
    register unsigned long r2 __asm__ ("rsi");                          \
    register unsigned long r1 __asm__ ("rdi");                          \
    if (narg >= 5) r5 = (unsigned long)(arg5);                          \
    if (narg >= 4) r4 = (unsigned long)(arg4);                          \
    if (narg >= 3) r3 = (unsigned long)(arg3);                          \
    if (narg >= 2) r2 = (unsigned long)(arg2);                          \
    if (narg >= 1) r1 = (unsigned long)(arg1);                          \
    __asm__ __volatile__ (                                              \
      "syscall\n\t"                                                     \
      : "=a" (res)                                                      \
      : "0" (nr), "r" (r1), "r" (r2), "r" (r3), "r" (r4), "r" (r5)      \
      : "cc", "r11", "cx", "memory");                                   \
    errno = -res;                                                       \
    res;                                                                \
  })

#endif

#ifdef ev_syscall
  #define ev_syscall0(nr)                           ev_syscall (nr, 0,    0,    0,    0,    0,    0)
  #define ev_syscall1(nr,arg1)                      ev_syscall (nr, 1, arg1,    0,    0,    0,    0)
  #define ev_syscall2(nr,arg1,arg2)                 ev_syscall (nr, 2, arg1, arg2,    0,    0,    0)
  #define ev_syscall3(nr,arg1,arg2,arg3)            ev_syscall (nr, 3, arg1, arg2, arg3,    0,    0)
  #define ev_syscall4(nr,arg1,arg2,arg3,arg4)       ev_syscall (nr, 4, arg1, arg2, arg3, arg4,    0)
  #define ev_syscall5(nr,arg1,arg2,arg3,arg4,arg5)  ev_syscall (nr, 5, arg1, arg2, arg3, arg4, arg5)
#else
  #define ev_syscall0(nr)                           syscall (nr)
  #define ev_syscall1(nr,arg1)                      syscall (nr, arg1)
  #define ev_syscall2(nr,arg1,arg2)                 syscall (nr, arg1, arg2)
  #define ev_syscall3(nr,arg1,arg2,arg3)            syscall (nr, arg1, arg2, arg3)
  #define ev_syscall4(nr,arg1,arg2,arg3,arg4)       syscall (nr, arg1, arg2, arg3, arg4)
  #define ev_syscall5(nr,arg1,arg2,arg3,arg4,arg5)  syscall (nr, arg1, arg2, arg3, arg4, arg5)
#endif
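
/*
 * rough usage sketch (assuming <sys/syscall.h> is included for the SYS_* numbers):
 * the evsys_* wrappers below boil down to something like
 *   return ev_syscall2 (SYS_io_setup, nr_events, ctx_idp);
 * where a negative return value signals an error, with errno already set.
 */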

inline_size
int
evsys_io_setup (unsigned nr_events, aio_context_t *ctx_idp)
{
…
       * this is not well documented, so we'd better do it.
       */
      memset (iocb, 0, sizeof (*iocb));

      iocb->io.aio_lio_opcode = IOCB_CMD_POLL;
      iocb->io.aio_fildes     = offset;

      base [offset++] = iocb;
    }
}
…
static void
linuxaio_modify (EV_P_ int fd, int oev, int nev)
{
  array_needsize (ANIOCBP, linuxaio_iocbps, linuxaio_iocbpmax, fd + 1, linuxaio_array_needsize_iocbp);
  ANIOCBP iocb = linuxaio_iocbps [fd];
  ANFD *anfd = &anfds [fd];

  if (ecb_expect_false (iocb->io.aio_reqprio < 0))
    {
      /* we handed this fd over to epoll, so undo this first */
      /* we do it manually because the optimisations on epoll_modify won't do us any good */
      epoll_ctl (backend_fd, EPOLL_CTL_DEL, fd, 0);
      anfd->emask = 0;
      iocb->io.aio_reqprio = 0;
    }
  else if (ecb_expect_false (iocb->io.aio_buf))
    {
      /* iocb active, so cancel it first before resubmit */
      /* this assumes we only ever get one call per fd per loop iteration */
      for (;;)
        {
          /* on all relevant kernels, io_cancel fails with EINPROGRESS on "success" */
          if (ecb_expect_false (evsys_io_cancel (linuxaio_ctx, &iocb->io, (struct io_event *)0) == 0))
            break;

          if (ecb_expect_true (errno == EINPROGRESS))
            break;

          /* the EINPROGRESS test is for nicer error message. clumsy. */
          if (errno != EINTR)
            {
              assert (("libev: linuxaio unexpected io_cancel failed", errno != EINTR && errno != EINPROGRESS));
              break;
            }
        }

      /* increment generation counter to avoid handling old events */
      ++anfd->egen;
    }

  iocb->io.aio_buf =
      (nev & EV_READ  ? POLLIN  : 0)
    | (nev & EV_WRITE ? POLLOUT : 0);

  if (nev)
    {
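      /* aio_data carries the fd in the low 32 bits and the fd's generation */
      /* counter in the high 32 bits, so completions belonging to an older, */
      /* cancelled iocb can be recognised and dropped in linuxaio_parse_events */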
      iocb->io.aio_data = (uint32_t)fd | ((__u64)(uint32_t)anfd->egen << 32);

      /* queue iocb up for io_submit */
      /* this assumes we only ever get one call per fd per loop iteration */
      ++linuxaio_submitcnt;
      array_needsize (struct iocb *, linuxaio_submits, linuxaio_submitmax, linuxaio_submitcnt, array_needsize_noinit);
…
linuxaio_epoll_cb (EV_P_ struct ev_io *w, int revents)
{
  epoll_poll (EV_A_ 0);
}

inline_speed
void
linuxaio_fd_rearm (EV_P_ int fd)
{
  anfds [fd].events = 0;
  linuxaio_iocbps [fd]->io.aio_buf = 0;
  fd_change (EV_A_ fd, EV_ANFD_REIFY);
…
static void
linuxaio_parse_events (EV_P_ struct io_event *ev, int nr)
{
  while (nr)
    {
      int fd       = ev->data & 0xffffffff;
      uint32_t gen = ev->data >> 32;
      int res      = ev->res;

      assert (("libev: iocb fd must be in-bounds", fd >= 0 && fd < anfdmax));

      /* only accept events if generation counter matches */
      if (ecb_expect_true (gen == (uint32_t)anfds [fd].egen))
        {
          /* feed events, we do not expect or handle POLLNVAL */
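          /* POLLERR and POLLHUP are reported as both EV_READ and EV_WRITE below, */
          /* so a watcher waiting for either direction gets woken up on errors/hangups */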
          fd_event (
            EV_A_
            fd,
            (res & (POLLOUT | POLLERR | POLLHUP) ? EV_WRITE : 0)
            | (res & (POLLIN | POLLERR | POLLHUP) ? EV_READ : 0)
          );

          /* linux aio is oneshot: rearm fd. TODO: this does more work than strictly needed */
          linuxaio_fd_rearm (EV_A_ fd);
        }

      --nr;
      ++ev;
    }
}
…
/* get any events from ring buffer, return true if any were handled */
static int
linuxaio_get_events_from_ring (EV_P)
{
  struct aio_ring *ring = (struct aio_ring *)linuxaio_ctx;
  unsigned head, tail;

  /* the kernel reads and writes both of these variables, */
  /* as a C extension, we assume that volatile use here */
  /* both makes reads atomic and once-only */
  head = *(volatile unsigned *)&ring->head;
  ECB_MEMORY_FENCE_ACQUIRE;
  tail = *(volatile unsigned *)&ring->tail;

  if (head == tail)
    return 0;

  /* parse all available events, but only once, to avoid starvation */
  if (ecb_expect_true (tail > head)) /* normal case around */
    linuxaio_parse_events (EV_A_ ring->io_events + head, tail - head);
  else /* wrapped around */
    {
      linuxaio_parse_events (EV_A_ ring->io_events + head, ring->nr - head);
      linuxaio_parse_events (EV_A_ ring->io_events, tail);
…
  *(volatile unsigned *)&ring->head = tail;

  return 1;
}
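
/* check whether the ring buffer layout is the one we expect; if it is not, */
/* linuxaio_get_events below skips the ring and uses the io_getevents syscall instead */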
inline_size
int
linuxaio_ringbuf_valid (EV_P)
{
  struct aio_ring *ring = (struct aio_ring *)linuxaio_ctx;

  return ecb_expect_true (ring->magic == AIO_RING_MAGIC)
      && ring->incompat_features == EV_AIO_RING_INCOMPAT_FEATURES
      && ring->header_length == sizeof (struct aio_ring); /* TODO: or use it to find io_event[0]? */
}

/* read at least one event from kernel, or timeout */
inline_size
void
linuxaio_get_events (EV_P_ ev_tstamp timeout)
{
  struct timespec ts;
  struct io_event ioev[8]; /* 256 octet stack space */
  int want = 1; /* how many events to request */
  int ringbuf_valid = linuxaio_ringbuf_valid (EV_A);

  if (ecb_expect_true (ringbuf_valid))
    {
      /* if the ring buffer has any events, we don't wait or call the kernel at all */
      if (linuxaio_get_events_from_ring (EV_A))
        return;

      /* if the ring buffer is empty, and we don't have a timeout, then don't call the kernel */
      if (!timeout)
        return;
    }
  else
    /* no ringbuffer, request slightly larger batch */
    want = sizeof (ioev) / sizeof (ioev [0]);

  /* no events, so wait for some
   * for fairness reasons, we do this in a loop, to fetch all events
   */
  for (;;)
    {
      int res;

      EV_RELEASE_CB;

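      /* convert the fractional-second ev_tstamp into the struct timespec that io_getevents wants */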
      EV_TS_SET (ts, timeout);
      res = evsys_io_getevents (linuxaio_ctx, 1, want, ioev, &ts);

      EV_ACQUIRE_CB;

      if (res < 0)
        if (errno == EINTR)
          /* ignored, retry */;
        else
          ev_syserr ("(libev) linuxaio io_getevents");
      else if (res)
        {
          /* at least one event available, handle them */
          linuxaio_parse_events (EV_A_ ioev, res);

          if (ecb_expect_true (ringbuf_valid))
            {
              /* if we have a ring buffer, handle any remaining events in it */
              linuxaio_get_events_from_ring (EV_A);

              /* at this point, we should have handled all outstanding events */
              break;
            }
          else if (res < want)
            /* otherwise, if there were fewer events than we wanted, we assume there are no more */
            break;
        }
      else
        break; /* no events from the kernel, we are done */

      timeout = 0; /* only wait in the first iteration */
    }
}

inline_size
int
linuxaio_io_setup (EV_P)
{
  linuxaio_ctx = 0;
  return evsys_io_setup (linuxaio_nr_events (EV_A), &linuxaio_ctx);
}
…
  /* which allows us to pinpoint the erroneous iocb */
  for (submitted = 0; submitted < linuxaio_submitcnt; )
    {
      int res = evsys_io_submit (linuxaio_ctx, linuxaio_submitcnt - submitted, linuxaio_submits + submitted);
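      /* io_submit returns the number of iocbs it accepted, so on a partial */
      /* submit we can continue right at the first iocb that was not taken  */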

      if (ecb_expect_false (res < 0))
        if (errno == EINVAL)
          {
            /* This happens for unsupported fds, officially, but in my testing,
             * also randomly happens for supported fds. We fall back to good old
             * poll() here, under the assumption that this is a very rare case.
…
            }

            ++linuxaio_iteration;
            if (linuxaio_io_setup (EV_A) < 0)
              {
                /* TODO: rearm all and recreate epoll backend from scratch */
                /* TODO: might be more prudent? */

                /* too bad, we can't get a new aio context, go 100% epoll */
                linuxaio_free_iocbp (EV_A);
                ev_io_stop (EV_A_ &linuxaio_epoll_w);
                ev_ref (EV_A);
                linuxaio_ctx = 0;

                backend        = EVBACKEND_EPOLL;
                backend_modify = epoll_modify;
                backend_poll   = epoll_poll;
              }

            timeout = 0;
            /* it's easiest to handle this mess in another iteration */
            return;
          }
        else if (errno == EBADF)
          {
            assert (("libev: event loop rejected bad fd", errno != EBADF));
            fd_kill (EV_A_ linuxaio_submits [submitted]->aio_fildes);

            res = 1; /* skip this iocb */
          }
        else if (errno == EINTR) /* not seen in reality, not documented */
          res = 0; /* silently ignore and retry */
        else
          {
            ev_syserr ("(libev) linuxaio io_submit");
            res = 0;
          }

      submitted += res;
    }

  linuxaio_submitcnt = 0;
…
  ev_io_init (&linuxaio_epoll_w, linuxaio_epoll_cb, backend_fd, EV_READ);
  ev_set_priority (&linuxaio_epoll_w, EV_MAXPRI);
  ev_io_start (EV_A_ &linuxaio_epoll_w);
  ev_unref (EV_A); /* watcher should not keep loop alive */

  backend_modify = linuxaio_modify;
  backend_poll   = linuxaio_poll;

  linuxaio_iocbpmax = 0;
  linuxaio_iocbps = 0;

  linuxaio_submits = 0;
…
void
linuxaio_destroy (EV_P)
{
  epoll_destroy (EV_A);
  linuxaio_free_iocbp (EV_A);
  evsys_io_destroy (linuxaio_ctx); /* fails in child, aio context is destroyed */
}

ecb_cold
static void
linuxaio_fork (EV_P)
{
  linuxaio_submitcnt = 0; /* all pointers were invalidated */
  linuxaio_free_iocbp (EV_A); /* this frees all iocbs, which is very heavy-handed */
  evsys_io_destroy (linuxaio_ctx); /* fails in child, aio context is destroyed */

  linuxaio_iteration = 0; /* we start over in the child */

  while (linuxaio_io_setup (EV_A) < 0)
    ev_syserr ("(libev) linuxaio io_setup");

  /* forking epoll should also effectively unregister all fds from the backend */
  epoll_fork (EV_A);
  /* epoll_fork already did this. hopefully */
  /*fd_rearm_all (EV_A);*/

  ev_io_stop (EV_A_ &linuxaio_epoll_w);
  ev_io_set (&linuxaio_epoll_w, backend_fd, EV_READ);
  ev_io_start (EV_A_ &linuxaio_epoll_w);
}