… | |
… | |
65 | * of timeouts later :). |
65 | * of timeouts later :). |
66 | * h) initially there was a hardcoded limit of 4096 outstanding events. |
66 | * h) initially there was a hardcoded limit of 4096 outstanding events. |
67 | * later versions not only bump this to 32k, but also can handle |
67 | * later versions not only bump this to 32k, but also can handle |
68 | * an unlimited amount of events, so this only affects the batch size. |
68 | * an unlimited amount of events, so this only affects the batch size. |
69 | * i) unlike linux aio, you *can* register more than the limit |
69 | * i) unlike linux aio, you *can* register more than the limit |
70 | * of fd events. while early versions of io_uring signalled an overflow |
70 | * of fd events. while early versions of io_uring signalled an overflow |
71 | * and you ended up getting wet. 5.5+ does not do this anymore. |
71 | * and you ended up getting wet. 5.5+ does not do this anymore. |
72 | * j) but, oh my! it had exactly the same bugs as the linux aio backend, |
72 | * j) but, oh my! it had exactly the same bugs as the linux aio backend, |
73 | * where some undocumented poll combinations just fail. fortunately, |
73 | * where some undocumented poll combinations just fail. fortunately, |
74 | * after finally reaching the author, he was more than willing to fix |
74 | * after finally reaching the author, he was more than willing to fix |
75 | * this probably in 5.6+. |
75 | * this probably in 5.6+. |
… | |
… | |
320 | |
320 | |
321 | return EV_SQES + (tail & EV_SQ_VAR (ring_mask)); |
321 | return EV_SQES + (tail & EV_SQ_VAR (ring_mask)); |
322 | } |
322 | } |
323 | |
323 | |
324 | inline_size |
324 | inline_size |
325 | struct io_uring_sqe * |
325 | void |
326 | iouring_sqe_submit (EV_P_ struct io_uring_sqe *sqe) |
326 | iouring_sqe_submit (EV_P_ struct io_uring_sqe *sqe) |
327 | { |
327 | { |
328 | unsigned idx = sqe - EV_SQES; |
328 | unsigned idx = sqe - EV_SQES; |
329 | |
|
|
330 | printf ("submit idx %d, op %d, fd %d, us5r %p, poll %d\n", idx, sqe->opcode, sqe->fd, sqe->user_data, sqe->poll_events); |
|
|
331 | |
329 | |
332 | EV_SQ_ARRAY [idx] = idx; |
330 | EV_SQ_ARRAY [idx] = idx; |
333 | ECB_MEMORY_FENCE_RELEASE; |
331 | ECB_MEMORY_FENCE_RELEASE; |
334 | ++EV_SQ_VAR (tail); |
332 | ++EV_SQ_VAR (tail); |
335 | /*ECB_MEMORY_FENCE_RELEASE; /* for the time being we assume this is not needed */ |
333 | /*ECB_MEMORY_FENCE_RELEASE; /* for the time being we assume this is not needed */ |
… | |
… | |
348 | iouring_tfd_to = EV_TSTAMP_HUGE; |
346 | iouring_tfd_to = EV_TSTAMP_HUGE; |
349 | } |
347 | } |
350 | |
348 | |
351 | /* called for full and partial cleanup */ |
349 | /* called for full and partial cleanup */ |
352 | ecb_cold |
350 | ecb_cold |
353 | static int |
351 | static void |
354 | iouring_internal_destroy (EV_P) |
352 | iouring_internal_destroy (EV_P) |
355 | { |
353 | { |
356 | close (iouring_tfd); |
354 | close (iouring_tfd); |
357 | close (iouring_fd); |
355 | close (iouring_fd); |
358 | |
356 | |