@@ -320,16 +320,14 @@
 
   return EV_SQES + (tail & EV_SQ_VAR (ring_mask));
 }
 
 inline_size
-struct io_uring_sqe *
+void
 iouring_sqe_submit (EV_P_ struct io_uring_sqe *sqe)
 {
   unsigned idx = sqe - EV_SQES;
-
-  printf ("submit idx %d, op %d, fd %d, user %p, poll %d\n", idx, sqe->opcode, sqe->fd, sqe->user_data, sqe->poll_events);
 
   EV_SQ_ARRAY [idx] = idx;
   ECB_MEMORY_FENCE_RELEASE;
   ++EV_SQ_VAR (tail);
   /*ECB_MEMORY_FENCE_RELEASE; /* for the time being we assume this is not needed */
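For readers unfamiliar with io_uring, the three statements that remain in iouring_sqe_submit are the standard submission-queue publication protocol: store the SQE's index into the SQ index array, issue a release fence so the fully initialized SQE is visible before the kernel can observe it, then bump the ring tail. The fence followed by a plain increment is equivalent in effect to a single store-release of the tail. A minimal standalone sketch of the same pattern using C11 atomics (sq_array, sq_tail, SQ_MASK and sq_publish are illustrative names, not libev's):

#include <stdatomic.h>
#include <stdint.h>

/* Illustrative stand-ins for the mmap'ed SQ ring fields. */
static uint32_t sq_array[256];      /* maps ring slots to SQE indices */
static _Atomic uint32_t sq_tail;    /* ring tail, shared with the kernel */
#define SQ_MASK 255u                /* ring_entries - 1 */

/* Publish one fully prepared SQE, identified by its index, to the kernel. */
static void
sq_publish (uint32_t sqe_idx)
{
  /* only this thread writes the tail, so a relaxed load suffices */
  uint32_t tail = atomic_load_explicit (&sq_tail, memory_order_relaxed);

  /* point the next ring slot at the SQE */
  sq_array[tail & SQ_MASK] = sqe_idx;

  /* release store: all prior writes to the SQE and the index array
     become visible before the kernel can see the new tail value */
  atomic_store_explicit (&sq_tail, tail + 1, memory_order_release);
}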
@@ -348,11 +346,11 @@
   iouring_tfd_to = EV_TSTAMP_HUGE;
 }
 
 /* called for full and partial cleanup */
 ecb_cold
-static int
+static void
 iouring_internal_destroy (EV_P)
 {
   close (iouring_tfd);
   close (iouring_fd);
 
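A side note on this cleanup path: the comment marks the function as serving both full and partial cleanup, so the unguarded close () calls presumably rely on the descriptors being initialized to -1 until actually opened; close (-1) then fails harmlessly with EBADF. A sketch of that convention (backend_destroy and the fd names are illustrative, not libev's):

#include <unistd.h>

/* Descriptors start out as -1 so that a teardown running after a
   partially failed setup gets a harmless EBADF from close (). */
static int ring_fd  = -1;
static int timer_fd = -1;

static void
backend_destroy (void)
{
  close (timer_fd);   /* no-op (EBADF) if the fd was never opened */
  close (ring_fd);

  timer_fd = -1;      /* allow destroy to run more than once */
  ring_fd  = -1;
}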