… | |
… | |
127 | |
127 | |
128 | inline_size |
128 | inline_size |
129 | void |
129 | void |
130 | linuxaio_array_needsize_iocbp (ANIOCBP *base, int offset, int count) |
130 | linuxaio_array_needsize_iocbp (ANIOCBP *base, int offset, int count) |
131 | { |
131 | { |
132 | /* TODO: quite the overhead to allocate every iocb separately, maybe use our own allocator? */ |
|
|
133 | while (count--) |
132 | while (count--) |
134 | { |
133 | { |
|
|
134 | /* TODO: quite the overhead to allocate every iocb separately, maybe use our own allocator? */ |
135 | ANIOCBP iocb = (ANIOCBP)ev_malloc (sizeof (*iocb)); |
135 | ANIOCBP iocb = (ANIOCBP)ev_malloc (sizeof (*iocb)); |
136 | |
136 | |
137 | /* full zero initialise is probably not required at the moment, but |
137 | /* full zero initialise is probably not required at the moment, but |
138 | * this is not well documented, so we better do it. |
138 | * this is not well documented, so we better do it. |
139 | */ |
139 | */ |
… | |
… | |
299 | { |
299 | { |
300 | linuxaio_parse_events (EV_A_ ring->io_events + head, ring->nr - head); |
300 | linuxaio_parse_events (EV_A_ ring->io_events + head, ring->nr - head); |
301 | linuxaio_parse_events (EV_A_ ring->io_events, tail); |
301 | linuxaio_parse_events (EV_A_ ring->io_events, tail); |
302 | } |
302 | } |
303 | |
303 | |
304 | /* TODO: we only need a compiler barrier here, not a read fence */ |
|
|
305 | ECB_MEMORY_FENCE_RELEASE; |
304 | ECB_MEMORY_FENCE_RELAXED; |
306 | /* as an extension to C, we hope that the volatile will make this atomic and once-only */ |
305 | /* as an extension to C, we hope that the volatile will make this atomic and once-only */ |
307 | *(volatile unsigned *)&ring->head = tail; |
306 | *(volatile unsigned *)&ring->head = tail; |
308 | /* make sure kernel can see our new head value - probably not required */ |
307 | /* make sure kernel can see our new head value - probably not required */ |
309 | ECB_MEMORY_FENCE_RELEASE; |
308 | ECB_MEMORY_FENCE_RELEASE; |
310 | |
309 | |