… | |
… | |
116 | VARx(int, linuxaio_submitcnt) |
116 | VARx(int, linuxaio_submitcnt) |
117 | VARx(int, linuxaio_submitmax) |
117 | VARx(int, linuxaio_submitmax) |
118 | VARx(ev_io, linuxaio_epoll_w) |
118 | VARx(ev_io, linuxaio_epoll_w) |
119 | #endif |
119 | #endif |
120 | |
120 | |
|
|
#if EV_USE_IOURING || EV_GENWRAP
/* Per-loop state for the Linux io_uring backend.
   Each VARx(type, name) line is consumed by a project macro defined
   elsewhere (member declaration or wrapper generation, depending on
   EV_GENWRAP), so lines here deliberately carry no trailing semicolon.
   Field meanings are suggested by their names only — the sq_*/cq_*
   groups presumably mirror the kernel's submission/completion ring
   layout; verify against the io_uring backend implementation. */
VARx(int, iouring_fd)
VARx(unsigned, iouring_to_submit)  /* fixed: stray ';' removed to match every sibling VARx line */
VARx(int, iouring_entries)
VARx(int, iouring_max_entries)
VARx(void *, iouring_sq_ring)
VARx(void *, iouring_cq_ring)
VARx(void *, iouring_sqes)
VARx(uint32_t, iouring_sq_ring_size)
VARx(uint32_t, iouring_cq_ring_size)
VARx(uint32_t, iouring_sqes_size)
VARx(uint32_t, iouring_sq_head)
VARx(uint32_t, iouring_sq_tail)
VARx(uint32_t, iouring_sq_ring_mask)
VARx(uint32_t, iouring_sq_ring_entries)
VARx(uint32_t, iouring_sq_flags)
VARx(uint32_t, iouring_sq_dropped)
VARx(uint32_t, iouring_sq_array)
VARx(uint32_t, iouring_cq_head)
VARx(uint32_t, iouring_cq_tail)
VARx(uint32_t, iouring_cq_ring_mask)
VARx(uint32_t, iouring_cq_ring_entries)
VARx(uint32_t, iouring_cq_overflow)
VARx(uint32_t, iouring_cq_cqes)
VARx(ev_tstamp, iouring_tfd_to)    /* ev_tstamp is a project typedef; NOTE(review): looks like a timerfd timeout — confirm */
VARx(int, iouring_tfd)
VARx(ev_io, iouring_tfd_w)         /* ev_io watchers are project types declared elsewhere */
VARx(ev_io, iouring_epoll_w)
#endif
|
|
150 | |
121 | #if EV_USE_KQUEUE || EV_GENWRAP |
151 | #if EV_USE_KQUEUE || EV_GENWRAP |
122 | VARx(pid_t, kqueue_fd_pid) |
152 | VARx(pid_t, kqueue_fd_pid) |
123 | VARx(struct kevent *, kqueue_changes) |
153 | VARx(struct kevent *, kqueue_changes) |
124 | VARx(int, kqueue_changemax) |
154 | VARx(int, kqueue_changemax) |
125 | VARx(int, kqueue_changecnt) |
155 | VARx(int, kqueue_changecnt) |