… | |
… | |
117 | # define EV_USE_EPOLL 0 |
117 | # define EV_USE_EPOLL 0 |
118 | # endif |
118 | # endif |
119 | |
119 | |
120 | # if HAVE_LINUX_AIO_ABI_H |
120 | # if HAVE_LINUX_AIO_ABI_H |
121 | # ifndef EV_USE_LINUXAIO |
121 | # ifndef EV_USE_LINUXAIO |
122 | # define EV_USE_LINUXAIO EV_FEATURE_BACKENDS |
122 | # define EV_USE_LINUXAIO 0 /* was: EV_FEATURE_BACKENDS, always off by default */ |
123 | # endif |
123 | # endif |
124 | # else |
124 | # else |
125 | # undef EV_USE_LINUXAIO |
125 | # undef EV_USE_LINUXAIO |
126 | # define EV_USE_LINUXAIO 0 |
126 | # define EV_USE_LINUXAIO 0 |
127 | # endif |
127 | # endif |
… | |
… | |
344 | # define EV_USE_PORT 0 |
344 | # define EV_USE_PORT 0 |
345 | #endif |
345 | #endif |
346 | |
346 | |
347 | #ifndef EV_USE_LINUXAIO |
347 | #ifndef EV_USE_LINUXAIO |
348 | # if __linux /* libev currently assumes linux/aio_abi.h is always available on linux */ |
348 | # if __linux /* libev currently assumes linux/aio_abi.h is always available on linux */ |
349 | # define EV_USE_LINUXAIO 1 |
349 | # define EV_USE_LINUXAIO 0 /* was: 1, always off by default */ |
350 | # else |
350 | # else |
351 | # define EV_USE_LINUXAIO 0 |
351 | # define EV_USE_LINUXAIO 0 |
352 | # endif |
352 | # endif |
353 | #endif |
353 | #endif |
354 | |
354 | |
… | |
… | |
1658 | * our syscalls return < 0, not == -1, on error. which is good |
1658 | * our syscalls return < 0, not == -1, on error. which is good |
1659 | * enough for linux aio. |
1659 | * enough for linux aio. |
1660 | * TODO: arm is also common nowadays, maybe even mips and x86 |
1660 | * TODO: arm is also common nowadays, maybe even mips and x86 |
1661 | * TODO: after implementing this, it suddenly looks like overkill, but its hard to remove... |
1661 | * TODO: after implementing this, it suddenly looks like overkill, but its hard to remove... |
1662 | */ |
1662 | */ |
1663 | #if __GNUC__ && __linux && ECB_AMD64 && !defined __OPTIMIZE_SIZE__ |
1663 | #if __GNUC__ && __linux && ECB_AMD64 && !EV_FEATURE_CODE |
1664 | /* the costly errno access probably kills this for size optimisation */ |
1664 | /* the costly errno access probably kills this for size optimisation */ |
1665 | |
1665 | |
1666 | #define ev_syscall(nr,narg,arg1,arg2,arg3,arg4,arg5,arg6) \ |
1666 | #define ev_syscall(nr,narg,arg1,arg2,arg3,arg4,arg5,arg6) \ |
1667 | ({ \ |
1667 | ({ \ |
1668 | long res; \ |
1668 | long res; \ |
… | |
… | |
2262 | inline_size void |
2262 | inline_size void |
2263 | fd_reify (EV_P) |
2263 | fd_reify (EV_P) |
2264 | { |
2264 | { |
2265 | int i; |
2265 | int i; |
2266 | |
2266 | |
|
|
2267 | /* most backends do not modify the fdchanges list in backend_modify. |
|
|
2268 | * except io_uring, which has fixed-size buffers which might force us |
|
|
2269 | * to handle events in backend_modify, causing fdchanges to be amended, |
|
|
2270 | * which could result in an endless loop. |
|
|
2271 | * to avoid this, we do not dynamically handle fds that were added |
|
|
2272 | * during fd_reify. that means that for those backends, fdchangecnt |
|
|
2273 | * might be non-zero during poll, which must cause them to not block. |
|
|
2274 | * to not put too much of a burden on other backends, this detail |
|
|
2275 | * needs to be handled in the backend. |
|
|
2276 | */ |
|
|
2277 | int changecnt = fdchangecnt; |
|
|
2278 | |
2267 | #if EV_SELECT_IS_WINSOCKET || EV_USE_IOCP |
2279 | #if EV_SELECT_IS_WINSOCKET || EV_USE_IOCP |
2268 | for (i = 0; i < fdchangecnt; ++i) |
2280 | for (i = 0; i < changecnt; ++i) |
2269 | { |
2281 | { |
2270 | int fd = fdchanges [i]; |
2282 | int fd = fdchanges [i]; |
2271 | ANFD *anfd = anfds + fd; |
2283 | ANFD *anfd = anfds + fd; |
2272 | |
2284 | |
2273 | if (anfd->reify & EV__IOFDSET && anfd->head) |
2285 | if (anfd->reify & EV__IOFDSET && anfd->head) |
… | |
… | |
2287 | } |
2299 | } |
2288 | } |
2300 | } |
2289 | } |
2301 | } |
2290 | #endif |
2302 | #endif |
2291 | |
2303 | |
2292 | for (i = 0; i < fdchangecnt; ++i) |
2304 | for (i = 0; i < changecnt; ++i) |
2293 | { |
2305 | { |
2294 | int fd = fdchanges [i]; |
2306 | int fd = fdchanges [i]; |
2295 | ANFD *anfd = anfds + fd; |
2307 | ANFD *anfd = anfds + fd; |
2296 | ev_io *w; |
2308 | ev_io *w; |
2297 | |
2309 | |
… | |
… | |
2313 | |
2325 | |
2314 | if (o_reify & EV__IOFDSET) |
2326 | if (o_reify & EV__IOFDSET) |
2315 | backend_modify (EV_A_ fd, o_events, anfd->events); |
2327 | backend_modify (EV_A_ fd, o_events, anfd->events); |
2316 | } |
2328 | } |
2317 | |
2329 | |
|
|
2330 | /* normally, fdchangecnt hasn't changed. if it has, then new fds have been added. |
|
|
2331 | * this is a rare case (see beginning comment in this function), so we copy them to the |
|
|
2332 | * front and hope the backend handles this case. |
|
|
2333 | */ |
|
|
2334 | if (ecb_expect_false (fdchangecnt != changecnt)) |
|
|
2335 | memmove (fdchanges, fdchanges + changecnt, (fdchangecnt - changecnt) * sizeof (*fdchanges)); |
|
|
2336 | |
2318 | fdchangecnt = 0; |
2337 | fdchangecnt -= changecnt; |
2319 | } |
2338 | } |
2320 | |
2339 | |
2321 | /* something about the given fd changed */ |
2340 | /* something about the given fd changed */ |
2322 | inline_size |
2341 | inline_size |
2323 | void |
2342 | void |
… | |
… | |
2561 | upheap (heap, i + HEAP0); |
2580 | upheap (heap, i + HEAP0); |
2562 | } |
2581 | } |
2563 | |
2582 | |
2564 | /*****************************************************************************/ |
2583 | /*****************************************************************************/ |
2565 | |
2584 | |
2566 | /* associate signal watchers to a signal signal */ |
2585 | /* associate signal watchers to a signal */ |
2567 | typedef struct |
2586 | typedef struct |
2568 | { |
2587 | { |
2569 | EV_ATOMIC_T pending; |
2588 | EV_ATOMIC_T pending; |
2570 | #if EV_MULTIPLICITY |
2589 | #if EV_MULTIPLICITY |
2571 | EV_P; |
2590 | EV_P; |
… | |
… | |
2913 | if (timerfd >= 0) |
2932 | if (timerfd >= 0) |
2914 | { |
2933 | { |
2915 | fd_intern (timerfd); /* just to be sure */ |
2934 | fd_intern (timerfd); /* just to be sure */ |
2916 | |
2935 | |
2917 | ev_io_init (&timerfd_w, timerfdcb, timerfd, EV_READ); |
2936 | ev_io_init (&timerfd_w, timerfdcb, timerfd, EV_READ); |
2918 | ev_set_priority (&sigfd_w, EV_MINPRI); |
2937 | ev_set_priority (&timerfd_w, EV_MINPRI); |
2919 | ev_io_start (EV_A_ &timerfd_w); |
2938 | ev_io_start (EV_A_ &timerfd_w); |
2920 | ev_unref (EV_A); /* watcher should not keep loop alive */ |
2939 | ev_unref (EV_A); /* watcher should not keep loop alive */ |
2921 | |
2940 | |
2922 | /* (re-) arm timer */ |
2941 | /* (re-) arm timer */ |
2923 | timerfdcb (EV_A_ 0, 0); |
2942 | timerfdcb (EV_A_ 0, 0); |
… | |
… | |
2982 | unsigned int |
3001 | unsigned int |
2983 | ev_supported_backends (void) EV_NOEXCEPT |
3002 | ev_supported_backends (void) EV_NOEXCEPT |
2984 | { |
3003 | { |
2985 | unsigned int flags = 0; |
3004 | unsigned int flags = 0; |
2986 | |
3005 | |
2987 | if (EV_USE_PORT ) flags |= EVBACKEND_PORT; |
3006 | if (EV_USE_PORT ) flags |= EVBACKEND_PORT; |
2988 | if (EV_USE_KQUEUE ) flags |= EVBACKEND_KQUEUE; |
3007 | if (EV_USE_KQUEUE ) flags |= EVBACKEND_KQUEUE; |
2989 | if (EV_USE_EPOLL ) flags |= EVBACKEND_EPOLL; |
3008 | if (EV_USE_EPOLL ) flags |= EVBACKEND_EPOLL; |
2990 | if (EV_USE_LINUXAIO) flags |= EVBACKEND_LINUXAIO; |
3009 | if (EV_USE_LINUXAIO ) flags |= EVBACKEND_LINUXAIO; |
2991 | if (EV_USE_IOURING ) flags |= EVBACKEND_IOURING; |
3010 | if (EV_USE_IOURING && ev_linux_version () >= 0x050601) flags |= EVBACKEND_IOURING; /* 5.6.1+ */ |
2992 | if (EV_USE_POLL ) flags |= EVBACKEND_POLL; |
3011 | if (EV_USE_POLL ) flags |= EVBACKEND_POLL; |
2993 | if (EV_USE_SELECT ) flags |= EVBACKEND_SELECT; |
3012 | if (EV_USE_SELECT ) flags |= EVBACKEND_SELECT; |
2994 | |
3013 | |
2995 | return flags; |
3014 | return flags; |
2996 | } |
3015 | } |
2997 | |
3016 | |
2998 | ecb_cold |
3017 | ecb_cold |
2999 | unsigned int |
3018 | unsigned int |
… | |
… | |
3029 | |
3048 | |
3030 | ecb_cold |
3049 | ecb_cold |
3031 | unsigned int |
3050 | unsigned int |
3032 | ev_embeddable_backends (void) EV_NOEXCEPT |
3051 | ev_embeddable_backends (void) EV_NOEXCEPT |
3033 | { |
3052 | { |
3034 | int flags = EVBACKEND_EPOLL | EVBACKEND_KQUEUE | EVBACKEND_PORT; |
3053 | int flags = EVBACKEND_EPOLL | EVBACKEND_KQUEUE | EVBACKEND_PORT | EVBACKEND_IOURING; |
3035 | |
3054 | |
3036 | /* epoll embeddability broken on all linux versions up to at least 2.6.23 */ |
3055 | /* epoll embeddability broken on all linux versions up to at least 2.6.23 */ |
3037 | if (ev_linux_version () < 0x020620) /* disable it on linux < 2.6.32 */ |
3056 | if (ev_linux_version () < 0x020620) /* disable it on linux < 2.6.32 */ |
3038 | flags &= ~EVBACKEND_EPOLL; |
3057 | flags &= ~EVBACKEND_EPOLL; |
3039 | |
3058 | |
3040 | /* EVBACKEND_LINUXAIO is theoretically embeddable, but suffers from a performance overhead */ |
3059 | /* EVBACKEND_LINUXAIO is theoretically embeddable, but suffers from a performance overhead */ |
3041 | |
|
|
3042 | /* EVBACKEND_IOURING is practically embeddable, but the current implementation is not |
|
|
3043 | * because our backend_fd is the epoll fd we need as fallback. |
|
|
3044 | * if the kernel ever is fixed, this might change... |
|
|
3045 | */ |
|
|
3046 | |
3060 | |
3047 | return flags; |
3061 | return flags; |
3048 | } |
3062 | } |
3049 | |
3063 | |
3050 | unsigned int |
3064 | unsigned int |
… | |
… | |
5076 | ev_run (EV_A_ EVRUN_NOWAIT); |
5090 | ev_run (EV_A_ EVRUN_NOWAIT); |
5077 | } |
5091 | } |
5078 | } |
5092 | } |
5079 | } |
5093 | } |
5080 | |
5094 | |
|
|
5095 | #if EV_FORK_ENABLE |
5081 | static void |
5096 | static void |
5082 | embed_fork_cb (EV_P_ ev_fork *fork_w, int revents) |
5097 | embed_fork_cb (EV_P_ ev_fork *fork_w, int revents) |
5083 | { |
5098 | { |
5084 | ev_embed *w = (ev_embed *)(((char *)fork_w) - offsetof (ev_embed, fork)); |
5099 | ev_embed *w = (ev_embed *)(((char *)fork_w) - offsetof (ev_embed, fork)); |
5085 | |
5100 | |
… | |
… | |
5092 | ev_run (EV_A_ EVRUN_NOWAIT); |
5107 | ev_run (EV_A_ EVRUN_NOWAIT); |
5093 | } |
5108 | } |
5094 | |
5109 | |
5095 | ev_embed_start (EV_A_ w); |
5110 | ev_embed_start (EV_A_ w); |
5096 | } |
5111 | } |
|
|
5112 | #endif |
5097 | |
5113 | |
5098 | #if 0 |
5114 | #if 0 |
5099 | static void |
5115 | static void |
5100 | embed_idle_cb (EV_P_ ev_idle *idle, int revents) |
5116 | embed_idle_cb (EV_P_ ev_idle *idle, int revents) |
5101 | { |
5117 | { |
… | |
… | |
5122 | |
5138 | |
5123 | ev_prepare_init (&w->prepare, embed_prepare_cb); |
5139 | ev_prepare_init (&w->prepare, embed_prepare_cb); |
5124 | ev_set_priority (&w->prepare, EV_MINPRI); |
5140 | ev_set_priority (&w->prepare, EV_MINPRI); |
5125 | ev_prepare_start (EV_A_ &w->prepare); |
5141 | ev_prepare_start (EV_A_ &w->prepare); |
5126 | |
5142 | |
|
|
5143 | #if EV_FORK_ENABLE |
5127 | ev_fork_init (&w->fork, embed_fork_cb); |
5144 | ev_fork_init (&w->fork, embed_fork_cb); |
5128 | ev_fork_start (EV_A_ &w->fork); |
5145 | ev_fork_start (EV_A_ &w->fork); |
|
|
5146 | #endif |
5129 | |
5147 | |
5130 | /*ev_idle_init (&w->idle, embed_idle_cb);*/ |
5148 | /*ev_idle_init (&w->idle, embed_idle_cb);*/ |
5131 | |
5149 | |
5132 | ev_start (EV_A_ (W)w, 1); |
5150 | ev_start (EV_A_ (W)w, 1); |
5133 | |
5151 | |
… | |
… | |
5143 | |
5161 | |
5144 | EV_FREQUENT_CHECK; |
5162 | EV_FREQUENT_CHECK; |
5145 | |
5163 | |
5146 | ev_io_stop (EV_A_ &w->io); |
5164 | ev_io_stop (EV_A_ &w->io); |
5147 | ev_prepare_stop (EV_A_ &w->prepare); |
5165 | ev_prepare_stop (EV_A_ &w->prepare); |
|
|
5166 | #if EV_FORK_ENABLE |
5148 | ev_fork_stop (EV_A_ &w->fork); |
5167 | ev_fork_stop (EV_A_ &w->fork); |
|
|
5168 | #endif |
5149 | |
5169 | |
5150 | ev_stop (EV_A_ (W)w); |
5170 | ev_stop (EV_A_ (W)w); |
5151 | |
5171 | |
5152 | EV_FREQUENT_CHECK; |
5172 | EV_FREQUENT_CHECK; |
5153 | } |
5173 | } |