/cvs/libev/ev.c

Comparing libev/ev.c (file contents):
Revision 1.505 by root, Wed Jul 10 14:25:35 2019 UTC vs.
Revision 1.521 by root, Sat Dec 28 07:47:35 2019 UTC

117# define EV_USE_EPOLL 0 117# define EV_USE_EPOLL 0
118# endif 118# endif
119 119
120# if HAVE_LINUX_AIO_ABI_H 120# if HAVE_LINUX_AIO_ABI_H
121# ifndef EV_USE_LINUXAIO 121# ifndef EV_USE_LINUXAIO
122# define EV_USE_LINUXAIO EV_FEATURE_BACKENDS 122# define EV_USE_LINUXAIO 0 /* was: EV_FEATURE_BACKENDS, always off by default */
123# endif 123# endif
124# else 124# else
125# undef EV_USE_LINUXAIO 125# undef EV_USE_LINUXAIO
126# define EV_USE_LINUXAIO 0 126# define EV_USE_LINUXAIO 0
127# endif 127# endif
128 128
129# if HAVE_LINUX_FS_H && HAVE_SYS_TIMERFD_H && HAVE_KERNEL_RWF_T
130# ifndef EV_USE_IOURING
131# define EV_USE_IOURING EV_FEATURE_BACKENDS
132# endif
133# else
134# undef EV_USE_IOURING
135# define EV_USE_IOURING 0
136# endif
137
129# if HAVE_KQUEUE && HAVE_SYS_EVENT_H 138# if HAVE_KQUEUE && HAVE_SYS_EVENT_H
130# ifndef EV_USE_KQUEUE 139# ifndef EV_USE_KQUEUE
131# define EV_USE_KQUEUE EV_FEATURE_BACKENDS 140# define EV_USE_KQUEUE EV_FEATURE_BACKENDS
132# endif 141# endif
133# else 142# else
168# endif 177# endif
169# else 178# else
170# undef EV_USE_EVENTFD 179# undef EV_USE_EVENTFD
171# define EV_USE_EVENTFD 0 180# define EV_USE_EVENTFD 0
172# endif 181# endif
173 182
183# if HAVE_SYS_TIMERFD_H
184# ifndef EV_USE_TIMERFD
185# define EV_USE_TIMERFD EV_FEATURE_OS
186# endif
187# else
188# undef EV_USE_TIMERFD
189# define EV_USE_TIMERFD 0
190# endif
191
174#endif 192#endif
175 193
176/* OS X, in its infinite idiocy, actually HARDCODES 194/* OS X, in its infinite idiocy, actually HARDCODES
177 * a limit of 1024 into their select. Where people have brains, 195 * a limit of 1024 into their select. Where people have brains,
178 * OS X engineers apparently have a vacuum. Or maybe they were 196 * OS X engineers apparently have a vacuum. Or maybe they were
326# define EV_USE_PORT 0 344# define EV_USE_PORT 0
327#endif 345#endif
328 346
329#ifndef EV_USE_LINUXAIO 347#ifndef EV_USE_LINUXAIO
330# if __linux /* libev currently assumes linux/aio_abi.h is always available on linux */ 348# if __linux /* libev currently assumes linux/aio_abi.h is always available on linux */
331# define EV_USE_LINUXAIO 1 349# define EV_USE_LINUXAIO 0 /* was: 1, always off by default */
332# else 350# else
333# define EV_USE_LINUXAIO 0 351# define EV_USE_LINUXAIO 0
334# endif 352# endif
335#endif 353#endif
336 354
337#ifndef EV_USE_IOURING 355#ifndef EV_USE_IOURING
338# if __linux 356# if __linux /* later checks might disable again */
339# define EV_USE_IOURING 0 357# define EV_USE_IOURING 1
340# else 358# else
341# define EV_USE_IOURING 0 359# define EV_USE_IOURING 0
342# endif 360# endif
343#endif 361#endif
344 362
369#ifndef EV_USE_SIGNALFD 387#ifndef EV_USE_SIGNALFD
370# if __linux && (__GLIBC__ > 2 || (__GLIBC__ == 2 && __GLIBC_MINOR__ >= 7)) 388# if __linux && (__GLIBC__ > 2 || (__GLIBC__ == 2 && __GLIBC_MINOR__ >= 7))
371# define EV_USE_SIGNALFD EV_FEATURE_OS 389# define EV_USE_SIGNALFD EV_FEATURE_OS
372# else 390# else
373# define EV_USE_SIGNALFD 0 391# define EV_USE_SIGNALFD 0
392# endif
393#endif
394
395#ifndef EV_USE_TIMERFD
396# if __linux && (__GLIBC__ > 2 || (__GLIBC__ == 2 && __GLIBC_MINOR__ >= 8))
397# define EV_USE_TIMERFD EV_FEATURE_OS
398# else
399# define EV_USE_TIMERFD 0
374# endif 400# endif
375#endif 401#endif
376 402
377#if 0 /* debugging */ 403#if 0 /* debugging */
378# define EV_VERIFY 3 404# define EV_VERIFY 3
438#if !EV_STAT_ENABLE 464#if !EV_STAT_ENABLE
439# undef EV_USE_INOTIFY 465# undef EV_USE_INOTIFY
440# define EV_USE_INOTIFY 0 466# define EV_USE_INOTIFY 0
441#endif 467#endif
442 468
469#if __linux && EV_USE_IOURING
470# include <linux/version.h>
471# if LINUX_VERSION_CODE < KERNEL_VERSION(4,14,0)
472# undef EV_USE_IOURING
473# define EV_USE_IOURING 0
474# endif
475#endif
476
443#if !EV_USE_NANOSLEEP 477#if !EV_USE_NANOSLEEP
444/* hp-ux has it in sys/time.h, which we unconditionally include above */ 478/* hp-ux has it in sys/time.h, which we unconditionally include above */
445# if !defined _WIN32 && !defined __hpux 479# if !defined _WIN32 && !defined __hpux
446# include <sys/select.h> 480# include <sys/select.h>
447# endif 481# endif
481# define EV_USE_INOTIFY 0 515# define EV_USE_INOTIFY 0
482# endif 516# endif
483#endif 517#endif
484 518
485#if EV_USE_EVENTFD 519#if EV_USE_EVENTFD
486/* our minimum requirement is glibc 2.7 which has the stub, but not the header */ 520/* our minimum requirement is glibc 2.7 which has the stub, but not the full header */
487# include <stdint.h> 521# include <stdint.h>
488# ifndef EFD_NONBLOCK 522# ifndef EFD_NONBLOCK
489# define EFD_NONBLOCK O_NONBLOCK 523# define EFD_NONBLOCK O_NONBLOCK
490# endif 524# endif
491# ifndef EFD_CLOEXEC 525# ifndef EFD_CLOEXEC
497# endif 531# endif
498EV_CPP(extern "C") int (eventfd) (unsigned int initval, int flags); 532EV_CPP(extern "C") int (eventfd) (unsigned int initval, int flags);
499#endif 533#endif
500 534
501#if EV_USE_SIGNALFD 535#if EV_USE_SIGNALFD
502/* our minimum requirement is glibc 2.7 which has the stub, but not the header */ 536/* our minimum requirement is glibc 2.7 which has the stub, but not the full header */
503# include <stdint.h> 537# include <stdint.h>
504# ifndef SFD_NONBLOCK 538# ifndef SFD_NONBLOCK
505# define SFD_NONBLOCK O_NONBLOCK 539# define SFD_NONBLOCK O_NONBLOCK
506# endif 540# endif
507# ifndef SFD_CLOEXEC 541# ifndef SFD_CLOEXEC
509# define SFD_CLOEXEC O_CLOEXEC 543# define SFD_CLOEXEC O_CLOEXEC
510# else 544# else
511# define SFD_CLOEXEC 02000000 545# define SFD_CLOEXEC 02000000
512# endif 546# endif
513# endif 547# endif
514EV_CPP (extern "C") int signalfd (int fd, const sigset_t *mask, int flags); 548EV_CPP (extern "C") int (signalfd) (int fd, const sigset_t *mask, int flags);
515 549
516struct signalfd_siginfo 550struct signalfd_siginfo
517{ 551{
518 uint32_t ssi_signo; 552 uint32_t ssi_signo;
519 char pad[128 - sizeof (uint32_t)]; 553 char pad[128 - sizeof (uint32_t)];
520}; 554};
555#endif
556
557/* for timerfd, libev core requires TFD_TIMER_CANCEL_ON_SET &c */
558#if EV_USE_TIMERFD
559# include <sys/timerfd.h>
560/* timerfd is only used for periodics */
561# if !(defined (TFD_TIMER_CANCEL_ON_SET) && defined (TFD_CLOEXEC) && defined (TFD_NONBLOCK)) || !EV_PERIODIC_ENABLE
562# undef EV_USE_TIMERFD
563# define EV_USE_TIMERFD 0
564# endif
521#endif 565#endif
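
Editorial note on the feature knobs above: every EV_USE_* symbol is an ordinary preprocessor define guarded by #ifndef, so a build can pre-empt the autodetection before including ev.c. A minimal sketch (not part of the diff; the wrapper file is illustrative, following the usual "embed ev.c" build style):

    /* myev.c - illustrative wrapper */
    #define EV_USE_TIMERFD 0   /* opt out of the new timerfd clock-change detection */
    #define EV_USE_IOURING 1   /* ask for io_uring; later kernel checks may still disable it */
    #include "ev.c"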
522 566
523/*****************************************************************************/ 567/*****************************************************************************/
524 568
525#if EV_VERIFY >= 3 569#if EV_VERIFY >= 3
544#define EV_TSTAMP_HUGE \ 588#define EV_TSTAMP_HUGE \
545 (sizeof (time_t) >= 8 ? 10000000000000. \ 589 (sizeof (time_t) >= 8 ? 10000000000000. \
546 : 0 < (time_t)4294967295 ? 4294967295. \ 590 : 0 < (time_t)4294967295 ? 4294967295. \
547 : 2147483647.) \ 591 : 2147483647.) \
548 592
593#ifndef EV_TS_CONST
594# define EV_TS_CONST(nv) nv
595# define EV_TS_TO_MSEC(a) a * 1e3 + 0.9999
596# define EV_TS_FROM_USEC(us) us * 1e-6
549#define EV_TV_SET(tv,t) do { tv.tv_sec = (long)t; tv.tv_usec = (long)((t - tv.tv_sec) * 1e6); } while (0) 597# define EV_TV_SET(tv,t) do { tv.tv_sec = (long)t; tv.tv_usec = (long)((t - tv.tv_sec) * 1e6); } while (0)
550#define EV_TS_SET(ts,t) do { ts.tv_sec = (long)t; ts.tv_nsec = (long)((t - ts.tv_sec) * 1e9); } while (0) 598# define EV_TS_SET(ts,t) do { ts.tv_sec = (long)t; ts.tv_nsec = (long)((t - ts.tv_sec) * 1e9); } while (0)
551#define EV_TV_GET(tv) ((tv).tv_sec + (tv).tv_usec * 1e6) 599# define EV_TV_GET(tv) ((tv).tv_sec + (tv).tv_usec * 1e-6)
552#define EV_TS_GET(ts) ((ts).tv_sec + (ts).tv_nsec * 1e9) 600# define EV_TS_GET(ts) ((ts).tv_sec + (ts).tv_nsec * 1e-9)
601#endif
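
For orientation, here is what the default (floating-point ev_tstamp) definitions above evaluate to. This is an editorial sketch, not part of ev.c; the function name is invented and it would only compile inside ev.c after these definitions:

    static void
    ev_ts_macro_demo (void)
    {
      ev_tstamp delay = EV_TS_CONST (2.5);   /* 2.5 seconds */
      struct timespec ts;
      struct timeval  tv;

      EV_TS_SET (ts, delay);                 /* ts.tv_sec == 2, ts.tv_nsec == 500000000 */
      EV_TV_SET (tv, delay);                 /* tv.tv_sec == 2, tv.tv_usec == 500000    */

      /* EV_TS_TO_MSEC rounds fractional milliseconds up via the +0.9999 term */
      unsigned long ms   = EV_TS_TO_MSEC (delay);  /* 2500 */
      ev_tstamp     back = EV_TS_GET (ts);         /* 2.5 again (tv_nsec * 1e-9) */

      (void)ms; (void)back;
    }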
553 602
554/* the following is ecb.h embedded into libev - use update_ev_c to update from an external copy */ 603/* the following is ecb.h embedded into libev - use update_ev_c to update from an external copy */
555/* ECB.H BEGIN */ 604/* ECB.H BEGIN */
556/* 605/*
557 * libecb - http://software.schmorp.de/pkg/libecb 606 * libecb - http://software.schmorp.de/pkg/libecb
1609 * our syscalls return < 0, not == -1, on error. which is good 1658 * our syscalls return < 0, not == -1, on error. which is good
1610 * enough for linux aio. 1659 * enough for linux aio.
1611 * TODO: arm is also common nowadays, maybe even mips and x86 1660 * TODO: arm is also common nowadays, maybe even mips and x86
1612 * TODO: after implementing this, it suddenly looks like overkill, but its hard to remove... 1661 * TODO: after implementing this, it suddenly looks like overkill, but its hard to remove...
1613 */ 1662 */
1614#if __GNUC__ && __linux && ECB_AMD64 && !defined __OPTIMIZE_SIZE__ 1663#if __GNUC__ && __linux && ECB_AMD64 && !EV_FEATURE_CODE
1615 /* the costly errno access probably kills this for size optimisation */ 1664 /* the costly errno access probably kills this for size optimisation */
1616 1665
1617 #define ev_syscall(nr,narg,arg1,arg2,arg3,arg4,arg5,arg6) \ 1666 #define ev_syscall(nr,narg,arg1,arg2,arg3,arg4,arg5,arg6) \
1618 ({ \ 1667 ({ \
1619 long res; \ 1668 long res; \
1963 static struct ev_loop default_loop_struct; 2012 static struct ev_loop default_loop_struct;
1964 EV_API_DECL struct ev_loop *ev_default_loop_ptr = 0; /* needs to be initialised to make it a definition despite extern */ 2013 EV_API_DECL struct ev_loop *ev_default_loop_ptr = 0; /* needs to be initialised to make it a definition despite extern */
1965 2014
1966#else 2015#else
1967 2016
1968 EV_API_DECL ev_tstamp ev_rt_now = 0; /* needs to be initialised to make it a definition despite extern */ 2017 EV_API_DECL ev_tstamp ev_rt_now = EV_TS_CONST (0.); /* needs to be initialised to make it a definition despite extern */
1969 #define VAR(name,decl) static decl; 2018 #define VAR(name,decl) static decl;
1970 #include "ev_vars.h" 2019 #include "ev_vars.h"
1971 #undef VAR 2020 #undef VAR
1972 2021
1973 static int ev_default_loop_ptr; 2022 static int ev_default_loop_ptr;
1999 clock_gettime (CLOCK_REALTIME, &ts); 2048 clock_gettime (CLOCK_REALTIME, &ts);
2000 return EV_TS_GET (ts); 2049 return EV_TS_GET (ts);
2001 } 2050 }
2002#endif 2051#endif
2003 2052
2053 {
2004 struct timeval tv; 2054 struct timeval tv;
2005 gettimeofday (&tv, 0); 2055 gettimeofday (&tv, 0);
2006 return EV_TV_GET (tv); 2056 return EV_TV_GET (tv);
2057 }
2007} 2058}
2008#endif 2059#endif
2009 2060
2010inline_size ev_tstamp 2061inline_size ev_tstamp
2011get_clock (void) 2062get_clock (void)
2031#endif 2082#endif
2032 2083
2033void 2084void
2034ev_sleep (ev_tstamp delay) EV_NOEXCEPT 2085ev_sleep (ev_tstamp delay) EV_NOEXCEPT
2035{ 2086{
2036 if (delay > 0.) 2087 if (delay > EV_TS_CONST (0.))
2037 { 2088 {
2038#if EV_USE_NANOSLEEP 2089#if EV_USE_NANOSLEEP
2039 struct timespec ts; 2090 struct timespec ts;
2040 2091
2041 EV_TS_SET (ts, delay); 2092 EV_TS_SET (ts, delay);
2042 nanosleep (&ts, 0); 2093 nanosleep (&ts, 0);
2043#elif defined _WIN32 2094#elif defined _WIN32
2044 /* maybe this should round up, as ms is very low resolution */ 2095 /* maybe this should round up, as ms is very low resolution */
2045 /* compared to select (µs) or nanosleep (ns) */ 2096 /* compared to select (µs) or nanosleep (ns) */
2046 Sleep ((unsigned long)(delay * 1e3)); 2097 Sleep ((unsigned long)(EV_TS_TO_MSEC (delay)));
2047#else 2098#else
2048 struct timeval tv; 2099 struct timeval tv;
2049 2100
2050 /* here we rely on sys/time.h + sys/types.h + unistd.h providing select */ 2101 /* here we rely on sys/time.h + sys/types.h + unistd.h providing select */
2051 /* something not guaranteed by newer posix versions, but guaranteed */ 2102 /* something not guaranteed by newer posix versions, but guaranteed */
2211inline_size void 2262inline_size void
2212fd_reify (EV_P) 2263fd_reify (EV_P)
2213{ 2264{
2214 int i; 2265 int i;
2215 2266
 2267 /* most backends do not modify the fdchanges list in backend_modify.
2268 * except io_uring, which has fixed-size buffers which might force us
 2269 * to handle events in backend_modify, causing fdchanges to be amended,
2270 * which could result in an endless loop.
2271 * to avoid this, we do not dynamically handle fds that were added
 2272 * during fd_reify. that means that for those backends, fdchangecnt
2273 * might be non-zero during poll, which must cause them to not block.
2274 * to not put too much of a burden on other backends, this detail
2275 * needs to be handled in the backend.
2276 */
2277 int changecnt = fdchangecnt;
2278
2216#if EV_SELECT_IS_WINSOCKET || EV_USE_IOCP 2279#if EV_SELECT_IS_WINSOCKET || EV_USE_IOCP
2217 for (i = 0; i < fdchangecnt; ++i) 2280 for (i = 0; i < changecnt; ++i)
2218 { 2281 {
2219 int fd = fdchanges [i]; 2282 int fd = fdchanges [i];
2220 ANFD *anfd = anfds + fd; 2283 ANFD *anfd = anfds + fd;
2221 2284
2222 if (anfd->reify & EV__IOFDSET && anfd->head) 2285 if (anfd->reify & EV__IOFDSET && anfd->head)
2236 } 2299 }
2237 } 2300 }
2238 } 2301 }
2239#endif 2302#endif
2240 2303
2241 for (i = 0; i < fdchangecnt; ++i) 2304 for (i = 0; i < changecnt; ++i)
2242 { 2305 {
2243 int fd = fdchanges [i]; 2306 int fd = fdchanges [i];
2244 ANFD *anfd = anfds + fd; 2307 ANFD *anfd = anfds + fd;
2245 ev_io *w; 2308 ev_io *w;
2246 2309
2262 2325
2263 if (o_reify & EV__IOFDSET) 2326 if (o_reify & EV__IOFDSET)
2264 backend_modify (EV_A_ fd, o_events, anfd->events); 2327 backend_modify (EV_A_ fd, o_events, anfd->events);
2265 } 2328 }
2266 2329
2330 /* normally, fdchangecnt hasn't changed. if it has, then new fds have been added.
2331 * this is a rare case (see beginning comment in this function), so we copy them to the
2332 * front and hope the backend handles this case.
2333 */
2334 if (ecb_expect_false (fdchangecnt != changecnt))
2335 memmove (fdchanges, fdchanges + changecnt, (fdchangecnt - changecnt) * sizeof (*fdchanges));
2336
2267 fdchangecnt = 0; 2337 fdchangecnt -= changecnt;
2268} 2338}
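
The memmove compaction above is easiest to see with concrete numbers. A standalone editorial sketch, not part of ev.c (array contents invented for illustration):

    #include <stdio.h>
    #include <string.h>

    int main (void)
    {
      /* three fds were queued before fd_reify; the backend queued two more
       * (9 and 11) from inside backend_modify while we were processing them */
      int fdchanges [8] = { 4, 5, 6, 9, 11 };
      int fdchangecnt = 5;
      int changecnt   = 3; /* snapshot taken at the top of fd_reify */

      if (fdchangecnt != changecnt)
        memmove (fdchanges, fdchanges + changecnt,
                 (fdchangecnt - changecnt) * sizeof (*fdchanges));

      fdchangecnt -= changecnt;

      /* prints "2 pending: 9 11" - exactly the entries the next poll/fd_reify must see */
      printf ("%d pending: %d %d\n", fdchangecnt, fdchanges [0], fdchanges [1]);
      return 0;
    }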
2269 2339
2270/* something about the given fd changed */ 2340/* something about the given fd changed */
2271inline_size 2341inline_size
2272void 2342void
2401 2471
2402 /* find minimum child */ 2472 /* find minimum child */
2403 if (ecb_expect_true (pos + DHEAP - 1 < E)) 2473 if (ecb_expect_true (pos + DHEAP - 1 < E))
2404 { 2474 {
2405 /* fast path */ (minpos = pos + 0), (minat = ANHE_at (*minpos)); 2475 /* fast path */ (minpos = pos + 0), (minat = ANHE_at (*minpos));
2406 if ( ANHE_at (pos [1]) < minat) (minpos = pos + 1), (minat = ANHE_at (*minpos)); 2476 if ( minat > ANHE_at (pos [1])) (minpos = pos + 1), (minat = ANHE_at (*minpos));
2407 if ( ANHE_at (pos [2]) < minat) (minpos = pos + 2), (minat = ANHE_at (*minpos)); 2477 if ( minat > ANHE_at (pos [2])) (minpos = pos + 2), (minat = ANHE_at (*minpos));
2408 if ( ANHE_at (pos [3]) < minat) (minpos = pos + 3), (minat = ANHE_at (*minpos)); 2478 if ( minat > ANHE_at (pos [3])) (minpos = pos + 3), (minat = ANHE_at (*minpos));
2409 } 2479 }
2410 else if (pos < E) 2480 else if (pos < E)
2411 { 2481 {
2412 /* slow path */ (minpos = pos + 0), (minat = ANHE_at (*minpos)); 2482 /* slow path */ (minpos = pos + 0), (minat = ANHE_at (*minpos));
2413 if (pos + 1 < E && ANHE_at (pos [1]) < minat) (minpos = pos + 1), (minat = ANHE_at (*minpos)); 2483 if (pos + 1 < E && minat > ANHE_at (pos [1])) (minpos = pos + 1), (minat = ANHE_at (*minpos));
2414 if (pos + 2 < E && ANHE_at (pos [2]) < minat) (minpos = pos + 2), (minat = ANHE_at (*minpos)); 2484 if (pos + 2 < E && minat > ANHE_at (pos [2])) (minpos = pos + 2), (minat = ANHE_at (*minpos));
2415 if (pos + 3 < E && ANHE_at (pos [3]) < minat) (minpos = pos + 3), (minat = ANHE_at (*minpos)); 2485 if (pos + 3 < E && minat > ANHE_at (pos [3])) (minpos = pos + 3), (minat = ANHE_at (*minpos));
2416 } 2486 }
2417 else 2487 else
2418 break; 2488 break;
2419 2489
2420 if (ANHE_at (he) <= minat) 2490 if (ANHE_at (he) <= minat)
2428 2498
2429 heap [k] = he; 2499 heap [k] = he;
2430 ev_active (ANHE_w (he)) = k; 2500 ev_active (ANHE_w (he)) = k;
2431} 2501}
2432 2502
2433#else /* 4HEAP */ 2503#else /* not 4HEAP */
2434 2504
2435#define HEAP0 1 2505#define HEAP0 1
2436#define HPARENT(k) ((k) >> 1) 2506#define HPARENT(k) ((k) >> 1)
2437#define UPHEAP_DONE(p,k) (!(p)) 2507#define UPHEAP_DONE(p,k) (!(p))
2438 2508
2824 2894
2825#endif 2895#endif
2826 2896
2827/*****************************************************************************/ 2897/*****************************************************************************/
2828 2898
2899#if EV_USE_TIMERFD
2900
2901static void periodics_reschedule (EV_P);
2902
2903static void
2904timerfdcb (EV_P_ ev_io *iow, int revents)
2905{
2906 struct itimerspec its = { 0 };
2907
 2908 /* since we can't easily come up with a (portable) maximum value of time_t,
2909 * we wake up once per month, which hopefully is rare enough to not
2910 * be a problem. */
2911 its.it_value.tv_sec = ev_rt_now + 86400 * 30;
2912 timerfd_settime (timerfd, TFD_TIMER_ABSTIME | TFD_TIMER_CANCEL_ON_SET, &its, 0);
2913
2914 ev_rt_now = ev_time ();
2915 /* periodics_reschedule only needs ev_rt_now */
2916 /* but maybe in the future we want the full treatment. */
2917 /*
2918 now_floor = EV_TS_CONST (0.);
2919 time_update (EV_A_ EV_TSTAMP_HUGE);
2920 */
2921 periodics_reschedule (EV_A);
2922}
2923
2924ecb_noinline ecb_cold
2925static void
2926evtimerfd_init (EV_P)
2927{
2928 if (!ev_is_active (&timerfd_w))
2929 {
2930 timerfd = timerfd_create (CLOCK_REALTIME, TFD_NONBLOCK | TFD_CLOEXEC);
2931
2932 if (timerfd >= 0)
2933 {
2934 fd_intern (timerfd); /* just to be sure */
2935
2936 ev_io_init (&timerfd_w, timerfdcb, timerfd, EV_READ);
2937 ev_set_priority (&timerfd_w, EV_MINPRI);
2938 ev_io_start (EV_A_ &timerfd_w);
2939 ev_unref (EV_A); /* watcher should not keep loop alive */
2940
2941 /* (re-) arm timer */
2942 timerfdcb (EV_A_ 0, 0);
2943 }
2944 }
2945}
2946
2947#endif
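
Outside of libev, the mechanism timerfdcb relies on looks roughly like this. A standalone editorial sketch assuming Linux with <sys/timerfd.h>; the helper name is invented:

    #include <sys/timerfd.h>
    #include <time.h>
    #include <unistd.h>

    /* returns a fd that becomes readable (read () failing with ECANCELED) as soon
     * as somebody sets the realtime clock - the same trick evtimerfd_init uses */
    static int
    make_clock_change_fd (time_t wakeup_abs)
    {
      struct itimerspec its = { 0 };
      int fd = timerfd_create (CLOCK_REALTIME, TFD_NONBLOCK | TFD_CLOEXEC);

      if (fd < 0)
        return -1;

      its.it_value.tv_sec = wakeup_abs; /* far-future absolute expiry, e.g. now + 30 days */

      if (timerfd_settime (fd, TFD_TIMER_ABSTIME | TFD_TIMER_CANCEL_ON_SET, &its, 0) < 0)
        {
          close (fd);
          return -1;
        }

      return fd;
    }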
2948
2949/*****************************************************************************/
2950
2829#if EV_USE_IOCP 2951#if EV_USE_IOCP
2830# include "ev_iocp.c" 2952# include "ev_iocp.c"
2831#endif 2953#endif
2832#if EV_USE_PORT 2954#if EV_USE_PORT
2833# include "ev_port.c" 2955# include "ev_port.c"
2879unsigned int 3001unsigned int
2880ev_supported_backends (void) EV_NOEXCEPT 3002ev_supported_backends (void) EV_NOEXCEPT
2881{ 3003{
2882 unsigned int flags = 0; 3004 unsigned int flags = 0;
2883 3005
2884 if (EV_USE_PORT ) flags |= EVBACKEND_PORT; 3006 if (EV_USE_PORT ) flags |= EVBACKEND_PORT;
2885 if (EV_USE_KQUEUE ) flags |= EVBACKEND_KQUEUE; 3007 if (EV_USE_KQUEUE ) flags |= EVBACKEND_KQUEUE;
2886 if (EV_USE_EPOLL ) flags |= EVBACKEND_EPOLL; 3008 if (EV_USE_EPOLL ) flags |= EVBACKEND_EPOLL;
2887 if (EV_USE_LINUXAIO) flags |= EVBACKEND_LINUXAIO; 3009 if (EV_USE_LINUXAIO ) flags |= EVBACKEND_LINUXAIO;
2888 if (EV_USE_IOURING ) flags |= EVBACKEND_IOURING; 3010 if (EV_USE_IOURING && ev_linux_version () >= 0x050601) flags |= EVBACKEND_IOURING; /* 5.6.1+ */
2889 if (EV_USE_POLL ) flags |= EVBACKEND_POLL; 3011 if (EV_USE_POLL ) flags |= EVBACKEND_POLL;
2890 if (EV_USE_SELECT ) flags |= EVBACKEND_SELECT; 3012 if (EV_USE_SELECT ) flags |= EVBACKEND_SELECT;
2891 3013
2892 return flags; 3014 return flags;
2893} 3015}
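
For reference, ev_linux_version () (defined earlier in ev.c) packs the uname release into one byte per component, which is why 2.6.32 appears as 0x020620 later in this file and 5.6.1 as 0x050601 here. A small editorial sketch of an equivalent check (helper name invented):

    /* 0x00MMmmpp: major, minor, patch, one byte each */
    static int
    linux_at_least (int major, int minor, int patch)
    {
      return ev_linux_version () >= ((major << 16) | (minor << 8) | patch);
    }

    /* linux_at_least (5, 6, 1) matches the io_uring gate above */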
2894 3016
2895ecb_cold 3017ecb_cold
2896unsigned int 3018unsigned int
2926 3048
2927ecb_cold 3049ecb_cold
2928unsigned int 3050unsigned int
2929ev_embeddable_backends (void) EV_NOEXCEPT 3051ev_embeddable_backends (void) EV_NOEXCEPT
2930{ 3052{
2931 int flags = EVBACKEND_EPOLL | EVBACKEND_KQUEUE | EVBACKEND_PORT; 3053 int flags = EVBACKEND_EPOLL | EVBACKEND_KQUEUE | EVBACKEND_PORT | EVBACKEND_IOURING;
2932 3054
2933 /* epoll embeddability broken on all linux versions up to at least 2.6.23 */ 3055 /* epoll embeddability broken on all linux versions up to at least 2.6.23 */
2934 if (ev_linux_version () < 0x020620) /* disable it on linux < 2.6.32 */ 3056 if (ev_linux_version () < 0x020620) /* disable it on linux < 2.6.32 */
2935 flags &= ~EVBACKEND_EPOLL; 3057 flags &= ~EVBACKEND_EPOLL;
2936 3058
2937 /* EVBACKEND_LINUXAIO is theoretically embeddable, but suffers from a performance overhead */ 3059 /* EVBACKEND_LINUXAIO is theoretically embeddable, but suffers from a performance overhead */
2938
2939 /* EVBACKEND_IOURING is practically embeddable, but the current implementation is not
2940 * because our backend_fd is the epoll fd we need as fallback.
2941 * if the kernel ever is fixed, this might change...
2942 */
2943 3060
2944 return flags; 3061 return flags;
2945} 3062}
2946 3063
2947unsigned int 3064unsigned int
3065 fs_fd = flags & EVFLAG_NOINOTIFY ? -1 : -2; 3182 fs_fd = flags & EVFLAG_NOINOTIFY ? -1 : -2;
3066#endif 3183#endif
3067#if EV_USE_SIGNALFD 3184#if EV_USE_SIGNALFD
3068 sigfd = flags & EVFLAG_SIGNALFD ? -2 : -1; 3185 sigfd = flags & EVFLAG_SIGNALFD ? -2 : -1;
3069#endif 3186#endif
3187#if EV_USE_TIMERFD
3188 timerfd = flags & EVFLAG_NOTIMERFD ? -1 : -2;
3189#endif
3070 3190
3071 if (!(flags & EVBACKEND_MASK)) 3191 if (!(flags & EVBACKEND_MASK))
3072 flags |= ev_recommended_backends (); 3192 flags |= ev_recommended_backends ();
3073 3193
3074#if EV_USE_IOCP 3194#if EV_USE_IOCP
3145 } 3265 }
3146 3266
3147#if EV_USE_SIGNALFD 3267#if EV_USE_SIGNALFD
3148 if (ev_is_active (&sigfd_w)) 3268 if (ev_is_active (&sigfd_w))
3149 close (sigfd); 3269 close (sigfd);
3270#endif
3271
3272#if EV_USE_TIMERFD
3273 if (ev_is_active (&timerfd_w))
3274 close (timerfd);
3150#endif 3275#endif
3151 3276
3152#if EV_USE_INOTIFY 3277#if EV_USE_INOTIFY
3153 if (fs_fd >= 0) 3278 if (fs_fd >= 0)
3154 close (fs_fd); 3279 close (fs_fd);
3247#endif 3372#endif
3248#if EV_USE_INOTIFY 3373#if EV_USE_INOTIFY
3249 infy_fork (EV_A); 3374 infy_fork (EV_A);
3250#endif 3375#endif
3251 3376
3377 if (postfork != 2)
3378 {
3379 #if EV_USE_SIGNALFD
 3380 /* surprisingly, nothing needs to be done for signalfd, according to the docs, it does the right thing on fork */
3381 #endif
3382
3383 #if EV_USE_TIMERFD
3384 if (ev_is_active (&timerfd_w))
3385 {
3386 ev_ref (EV_A);
3387 ev_io_stop (EV_A_ &timerfd_w);
3388
3389 close (timerfd);
3390 timerfd = -2;
3391
3392 evtimerfd_init (EV_A);
3393 /* reschedule periodics, in case we missed something */
3394 ev_feed_event (EV_A_ &timerfd_w, EV_CUSTOM);
3395 }
3396 #endif
3397
3252#if EV_SIGNAL_ENABLE || EV_ASYNC_ENABLE 3398 #if EV_SIGNAL_ENABLE || EV_ASYNC_ENABLE
3253 if (ev_is_active (&pipe_w) && postfork != 2) 3399 if (ev_is_active (&pipe_w))
3254 { 3400 {
3255 /* pipe_write_wanted must be false now, so modifying fd vars should be safe */ 3401 /* pipe_write_wanted must be false now, so modifying fd vars should be safe */
3256 3402
3257 ev_ref (EV_A); 3403 ev_ref (EV_A);
3258 ev_io_stop (EV_A_ &pipe_w); 3404 ev_io_stop (EV_A_ &pipe_w);
3259 3405
3260 if (evpipe [0] >= 0) 3406 if (evpipe [0] >= 0)
3261 EV_WIN32_CLOSE_FD (evpipe [0]); 3407 EV_WIN32_CLOSE_FD (evpipe [0]);
3262 3408
3263 evpipe_init (EV_A); 3409 evpipe_init (EV_A);
3264 /* iterate over everything, in case we missed something before */ 3410 /* iterate over everything, in case we missed something before */
3265 ev_feed_event (EV_A_ &pipe_w, EV_CUSTOM); 3411 ev_feed_event (EV_A_ &pipe_w, EV_CUSTOM);
3412 }
3413 #endif
3266 } 3414 }
3267#endif
3268 3415
3269 postfork = 0; 3416 postfork = 0;
3270} 3417}
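
From the application side, the fork path above is reached by telling the loop about the fork. A usage sketch, not part of ev.c (wrapper function invented; callbacks and watchers omitted):

    #include <unistd.h>
    #include "ev.h"

    static void
    run_child_loop (void)
    {
      pid_t pid = fork ();

      if (pid == 0)
        {
          /* child: flag the default loop as forked - loop_fork () above then runs
           * on the next iteration, re-creating the evpipe and re-arming the timerfd */
          ev_loop_fork (EV_DEFAULT);
          ev_run (EV_DEFAULT_ 0);
        }
    }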
3271 3418
3272#if EV_MULTIPLICITY 3419#if EV_MULTIPLICITY
3542 { 3689 {
3543 ev_at (w) += w->repeat; 3690 ev_at (w) += w->repeat;
3544 if (ev_at (w) < mn_now) 3691 if (ev_at (w) < mn_now)
3545 ev_at (w) = mn_now; 3692 ev_at (w) = mn_now;
3546 3693
3547 assert (("libev: negative ev_timer repeat value found while processing timers", w->repeat > 0.)); 3694 assert (("libev: negative ev_timer repeat value found while processing timers", w->repeat > EV_TS_CONST (0.)));
3548 3695
3549 ANHE_at_cache (timers [HEAP0]); 3696 ANHE_at_cache (timers [HEAP0]);
3550 downheap (timers, timercnt, HEAP0); 3697 downheap (timers, timercnt, HEAP0);
3551 } 3698 }
3552 else 3699 else
3683 3830
3684 mn_now = get_clock (); 3831 mn_now = get_clock ();
3685 3832
3686 /* only fetch the realtime clock every 0.5*MIN_TIMEJUMP seconds */ 3833 /* only fetch the realtime clock every 0.5*MIN_TIMEJUMP seconds */
3687 /* interpolate in the meantime */ 3834 /* interpolate in the meantime */
3688 if (ecb_expect_true (mn_now - now_floor < MIN_TIMEJUMP * .5)) 3835 if (ecb_expect_true (mn_now - now_floor < EV_TS_CONST (MIN_TIMEJUMP * .5)))
3689 { 3836 {
3690 ev_rt_now = rtmn_diff + mn_now; 3837 ev_rt_now = rtmn_diff + mn_now;
3691 return; 3838 return;
3692 } 3839 }
3693 3840
3707 ev_tstamp diff; 3854 ev_tstamp diff;
3708 rtmn_diff = ev_rt_now - mn_now; 3855 rtmn_diff = ev_rt_now - mn_now;
3709 3856
3710 diff = odiff - rtmn_diff; 3857 diff = odiff - rtmn_diff;
3711 3858
3712 if (ecb_expect_true ((diff < 0. ? -diff : diff) < MIN_TIMEJUMP)) 3859 if (ecb_expect_true ((diff < EV_TS_CONST (0.) ? -diff : diff) < EV_TS_CONST (MIN_TIMEJUMP)))
3713 return; /* all is well */ 3860 return; /* all is well */
3714 3861
3715 ev_rt_now = ev_time (); 3862 ev_rt_now = ev_time ();
3716 mn_now = get_clock (); 3863 mn_now = get_clock ();
3717 now_floor = mn_now; 3864 now_floor = mn_now;
3726 else 3873 else
3727#endif 3874#endif
3728 { 3875 {
3729 ev_rt_now = ev_time (); 3876 ev_rt_now = ev_time ();
3730 3877
3731 if (ecb_expect_false (mn_now > ev_rt_now || ev_rt_now > mn_now + max_block + MIN_TIMEJUMP)) 3878 if (ecb_expect_false (mn_now > ev_rt_now || ev_rt_now > mn_now + max_block + EV_TS_CONST (MIN_TIMEJUMP)))
3732 { 3879 {
3733 /* adjust timers. this is easy, as the offset is the same for all of them */ 3880 /* adjust timers. this is easy, as the offset is the same for all of them */
3734 timers_reschedule (EV_A_ ev_rt_now - mn_now); 3881 timers_reschedule (EV_A_ ev_rt_now - mn_now);
3735#if EV_PERIODIC_ENABLE 3882#if EV_PERIODIC_ENABLE
3736 periodics_reschedule (EV_A); 3883 periodics_reschedule (EV_A);
3805 3952
3806 /* remember old timestamp for io_blocktime calculation */ 3953 /* remember old timestamp for io_blocktime calculation */
3807 ev_tstamp prev_mn_now = mn_now; 3954 ev_tstamp prev_mn_now = mn_now;
3808 3955
3809 /* update time to cancel out callback processing overhead */ 3956 /* update time to cancel out callback processing overhead */
3810 time_update (EV_A_ 1e100); 3957 time_update (EV_A_ EV_TS_CONST (EV_TSTAMP_HUGE));
3811 3958
3812 /* from now on, we want a pipe-wake-up */ 3959 /* from now on, we want a pipe-wake-up */
3813 pipe_write_wanted = 1; 3960 pipe_write_wanted = 1;
3814 3961
3815 ECB_MEMORY_FENCE; /* make sure pipe_write_wanted is visible before we check for potential skips */ 3962 ECB_MEMORY_FENCE; /* make sure pipe_write_wanted is visible before we check for potential skips */
3816 3963
3817 if (ecb_expect_true (!(flags & EVRUN_NOWAIT || idleall || !activecnt || pipe_write_skipped))) 3964 if (ecb_expect_true (!(flags & EVRUN_NOWAIT || idleall || !activecnt || pipe_write_skipped)))
3818 { 3965 {
3819 waittime = MAX_BLOCKTIME; 3966 waittime = EV_TS_CONST (MAX_BLOCKTIME);
3820 3967
3821 if (timercnt) 3968 if (timercnt)
3822 { 3969 {
3823 ev_tstamp to = ANHE_at (timers [HEAP0]) - mn_now; 3970 ev_tstamp to = ANHE_at (timers [HEAP0]) - mn_now;
3824 if (waittime > to) waittime = to; 3971 if (waittime > to) waittime = to;
3834 3981
3835 /* don't let timeouts decrease the waittime below timeout_blocktime */ 3982 /* don't let timeouts decrease the waittime below timeout_blocktime */
3836 if (ecb_expect_false (waittime < timeout_blocktime)) 3983 if (ecb_expect_false (waittime < timeout_blocktime))
3837 waittime = timeout_blocktime; 3984 waittime = timeout_blocktime;
3838 3985
3839 /* at this point, we NEED to wait, so we have to ensure */ 3986 /* now there are two more special cases left, either we have
3840 /* to pass a minimum nonzero value to the backend */ 3987 * already-expired timers, so we should not sleep, or we have timers
3988 * that expire very soon, in which case we need to wait for a minimum
3989 * amount of time for some event loop backends.
3990 */
3841 if (ecb_expect_false (waittime < backend_mintime)) 3991 if (ecb_expect_false (waittime < backend_mintime))
3992 waittime = waittime <= EV_TS_CONST (0.)
3993 ? EV_TS_CONST (0.)
3842 waittime = backend_mintime; 3994 : backend_mintime;
3843 3995
3844 /* extra check because io_blocktime is commonly 0 */ 3996 /* extra check because io_blocktime is commonly 0 */
3845 if (ecb_expect_false (io_blocktime)) 3997 if (ecb_expect_false (io_blocktime))
3846 { 3998 {
3847 sleeptime = io_blocktime - (mn_now - prev_mn_now); 3999 sleeptime = io_blocktime - (mn_now - prev_mn_now);
3848 4000
3849 if (sleeptime > waittime - backend_mintime) 4001 if (sleeptime > waittime - backend_mintime)
3850 sleeptime = waittime - backend_mintime; 4002 sleeptime = waittime - backend_mintime;
3851 4003
3852 if (ecb_expect_true (sleeptime > 0.)) 4004 if (ecb_expect_true (sleeptime > EV_TS_CONST (0.)))
3853 { 4005 {
3854 ev_sleep (sleeptime); 4006 ev_sleep (sleeptime);
3855 waittime -= sleeptime; 4007 waittime -= sleeptime;
3856 } 4008 }
3857 } 4009 }
3931} 4083}
3932 4084
3933void 4085void
3934ev_now_update (EV_P) EV_NOEXCEPT 4086ev_now_update (EV_P) EV_NOEXCEPT
3935{ 4087{
3936 time_update (EV_A_ 1e100); 4088 time_update (EV_A_ EV_TSTAMP_HUGE);
3937} 4089}
3938 4090
3939void 4091void
3940ev_suspend (EV_P) EV_NOEXCEPT 4092ev_suspend (EV_P) EV_NOEXCEPT
3941{ 4093{
4172} 4324}
4173 4325
4174ev_tstamp 4326ev_tstamp
4175ev_timer_remaining (EV_P_ ev_timer *w) EV_NOEXCEPT 4327ev_timer_remaining (EV_P_ ev_timer *w) EV_NOEXCEPT
4176{ 4328{
4177 return ev_at (w) - (ev_is_active (w) ? mn_now : 0.); 4329 return ev_at (w) - (ev_is_active (w) ? mn_now : EV_TS_CONST (0.));
4178} 4330}
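
A usage sketch for the accessor above, not part of ev.c (timeout_cb is a hypothetical callback; the wrapper function is invented):

    static void
    start_and_query (struct ev_loop *loop)
    {
      static ev_timer t;

      ev_timer_init (&t, timeout_cb, 5., 0.);          /* fire once, 5 seconds from now */
      ev_timer_start (loop, &t);

      ev_tstamp left = ev_timer_remaining (loop, &t);  /* ~5.0 right after starting */
      (void)left;  /* for an inactive timer the call returns the configured timeout instead */
    }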
4179 4331
4180#if EV_PERIODIC_ENABLE 4332#if EV_PERIODIC_ENABLE
4181ecb_noinline 4333ecb_noinline
4182void 4334void
4183ev_periodic_start (EV_P_ ev_periodic *w) EV_NOEXCEPT 4335ev_periodic_start (EV_P_ ev_periodic *w) EV_NOEXCEPT
4184{ 4336{
4185 if (ecb_expect_false (ev_is_active (w))) 4337 if (ecb_expect_false (ev_is_active (w)))
4186 return; 4338 return;
4339
4340#if EV_USE_TIMERFD
4341 if (timerfd == -2)
4342 evtimerfd_init (EV_A);
4343#endif
4187 4344
4188 if (w->reschedule_cb) 4345 if (w->reschedule_cb)
4189 ev_at (w) = w->reschedule_cb (w, ev_rt_now); 4346 ev_at (w) = w->reschedule_cb (w, ev_rt_now);
4190 else if (w->interval) 4347 else if (w->interval)
4191 { 4348 {
4933 ev_run (EV_A_ EVRUN_NOWAIT); 5090 ev_run (EV_A_ EVRUN_NOWAIT);
4934 } 5091 }
4935 } 5092 }
4936} 5093}
4937 5094
5095#if EV_FORK_ENABLE
4938static void 5096static void
4939embed_fork_cb (EV_P_ ev_fork *fork_w, int revents) 5097embed_fork_cb (EV_P_ ev_fork *fork_w, int revents)
4940{ 5098{
4941 ev_embed *w = (ev_embed *)(((char *)fork_w) - offsetof (ev_embed, fork)); 5099 ev_embed *w = (ev_embed *)(((char *)fork_w) - offsetof (ev_embed, fork));
4942 5100
4949 ev_run (EV_A_ EVRUN_NOWAIT); 5107 ev_run (EV_A_ EVRUN_NOWAIT);
4950 } 5108 }
4951 5109
4952 ev_embed_start (EV_A_ w); 5110 ev_embed_start (EV_A_ w);
4953} 5111}
5112#endif
4954 5113
4955#if 0 5114#if 0
4956static void 5115static void
4957embed_idle_cb (EV_P_ ev_idle *idle, int revents) 5116embed_idle_cb (EV_P_ ev_idle *idle, int revents)
4958{ 5117{
4979 5138
4980 ev_prepare_init (&w->prepare, embed_prepare_cb); 5139 ev_prepare_init (&w->prepare, embed_prepare_cb);
4981 ev_set_priority (&w->prepare, EV_MINPRI); 5140 ev_set_priority (&w->prepare, EV_MINPRI);
4982 ev_prepare_start (EV_A_ &w->prepare); 5141 ev_prepare_start (EV_A_ &w->prepare);
4983 5142
5143#if EV_FORK_ENABLE
4984 ev_fork_init (&w->fork, embed_fork_cb); 5144 ev_fork_init (&w->fork, embed_fork_cb);
4985 ev_fork_start (EV_A_ &w->fork); 5145 ev_fork_start (EV_A_ &w->fork);
5146#endif
4986 5147
4987 /*ev_idle_init (&w->idle, embed_idle_cb);*/ 5148 /*ev_idle_init (&w->idle, embed_idle_cb);*/
4988 5149
4989 ev_start (EV_A_ (W)w, 1); 5150 ev_start (EV_A_ (W)w, 1);
4990 5151
5000 5161
5001 EV_FREQUENT_CHECK; 5162 EV_FREQUENT_CHECK;
5002 5163
5003 ev_io_stop (EV_A_ &w->io); 5164 ev_io_stop (EV_A_ &w->io);
5004 ev_prepare_stop (EV_A_ &w->prepare); 5165 ev_prepare_stop (EV_A_ &w->prepare);
5166#if EV_FORK_ENABLE
5005 ev_fork_stop (EV_A_ &w->fork); 5167 ev_fork_stop (EV_A_ &w->fork);
5168#endif
5006 5169
5007 ev_stop (EV_A_ (W)w); 5170 ev_stop (EV_A_ (W)w);
5008 5171
5009 EV_FREQUENT_CHECK; 5172 EV_FREQUENT_CHECK;
5010} 5173}
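
The embeddable-backends logic earlier and the fork/prepare plumbing above combine in the usual ev_embed pattern. An editorial sketch close to the documented usage, not part of ev.c (error handling trimmed; wrapper function invented):

    static void
    setup_embedded_loop (void)
    {
      struct ev_loop *loop     = EV_DEFAULT;
      struct ev_loop *embedded = 0;
      static ev_embed embed;

      /* only create a second loop if an embeddable backend is actually recommended */
      if (ev_supported_backends () & ev_recommended_backends () & ev_embeddable_backends ())
        embedded = ev_loop_new (ev_recommended_backends () & ev_embeddable_backends ());

      if (embedded)
        {
          ev_embed_init (&embed, 0, embedded); /* 0 callback: ev_embed sweeps the embedded loop itself */
          ev_embed_start (loop, &embed);
        }
    }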
