/cvs/libev/ev.c

Comparing libev/ev.c (file contents):
Revision 1.500 by root, Mon Jul 1 20:47:37 2019 UTC vs.
Revision 1.516 by root, Tue Dec 24 13:24:29 2019 UTC

124# else 124# else
125# undef EV_USE_LINUXAIO 125# undef EV_USE_LINUXAIO
126# define EV_USE_LINUXAIO 0 126# define EV_USE_LINUXAIO 0
127# endif 127# endif
128 128
129# if HAVE_LINUX_FS_H && HAVE_SYS_TIMERFD_H && HAVE_KERNEL_RWF_T
130# ifndef EV_USE_IOURING
131# define EV_USE_IOURING EV_FEATURE_BACKENDS
132# endif
133# else
134# undef EV_USE_IOURING
135# define EV_USE_IOURING 0
136# endif
137
129# if HAVE_KQUEUE && HAVE_SYS_EVENT_H 138# if HAVE_KQUEUE && HAVE_SYS_EVENT_H
130# ifndef EV_USE_KQUEUE 139# ifndef EV_USE_KQUEUE
131# define EV_USE_KQUEUE EV_FEATURE_BACKENDS 140# define EV_USE_KQUEUE EV_FEATURE_BACKENDS
132# endif 141# endif
133# else 142# else
168# endif 177# endif
169# else 178# else
170# undef EV_USE_EVENTFD 179# undef EV_USE_EVENTFD
171# define EV_USE_EVENTFD 0 180# define EV_USE_EVENTFD 0
172# endif 181# endif
173 182
183# if HAVE_SYS_TIMERFD_H
184# ifndef EV_USE_TIMERFD
185# define EV_USE_TIMERFD EV_FEATURE_OS
186# endif
187# else
188# undef EV_USE_TIMERFD
189# define EV_USE_TIMERFD 0
190# endif
191
174#endif 192#endif
175 193
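The two new probe blocks above give EV_USE_IOURING and EV_USE_TIMERFD their config.h-driven defaults. When embedding ev.c directly, these knobs can also be pinned before inclusion; a minimal sketch (the wrapper file name is made up for illustration):

    /* myev.c - hypothetical wrapper that embeds libev with both new
     * code paths disabled at compile time */
    #define EV_USE_IOURING 0   /* never build the io_uring backend */
    #define EV_USE_TIMERFD 0   /* keep the pre-timerfd clock-jump detection */
    #include "ev.c"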
176/* OS X, in its infinite idiocy, actually HARDCODES 194/* OS X, in its infinite idiocy, actually HARDCODES
177 * a limit of 1024 into their select. Where people have brains, 195 * a limit of 1024 into their select. Where people have brains,
178 * OS X engineers apparently have a vacuum. Or maybe they were 196 * OS X engineers apparently have a vacuum. Or maybe they were
332# else 350# else
333# define EV_USE_LINUXAIO 0 351# define EV_USE_LINUXAIO 0
334# endif 352# endif
335#endif 353#endif
336 354
355#ifndef EV_USE_IOURING
356# if __linux /* later checks might disable again */
357# define EV_USE_IOURING 1
358# else
359# define EV_USE_IOURING 0
360# endif
361#endif
362
337#ifndef EV_USE_INOTIFY 363#ifndef EV_USE_INOTIFY
338# if __linux && (__GLIBC__ > 2 || (__GLIBC__ == 2 && __GLIBC_MINOR__ >= 4)) 364# if __linux && (__GLIBC__ > 2 || (__GLIBC__ == 2 && __GLIBC_MINOR__ >= 4))
339# define EV_USE_INOTIFY EV_FEATURE_OS 365# define EV_USE_INOTIFY EV_FEATURE_OS
340# else 366# else
341# define EV_USE_INOTIFY 0 367# define EV_USE_INOTIFY 0
361#ifndef EV_USE_SIGNALFD 387#ifndef EV_USE_SIGNALFD
362# if __linux && (__GLIBC__ > 2 || (__GLIBC__ == 2 && __GLIBC_MINOR__ >= 7)) 388# if __linux && (__GLIBC__ > 2 || (__GLIBC__ == 2 && __GLIBC_MINOR__ >= 7))
363# define EV_USE_SIGNALFD EV_FEATURE_OS 389# define EV_USE_SIGNALFD EV_FEATURE_OS
364# else 390# else
365# define EV_USE_SIGNALFD 0 391# define EV_USE_SIGNALFD 0
392# endif
393#endif
394
395#ifndef EV_USE_TIMERFD
396# if __linux && (__GLIBC__ > 2 || (__GLIBC__ == 2 && __GLIBC_MINOR__ >= 8))
397# define EV_USE_TIMERFD EV_FEATURE_OS
398# else
399# define EV_USE_TIMERFD 0
366# endif 400# endif
367#endif 401#endif
368 402
369#if 0 /* debugging */ 403#if 0 /* debugging */
370# define EV_VERIFY 3 404# define EV_VERIFY 3
406# include <sys/syscall.h> 440# include <sys/syscall.h>
407# ifdef SYS_clock_gettime 441# ifdef SYS_clock_gettime
408# define clock_gettime(id, ts) syscall (SYS_clock_gettime, (id), (ts)) 442# define clock_gettime(id, ts) syscall (SYS_clock_gettime, (id), (ts))
409# undef EV_USE_MONOTONIC 443# undef EV_USE_MONOTONIC
410# define EV_USE_MONOTONIC 1 444# define EV_USE_MONOTONIC 1
445# define EV_NEED_SYSCALL 1
411# else 446# else
412# undef EV_USE_CLOCK_SYSCALL 447# undef EV_USE_CLOCK_SYSCALL
413# define EV_USE_CLOCK_SYSCALL 0 448# define EV_USE_CLOCK_SYSCALL 0
414# endif 449# endif
415#endif 450#endif
427#endif 462#endif
428 463
429#if !EV_STAT_ENABLE 464#if !EV_STAT_ENABLE
430# undef EV_USE_INOTIFY 465# undef EV_USE_INOTIFY
431# define EV_USE_INOTIFY 0 466# define EV_USE_INOTIFY 0
467#endif
468
469#if __linux && EV_USE_IOURING
470# include <linux/version.h>
471# if LINUX_VERSION_CODE < KERNEL_VERSION(4,14,0)
472# undef EV_USE_IOURING
473# define EV_USE_IOURING 0
474# endif
432#endif 475#endif
433 476
434#if !EV_USE_NANOSLEEP 477#if !EV_USE_NANOSLEEP
435/* hp-ux has it in sys/time.h, which we unconditionally include above */ 478/* hp-ux has it in sys/time.h, which we unconditionally include above */
436# if !defined _WIN32 && !defined __hpux 479# if !defined _WIN32 && !defined __hpux
438# endif 481# endif
439#endif 482#endif
440 483
441#if EV_USE_LINUXAIO 484#if EV_USE_LINUXAIO
442# include <sys/syscall.h> 485# include <sys/syscall.h>
443# if !SYS_io_getevents || !EV_USE_EPOLL /* ev_linxaio uses ev_poll.c:ev_epoll_create */ 486# if SYS_io_getevents && EV_USE_EPOLL /* linuxaio backend requires epoll backend */
487# define EV_NEED_SYSCALL 1
488# else
444# undef EV_USE_LINUXAIO 489# undef EV_USE_LINUXAIO
445# define EV_USE_LINUXAIO 0 490# define EV_USE_LINUXAIO 0
491# endif
492#endif
493
494#if EV_USE_IOURING
495# include <sys/syscall.h>
496# if !SYS_io_uring_setup && __linux && !__alpha
497# define SYS_io_uring_setup 425
498# define SYS_io_uring_enter 426
499# define SYS_io_uring_register 427
500# endif
501# if SYS_io_uring_setup && EV_USE_EPOLL /* iouring backend requires epoll backend */
502# define EV_NEED_SYSCALL 1
503# else
504# undef EV_USE_IOURING
505# define EV_USE_IOURING 0
446# endif 506# endif
447#endif 507#endif
448 508
449#if EV_USE_INOTIFY 509#if EV_USE_INOTIFY
450# include <sys/statfs.h> 510# include <sys/statfs.h>
455# define EV_USE_INOTIFY 0 515# define EV_USE_INOTIFY 0
456# endif 516# endif
457#endif 517#endif
458 518
459#if EV_USE_EVENTFD 519#if EV_USE_EVENTFD
460/* our minimum requirement is glibc 2.7 which has the stub, but not the header */ 520/* our minimum requirement is glibc 2.7 which has the stub, but not the full header */
461# include <stdint.h> 521# include <stdint.h>
462# ifndef EFD_NONBLOCK 522# ifndef EFD_NONBLOCK
463# define EFD_NONBLOCK O_NONBLOCK 523# define EFD_NONBLOCK O_NONBLOCK
464# endif 524# endif
465# ifndef EFD_CLOEXEC 525# ifndef EFD_CLOEXEC
471# endif 531# endif
472EV_CPP(extern "C") int (eventfd) (unsigned int initval, int flags); 532EV_CPP(extern "C") int (eventfd) (unsigned int initval, int flags);
473#endif 533#endif
474 534
475#if EV_USE_SIGNALFD 535#if EV_USE_SIGNALFD
476/* our minimum requirement is glibc 2.7 which has the stub, but not the header */ 536/* our minimum requirement is glibc 2.7 which has the stub, but not the full header */
477# include <stdint.h> 537# include <stdint.h>
478# ifndef SFD_NONBLOCK 538# ifndef SFD_NONBLOCK
479# define SFD_NONBLOCK O_NONBLOCK 539# define SFD_NONBLOCK O_NONBLOCK
480# endif 540# endif
481# ifndef SFD_CLOEXEC 541# ifndef SFD_CLOEXEC
483# define SFD_CLOEXEC O_CLOEXEC 543# define SFD_CLOEXEC O_CLOEXEC
484# else 544# else
485# define SFD_CLOEXEC 02000000 545# define SFD_CLOEXEC 02000000
486# endif 546# endif
487# endif 547# endif
488EV_CPP (extern "C") int signalfd (int fd, const sigset_t *mask, int flags); 548EV_CPP (extern "C") int (signalfd) (int fd, const sigset_t *mask, int flags);
489 549
490struct signalfd_siginfo 550struct signalfd_siginfo
491{ 551{
492 uint32_t ssi_signo; 552 uint32_t ssi_signo;
493 char pad[128 - sizeof (uint32_t)]; 553 char pad[128 - sizeof (uint32_t)];
494}; 554};
495#endif 555#endif
496 556
497/**/ 557/* for timerfd, libev core requires TFD_TIMER_CANCEL_ON_SET &c */
558#if EV_USE_TIMERFD
559# include <sys/timerfd.h>
560/* timerfd is only used for periodics */
561# if !(defined (TFD_TIMER_CANCEL_ON_SET) && defined (TFD_CLOEXEC) && defined (TFD_NONBLOCK)) || !EV_PERIODIC_ENABLE
562# undef EV_USE_TIMERFD
563# define EV_USE_TIMERFD 0
564# endif
565#endif
566
567/*****************************************************************************/
498 568
499#if EV_VERIFY >= 3 569#if EV_VERIFY >= 3
500# define EV_FREQUENT_CHECK ev_verify (EV_A) 570# define EV_FREQUENT_CHECK ev_verify (EV_A)
501#else 571#else
502# define EV_FREQUENT_CHECK do { } while (0) 572# define EV_FREQUENT_CHECK do { } while (0)
507 * This value is good at least till the year 4000. 577 * This value is good at least till the year 4000.
508 */ 578 */
509#define MIN_INTERVAL 0.0001220703125 /* 1/2**13, good till 4000 */ 579#define MIN_INTERVAL 0.0001220703125 /* 1/2**13, good till 4000 */
510/*#define MIN_INTERVAL 0.00000095367431640625 /* 1/2**20, good till 2200 */ 580/*#define MIN_INTERVAL 0.00000095367431640625 /* 1/2**20, good till 2200 */
511 581
512#define MIN_TIMEJUMP 1. /* minimum timejump that gets detected (if monotonic clock available) */ 582#define MIN_TIMEJUMP 1. /* minimum timejump that gets detected (if monotonic clock available) */
513#define MAX_BLOCKTIME 59.743 /* never wait longer than this time (to detect time jumps) */ 583#define MAX_BLOCKTIME 59.743 /* never wait longer than this time (to detect time jumps) */
514 584
585/* find a portable timestamp that is "always" in the future but fits into time_t.
586 * this is quite hard, and we are mostly guessing - we handle 32 bit signed/unsigned time_t,
587 * and sizes larger than 32 bit, and maybe the unlikely floating point time_t */
588#define EV_TSTAMP_HUGE \
589 (sizeof (time_t) >= 8 ? 10000000000000. \
590 : 0 < (time_t)4294967295 ? 4294967295. \
591 : 2147483647.) \
592
593#ifndef EV_TS_CONST
594# define EV_TS_CONST(nv) nv
595# define EV_TS_TO_MSEC(a) a * 1e3 + 0.9999
596# define EV_TS_FROM_USEC(us) us * 1e-6
515#define EV_TV_SET(tv,t) do { tv.tv_sec = (long)t; tv.tv_usec = (long)((t - tv.tv_sec) * 1e6); } while (0) 597# define EV_TV_SET(tv,t) do { tv.tv_sec = (long)t; tv.tv_usec = (long)((t - tv.tv_sec) * 1e6); } while (0)
516#define EV_TS_SET(ts,t) do { ts.tv_sec = (long)t; ts.tv_nsec = (long)((t - ts.tv_sec) * 1e9); } while (0) 598# define EV_TS_SET(ts,t) do { ts.tv_sec = (long)t; ts.tv_nsec = (long)((t - ts.tv_sec) * 1e9); } while (0)
599# define EV_TV_GET(tv) ((tv).tv_sec + (tv).tv_usec * 1e-6)
600# define EV_TS_GET(ts) ((ts).tv_sec + (ts).tv_nsec * 1e-9)
601#endif
517 602
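The EV_TS_*/EV_TV_* helpers above centralise conversions between libev's ev_tstamp (a double by default) and the kernel's timeval/timespec types. They are internal to ev.c, so this sketch merely restates what the default definitions compute for a 2.5 second value:

    #include <time.h>        /* struct timespec */
    #include <sys/time.h>    /* struct timeval */

    typedef double ev_tstamp;                     /* libev's default timestamp type */

    static void
    convert_example (void)
    {
      ev_tstamp t = 2.5;
      struct timespec ts;
      struct timeval  tv;

      /* EV_TS_SET: whole seconds, fraction scaled to nanoseconds */
      ts.tv_sec  = (long)t;
      ts.tv_nsec = (long)((t - ts.tv_sec) * 1e9);   /* 500000000 */

      /* EV_TV_SET: same idea at microsecond resolution */
      tv.tv_sec  = (long)t;
      tv.tv_usec = (long)((t - tv.tv_sec) * 1e6);   /* 500000 */
    }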
518/* the following is ecb.h embedded into libev - use update_ev_c to update from an external copy */ 603/* the following is ecb.h embedded into libev - use update_ev_c to update from an external copy */
519/* ECB.H BEGIN */ 604/* ECB.H BEGIN */
520/* 605/*
521 * libecb - http://software.schmorp.de/pkg/libecb 606 * libecb - http://software.schmorp.de/pkg/libecb
1558# define inline_speed ecb_inline 1643# define inline_speed ecb_inline
1559#else 1644#else
1560# define inline_speed ecb_noinline static 1645# define inline_speed ecb_noinline static
1561#endif 1646#endif
1562 1647
1648/*****************************************************************************/
1649/* raw syscall wrappers */
1650
1651#if EV_NEED_SYSCALL
1652
1653#include <sys/syscall.h>
1654
1655/*
1656 * define some syscall wrappers for common architectures
1657 * this is mostly for nice looks during debugging, not performance.
1658 * our syscalls return < 0, not == -1, on error. which is good
1659 * enough for linux aio.
1660 * TODO: arm is also common nowadays, maybe even mips and x86
1661 * TODO: after implementing this, it suddenly looks like overkill, but it's hard to remove...
1662 */
1663#if __GNUC__ && __linux && ECB_AMD64 && !defined __OPTIMIZE_SIZE__
1664 /* the costly errno access probably kills this for size optimisation */
1665
1666 #define ev_syscall(nr,narg,arg1,arg2,arg3,arg4,arg5,arg6) \
1667 ({ \
1668 long res; \
1669 register unsigned long r6 __asm__ ("r9" ); \
1670 register unsigned long r5 __asm__ ("r8" ); \
1671 register unsigned long r4 __asm__ ("r10"); \
1672 register unsigned long r3 __asm__ ("rdx"); \
1673 register unsigned long r2 __asm__ ("rsi"); \
1674 register unsigned long r1 __asm__ ("rdi"); \
1675 if (narg >= 6) r6 = (unsigned long)(arg6); \
1676 if (narg >= 5) r5 = (unsigned long)(arg5); \
1677 if (narg >= 4) r4 = (unsigned long)(arg4); \
1678 if (narg >= 3) r3 = (unsigned long)(arg3); \
1679 if (narg >= 2) r2 = (unsigned long)(arg2); \
1680 if (narg >= 1) r1 = (unsigned long)(arg1); \
1681 __asm__ __volatile__ ( \
1682 "syscall\n\t" \
1683 : "=a" (res) \
1684 : "0" (nr), "r" (r1), "r" (r2), "r" (r3), "r" (r4), "r" (r5) \
1685 : "cc", "r11", "cx", "memory"); \
1686 errno = -res; \
1687 res; \
1688 })
1689
1690#endif
1691
1692#ifdef ev_syscall
1693 #define ev_syscall0(nr) ev_syscall (nr, 0, 0, 0, 0, 0, 0, 0)
1694 #define ev_syscall1(nr,arg1) ev_syscall (nr, 1, arg1, 0, 0, 0, 0, 0)
1695 #define ev_syscall2(nr,arg1,arg2) ev_syscall (nr, 2, arg1, arg2, 0, 0, 0, 0)
1696 #define ev_syscall3(nr,arg1,arg2,arg3) ev_syscall (nr, 3, arg1, arg2, arg3, 0, 0, 0)
1697 #define ev_syscall4(nr,arg1,arg2,arg3,arg4) ev_syscall (nr, 4, arg1, arg2, arg3, arg4, 0, 0)
1698 #define ev_syscall5(nr,arg1,arg2,arg3,arg4,arg5) ev_syscall (nr, 5, arg1, arg2, arg3, arg4, arg5, 0)
1699 #define ev_syscall6(nr,arg1,arg2,arg3,arg4,arg5,arg6) ev_syscall (nr, 6, arg1, arg2, arg3, arg4, arg5,arg6)
1700#else
1701 #define ev_syscall0(nr) syscall (nr)
1702 #define ev_syscall1(nr,arg1) syscall (nr, arg1)
1703 #define ev_syscall2(nr,arg1,arg2) syscall (nr, arg1, arg2)
1704 #define ev_syscall3(nr,arg1,arg2,arg3) syscall (nr, arg1, arg2, arg3)
1705 #define ev_syscall4(nr,arg1,arg2,arg3,arg4) syscall (nr, arg1, arg2, arg3, arg4)
1706 #define ev_syscall5(nr,arg1,arg2,arg3,arg4,arg5) syscall (nr, arg1, arg2, arg3, arg4, arg5)
1707 #define ev_syscall6(nr,arg1,arg2,arg3,arg4,arg5,arg6) syscall (nr, arg1, arg2, arg3, arg4, arg5,arg6)
1708#endif
1709
1710#endif
1711
1712/*****************************************************************************/
1713
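On targets without the hand-rolled amd64 wrapper above, the ev_syscallN macros fall back to plain syscall(2), so callers look the same either way. A rough sketch of what an io_uring setup call reduces to in that fallback case (illustrative only, not the backend's actual code):

    #define _GNU_SOURCE
    #include <unistd.h>          /* syscall () */
    #include <sys/syscall.h>     /* SYS_io_uring_setup, if the headers provide it */
    #include <linux/io_uring.h>  /* struct io_uring_params */
    #include <string.h>

    static int
    ring_setup_sketch (unsigned entries)
    {
      struct io_uring_params params;
      memset (&params, 0, sizeof params);

      /* what ev_syscall2 (SYS_io_uring_setup, entries, &params) becomes
       * when the inline-asm wrapper is not selected */
      return syscall (SYS_io_uring_setup, entries, &params);
    }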
1563#define NUMPRI (EV_MAXPRI - EV_MINPRI + 1) 1714#define NUMPRI (EV_MAXPRI - EV_MINPRI + 1)
1564 1715
1565#if EV_MINPRI == EV_MAXPRI 1716#if EV_MINPRI == EV_MAXPRI
1566# define ABSPRI(w) (((W)w), 0) 1717# define ABSPRI(w) (((W)w), 0)
1567#else 1718#else
1626 const ev_tstamp shift = sizeof (unsigned long) >= 8 ? 10000000000000000000. : 1000000000.; 1777 const ev_tstamp shift = sizeof (unsigned long) >= 8 ? 10000000000000000000. : 1000000000.;
1627#else 1778#else
1628 const ev_tstamp shift = sizeof (unsigned long) >= 8 ? 18446744073709551616. : 4294967296.; 1779 const ev_tstamp shift = sizeof (unsigned long) >= 8 ? 18446744073709551616. : 4294967296.;
1629#endif 1780#endif
1630 1781
1782 /* special treatment for negative arguments */
1783 if (ecb_expect_false (v < 0.))
1784 {
1785 ev_tstamp f = -ev_floor (-v);
1786
1787 return f - (f == v ? 0 : 1);
1788 }
1789
1631 /* argument too large for an unsigned long? */ 1790 /* argument too large for an unsigned long? then reduce it */
1632 if (ecb_expect_false (v >= shift)) 1791 if (ecb_expect_false (v >= shift))
1633 { 1792 {
1634 ev_tstamp f; 1793 ev_tstamp f;
1635 1794
1636 if (v == v - 1.) 1795 if (v == v - 1.)
1637 return v; /* very large number */ 1796 return v; /* very large numbers are assumed to be integer */
1638 1797
1639 f = shift * ev_floor (v * (1. / shift)); 1798 f = shift * ev_floor (v * (1. / shift));
1640 return f + ev_floor (v - f); 1799 return f + ev_floor (v - f);
1641 }
1642
1643 /* special treatment for negative args? */
1644 if (ecb_expect_false (v < 0.))
1645 {
1646 ev_tstamp f = -ev_floor (-v);
1647
1648 return f - (f == v ? 0 : 1);
1649 } 1800 }
1650 1801
1651 /* fits into an unsigned long */ 1802 /* fits into an unsigned long */
1652 return (unsigned long)v; 1803 return (unsigned long)v;
1653} 1804}
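The reordering above handles negative arguments before the large-value reduction. A reduced model of just that branch, for magnitudes that fit an unsigned long (a sketch, not the function itself):

    #include <assert.h>

    static double
    floor_negative_sketch (double v)
    {
      /* -ev_floor (-v) for small inputs: truncate the magnitude, negate */
      double f = -(double)(unsigned long)(-v);

      /* exact integers stay put, everything else moves one step down */
      return f - (f == v ? 0. : 1.);
    }

    static void
    floor_negative_check (void)
    {
      assert (floor_negative_sketch (-2.5) == -3.);
      assert (floor_negative_sketch (-3.0) == -3.);
    }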
1797{ 1948{
1798 WL head; 1949 WL head;
1799 unsigned char events; /* the events watched for */ 1950 unsigned char events; /* the events watched for */
1800 unsigned char reify; /* flag set when this ANFD needs reification (EV_ANFD_REIFY, EV__IOFDSET) */ 1951 unsigned char reify; /* flag set when this ANFD needs reification (EV_ANFD_REIFY, EV__IOFDSET) */
1801 unsigned char emask; /* some backends store the actual kernel mask in here */ 1952 unsigned char emask; /* some backends store the actual kernel mask in here */
1802 unsigned char unused; 1953 unsigned char eflags; /* flags field for use by backends */
1803#if EV_USE_EPOLL 1954#if EV_USE_EPOLL
1804 unsigned int egen; /* generation counter to counter epoll bugs */ 1955 unsigned int egen; /* generation counter to counter epoll bugs */
1805#endif 1956#endif
1806#if EV_SELECT_IS_WINSOCKET || EV_USE_IOCP 1957#if EV_SELECT_IS_WINSOCKET || EV_USE_IOCP
1807 SOCKET handle; 1958 SOCKET handle;
1861 static struct ev_loop default_loop_struct; 2012 static struct ev_loop default_loop_struct;
1862 EV_API_DECL struct ev_loop *ev_default_loop_ptr = 0; /* needs to be initialised to make it a definition despite extern */ 2013 EV_API_DECL struct ev_loop *ev_default_loop_ptr = 0; /* needs to be initialised to make it a definition despite extern */
1863 2014
1864#else 2015#else
1865 2016
1866 EV_API_DECL ev_tstamp ev_rt_now = 0; /* needs to be initialised to make it a definition despite extern */ 2017 EV_API_DECL ev_tstamp ev_rt_now = EV_TS_CONST (0.); /* needs to be initialised to make it a definition despite extern */
1867 #define VAR(name,decl) static decl; 2018 #define VAR(name,decl) static decl;
1868 #include "ev_vars.h" 2019 #include "ev_vars.h"
1869 #undef VAR 2020 #undef VAR
1870 2021
1871 static int ev_default_loop_ptr; 2022 static int ev_default_loop_ptr;
1893#if EV_USE_REALTIME 2044#if EV_USE_REALTIME
1894 if (ecb_expect_true (have_realtime)) 2045 if (ecb_expect_true (have_realtime))
1895 { 2046 {
1896 struct timespec ts; 2047 struct timespec ts;
1897 clock_gettime (CLOCK_REALTIME, &ts); 2048 clock_gettime (CLOCK_REALTIME, &ts);
1898 return ts.tv_sec + ts.tv_nsec * 1e-9; 2049 return EV_TS_GET (ts);
1899 } 2050 }
1900#endif 2051#endif
1901 2052
2053 {
1902 struct timeval tv; 2054 struct timeval tv;
1903 gettimeofday (&tv, 0); 2055 gettimeofday (&tv, 0);
1904 return tv.tv_sec + tv.tv_usec * 1e-6; 2056 return EV_TV_GET (tv);
2057 }
1905} 2058}
1906#endif 2059#endif
1907 2060
1908inline_size ev_tstamp 2061inline_size ev_tstamp
1909get_clock (void) 2062get_clock (void)
1911#if EV_USE_MONOTONIC 2064#if EV_USE_MONOTONIC
1912 if (ecb_expect_true (have_monotonic)) 2065 if (ecb_expect_true (have_monotonic))
1913 { 2066 {
1914 struct timespec ts; 2067 struct timespec ts;
1915 clock_gettime (CLOCK_MONOTONIC, &ts); 2068 clock_gettime (CLOCK_MONOTONIC, &ts);
1916 return ts.tv_sec + ts.tv_nsec * 1e-9; 2069 return EV_TS_GET (ts);
1917 } 2070 }
1918#endif 2071#endif
1919 2072
1920 return ev_time (); 2073 return ev_time ();
1921} 2074}
1929#endif 2082#endif
1930 2083
1931void 2084void
1932ev_sleep (ev_tstamp delay) EV_NOEXCEPT 2085ev_sleep (ev_tstamp delay) EV_NOEXCEPT
1933{ 2086{
1934 if (delay > 0.) 2087 if (delay > EV_TS_CONST (0.))
1935 { 2088 {
1936#if EV_USE_NANOSLEEP 2089#if EV_USE_NANOSLEEP
1937 struct timespec ts; 2090 struct timespec ts;
1938 2091
1939 EV_TS_SET (ts, delay); 2092 EV_TS_SET (ts, delay);
1940 nanosleep (&ts, 0); 2093 nanosleep (&ts, 0);
1941#elif defined _WIN32 2094#elif defined _WIN32
1942 /* maybe this should round up, as ms is very low resolution */ 2095 /* maybe this should round up, as ms is very low resolution */
1943 /* compared to select (µs) or nanosleep (ns) */ 2096 /* compared to select (µs) or nanosleep (ns) */
1944 Sleep ((unsigned long)(delay * 1e3)); 2097 Sleep ((unsigned long)(EV_TS_TO_MSEC (delay)));
1945#else 2098#else
1946 struct timeval tv; 2099 struct timeval tv;
1947 2100
1948 /* here we rely on sys/time.h + sys/types.h + unistd.h providing select */ 2101 /* here we rely on sys/time.h + sys/types.h + unistd.h providing select */
1949 /* something not guaranteed by newer posix versions, but guaranteed */ 2102 /* something not guaranteed by newer posix versions, but guaranteed */
2299 2452
2300 /* find minimum child */ 2453 /* find minimum child */
2301 if (ecb_expect_true (pos + DHEAP - 1 < E)) 2454 if (ecb_expect_true (pos + DHEAP - 1 < E))
2302 { 2455 {
2303 /* fast path */ (minpos = pos + 0), (minat = ANHE_at (*minpos)); 2456 /* fast path */ (minpos = pos + 0), (minat = ANHE_at (*minpos));
2304 if ( ANHE_at (pos [1]) < minat) (minpos = pos + 1), (minat = ANHE_at (*minpos)); 2457 if ( minat > ANHE_at (pos [1])) (minpos = pos + 1), (minat = ANHE_at (*minpos));
2305 if ( ANHE_at (pos [2]) < minat) (minpos = pos + 2), (minat = ANHE_at (*minpos)); 2458 if ( minat > ANHE_at (pos [2])) (minpos = pos + 2), (minat = ANHE_at (*minpos));
2306 if ( ANHE_at (pos [3]) < minat) (minpos = pos + 3), (minat = ANHE_at (*minpos)); 2459 if ( minat > ANHE_at (pos [3])) (minpos = pos + 3), (minat = ANHE_at (*minpos));
2307 } 2460 }
2308 else if (pos < E) 2461 else if (pos < E)
2309 { 2462 {
2310 /* slow path */ (minpos = pos + 0), (minat = ANHE_at (*minpos)); 2463 /* slow path */ (minpos = pos + 0), (minat = ANHE_at (*minpos));
2311 if (pos + 1 < E && ANHE_at (pos [1]) < minat) (minpos = pos + 1), (minat = ANHE_at (*minpos)); 2464 if (pos + 1 < E && minat > ANHE_at (pos [1])) (minpos = pos + 1), (minat = ANHE_at (*minpos));
2312 if (pos + 2 < E && ANHE_at (pos [2]) < minat) (minpos = pos + 2), (minat = ANHE_at (*minpos)); 2465 if (pos + 2 < E && minat > ANHE_at (pos [2])) (minpos = pos + 2), (minat = ANHE_at (*minpos));
2313 if (pos + 3 < E && ANHE_at (pos [3]) < minat) (minpos = pos + 3), (minat = ANHE_at (*minpos)); 2466 if (pos + 3 < E && minat > ANHE_at (pos [3])) (minpos = pos + 3), (minat = ANHE_at (*minpos));
2314 } 2467 }
2315 else 2468 else
2316 break; 2469 break;
2317 2470
2318 if (ANHE_at (he) <= minat) 2471 if (ANHE_at (he) <= minat)
2326 2479
2327 heap [k] = he; 2480 heap [k] = he;
2328 ev_active (ANHE_w (he)) = k; 2481 ev_active (ANHE_w (he)) = k;
2329} 2482}
2330 2483
2331#else /* 4HEAP */ 2484#else /* not 4HEAP */
2332 2485
2333#define HEAP0 1 2486#define HEAP0 1
2334#define HPARENT(k) ((k) >> 1) 2487#define HPARENT(k) ((k) >> 1)
2335#define UPHEAP_DONE(p,k) (!(p)) 2488#define UPHEAP_DONE(p,k) (!(p))
2336 2489
2722 2875
2723#endif 2876#endif
2724 2877
2725/*****************************************************************************/ 2878/*****************************************************************************/
2726 2879
2880#if EV_USE_TIMERFD
2881
2882static void periodics_reschedule (EV_P);
2883
2884static void
2885timerfdcb (EV_P_ ev_io *iow, int revents)
2886{
2887 struct itimerspec its = { 0 };
2888
2889 /* since we can't easily come up with a (portable) maximum value of time_t,
2890 * we wake up once per month, which hopefully is rare enough to not
2891 * be a problem. */
2892 its.it_value.tv_sec = ev_rt_now + 86400 * 30;
2893 timerfd_settime (timerfd, TFD_TIMER_ABSTIME | TFD_TIMER_CANCEL_ON_SET, &its, 0);
2894
2895 ev_rt_now = ev_time ();
2896 /* periodics_reschedule only needs ev_rt_now */
2897 /* but maybe in the future we want the full treatment. */
2898 /*
2899 now_floor = EV_TS_CONST (0.);
2900 time_update (EV_A_ EV_TSTAMP_HUGE);
2901 */
2902 periodics_reschedule (EV_A);
2903}
2904
2905ecb_noinline ecb_cold
2906static void
2907evtimerfd_init (EV_P)
2908{
2909 if (!ev_is_active (&timerfd_w))
2910 {
2911 timerfd = timerfd_create (CLOCK_REALTIME, TFD_NONBLOCK | TFD_CLOEXEC);
2912
2913 if (timerfd >= 0)
2914 {
2915 fd_intern (timerfd); /* just to be sure */
2916
2917 ev_io_init (&timerfd_w, timerfdcb, timerfd, EV_READ);
2918 ev_set_priority (&timerfd_w, EV_MINPRI);
2919 ev_io_start (EV_A_ &timerfd_w);
2920 ev_unref (EV_A); /* watcher should not keep loop alive */
2921
2922 /* (re-) arm timer */
2923 timerfdcb (EV_A_ 0, 0);
2924 }
2925 }
2926}
2927
2928#endif
2929
2930/*****************************************************************************/
2931
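The block above relies on a kernel facility that can also be shown in isolation: an absolute CLOCK_REALTIME timerfd armed with TFD_TIMER_CANCEL_ON_SET makes reads fail with ECANCELED whenever the system clock is set, which is exactly the wakeup periodics need. A standalone sketch of that mechanism (not libev code):

    #include <sys/timerfd.h>
    #include <string.h>
    #include <time.h>

    static int
    clock_change_fd (void)
    {
      struct itimerspec its;
      int fd = timerfd_create (CLOCK_REALTIME, TFD_NONBLOCK | TFD_CLOEXEC);

      if (fd < 0)
        return -1;

      memset (&its, 0, sizeof its);
      its.it_value.tv_sec = time (0) + 86400 * 30;   /* "far enough" in the future */

      timerfd_settime (fd, TFD_TIMER_ABSTIME | TFD_TIMER_CANCEL_ON_SET, &its, 0);

      /* poll fd for readability; a read () that fails with ECANCELED
       * signals a discontinuous realtime clock change */
      return fd;
    }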
2727#if EV_USE_IOCP 2932#if EV_USE_IOCP
2728# include "ev_iocp.c" 2933# include "ev_iocp.c"
2729#endif 2934#endif
2730#if EV_USE_PORT 2935#if EV_USE_PORT
2731# include "ev_port.c" 2936# include "ev_port.c"
2736#if EV_USE_EPOLL 2941#if EV_USE_EPOLL
2737# include "ev_epoll.c" 2942# include "ev_epoll.c"
2738#endif 2943#endif
2739#if EV_USE_LINUXAIO 2944#if EV_USE_LINUXAIO
2740# include "ev_linuxaio.c" 2945# include "ev_linuxaio.c"
2946#endif
2947#if EV_USE_IOURING
2948# include "ev_iouring.c"
2741#endif 2949#endif
2742#if EV_USE_POLL 2950#if EV_USE_POLL
2743# include "ev_poll.c" 2951# include "ev_poll.c"
2744#endif 2952#endif
2745#if EV_USE_SELECT 2953#if EV_USE_SELECT
2778 2986
2779 if (EV_USE_PORT ) flags |= EVBACKEND_PORT; 2987 if (EV_USE_PORT ) flags |= EVBACKEND_PORT;
2780 if (EV_USE_KQUEUE ) flags |= EVBACKEND_KQUEUE; 2988 if (EV_USE_KQUEUE ) flags |= EVBACKEND_KQUEUE;
2781 if (EV_USE_EPOLL ) flags |= EVBACKEND_EPOLL; 2989 if (EV_USE_EPOLL ) flags |= EVBACKEND_EPOLL;
2782 if (EV_USE_LINUXAIO) flags |= EVBACKEND_LINUXAIO; 2990 if (EV_USE_LINUXAIO) flags |= EVBACKEND_LINUXAIO;
2991 if (EV_USE_IOURING ) flags |= EVBACKEND_IOURING;
2783 if (EV_USE_POLL ) flags |= EVBACKEND_POLL; 2992 if (EV_USE_POLL ) flags |= EVBACKEND_POLL;
2784 if (EV_USE_SELECT ) flags |= EVBACKEND_SELECT; 2993 if (EV_USE_SELECT ) flags |= EVBACKEND_SELECT;
2785 2994
2786 return flags; 2995 return flags;
2787} 2996}
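With EVBACKEND_IOURING now reported by ev_supported_backends (), an embedding application can opt in explicitly, since the recommended set filters it out unless EV_RECOMMEND_IOURING is defined. A sketch, assuming a libev build with EV_USE_IOURING enabled:

    #include <stdio.h>
    #include <ev.h>

    int
    main (void)
    {
      struct ev_loop *loop = 0;

      if (ev_supported_backends () & EVBACKEND_IOURING)
        loop = ev_loop_new (EVBACKEND_IOURING);   /* request only this backend */

      if (!loop)
        loop = ev_loop_new (EVFLAG_AUTO);         /* fall back to the recommended set */

      printf ("backend in use: 0x%x\n", ev_backend (loop));
      ev_loop_destroy (loop);
      return 0;
    }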
2808 3017
2809 /* TODO: linuxaio is very experimental */ 3018 /* TODO: linuxaio is very experimental */
2810#if !EV_RECOMMEND_LINUXAIO 3019#if !EV_RECOMMEND_LINUXAIO
2811 flags &= ~EVBACKEND_LINUXAIO; 3020 flags &= ~EVBACKEND_LINUXAIO;
2812#endif 3021#endif
3022 /* TODO: iouring is super experimental */
3023#if !EV_RECOMMEND_IOURING
3024 flags &= ~EVBACKEND_IOURING;
3025#endif
2813 3026
2814 return flags; 3027 return flags;
2815} 3028}
2816 3029
2817ecb_cold 3030ecb_cold
2821 int flags = EVBACKEND_EPOLL | EVBACKEND_KQUEUE | EVBACKEND_PORT; 3034 int flags = EVBACKEND_EPOLL | EVBACKEND_KQUEUE | EVBACKEND_PORT;
2822 3035
2823 /* epoll embeddability broken on all linux versions up to at least 2.6.23 */ 3036 /* epoll embeddability broken on all linux versions up to at least 2.6.23 */
2824 if (ev_linux_version () < 0x020620) /* disable it on linux < 2.6.32 */ 3037 if (ev_linux_version () < 0x020620) /* disable it on linux < 2.6.32 */
2825 flags &= ~EVBACKEND_EPOLL; 3038 flags &= ~EVBACKEND_EPOLL;
3039
3040 /* EVBACKEND_LINUXAIO is theoretically embeddable, but suffers from a performance overhead */
3041
3042 /* EVBACKEND_IOURING is practically embeddable, but the current implementation is not
3043 * because our backend_fd is the epoll fd we need as fallback.
3044 * if the kernel ever is fixed, this might change...
3045 */
2826 3046
2827 return flags; 3047 return flags;
2828} 3048}
2829 3049
2830unsigned int 3050unsigned int
2948 fs_fd = flags & EVFLAG_NOINOTIFY ? -1 : -2; 3168 fs_fd = flags & EVFLAG_NOINOTIFY ? -1 : -2;
2949#endif 3169#endif
2950#if EV_USE_SIGNALFD 3170#if EV_USE_SIGNALFD
2951 sigfd = flags & EVFLAG_SIGNALFD ? -2 : -1; 3171 sigfd = flags & EVFLAG_SIGNALFD ? -2 : -1;
2952#endif 3172#endif
3173#if EV_USE_TIMERFD
3174 timerfd = flags & EVFLAG_NOTIMERFD ? -1 : -2;
3175#endif
2953 3176
2954 if (!(flags & EVBACKEND_MASK)) 3177 if (!(flags & EVBACKEND_MASK))
2955 flags |= ev_recommended_backends (); 3178 flags |= ev_recommended_backends ();
2956 3179
2957#if EV_USE_IOCP 3180#if EV_USE_IOCP
2960#if EV_USE_PORT 3183#if EV_USE_PORT
2961 if (!backend && (flags & EVBACKEND_PORT )) backend = port_init (EV_A_ flags); 3184 if (!backend && (flags & EVBACKEND_PORT )) backend = port_init (EV_A_ flags);
2962#endif 3185#endif
2963#if EV_USE_KQUEUE 3186#if EV_USE_KQUEUE
2964 if (!backend && (flags & EVBACKEND_KQUEUE )) backend = kqueue_init (EV_A_ flags); 3187 if (!backend && (flags & EVBACKEND_KQUEUE )) backend = kqueue_init (EV_A_ flags);
3188#endif
3189#if EV_USE_IOURING
3190 if (!backend && (flags & EVBACKEND_IOURING )) backend = iouring_init (EV_A_ flags);
2965#endif 3191#endif
2966#if EV_USE_LINUXAIO 3192#if EV_USE_LINUXAIO
2967 if (!backend && (flags & EVBACKEND_LINUXAIO)) backend = linuxaio_init (EV_A_ flags); 3193 if (!backend && (flags & EVBACKEND_LINUXAIO)) backend = linuxaio_init (EV_A_ flags);
2968#endif 3194#endif
2969#if EV_USE_EPOLL 3195#if EV_USE_EPOLL
3027#if EV_USE_SIGNALFD 3253#if EV_USE_SIGNALFD
3028 if (ev_is_active (&sigfd_w)) 3254 if (ev_is_active (&sigfd_w))
3029 close (sigfd); 3255 close (sigfd);
3030#endif 3256#endif
3031 3257
3258#if EV_USE_TIMERFD
3259 if (ev_is_active (&timerfd_w))
3260 close (timerfd);
3261#endif
3262
3032#if EV_USE_INOTIFY 3263#if EV_USE_INOTIFY
3033 if (fs_fd >= 0) 3264 if (fs_fd >= 0)
3034 close (fs_fd); 3265 close (fs_fd);
3035#endif 3266#endif
3036 3267
3043#if EV_USE_PORT 3274#if EV_USE_PORT
3044 if (backend == EVBACKEND_PORT ) port_destroy (EV_A); 3275 if (backend == EVBACKEND_PORT ) port_destroy (EV_A);
3045#endif 3276#endif
3046#if EV_USE_KQUEUE 3277#if EV_USE_KQUEUE
3047 if (backend == EVBACKEND_KQUEUE ) kqueue_destroy (EV_A); 3278 if (backend == EVBACKEND_KQUEUE ) kqueue_destroy (EV_A);
3279#endif
3280#if EV_USE_IOURING
3281 if (backend == EVBACKEND_IOURING ) iouring_destroy (EV_A);
3048#endif 3282#endif
3049#if EV_USE_LINUXAIO 3283#if EV_USE_LINUXAIO
3050 if (backend == EVBACKEND_LINUXAIO) linuxaio_destroy (EV_A); 3284 if (backend == EVBACKEND_LINUXAIO) linuxaio_destroy (EV_A);
3051#endif 3285#endif
3052#if EV_USE_EPOLL 3286#if EV_USE_EPOLL
3111 if (backend == EVBACKEND_PORT ) port_fork (EV_A); 3345 if (backend == EVBACKEND_PORT ) port_fork (EV_A);
3112#endif 3346#endif
3113#if EV_USE_KQUEUE 3347#if EV_USE_KQUEUE
3114 if (backend == EVBACKEND_KQUEUE ) kqueue_fork (EV_A); 3348 if (backend == EVBACKEND_KQUEUE ) kqueue_fork (EV_A);
3115#endif 3349#endif
3350#if EV_USE_IOURING
3351 if (backend == EVBACKEND_IOURING ) iouring_fork (EV_A);
3352#endif
3116#if EV_USE_LINUXAIO 3353#if EV_USE_LINUXAIO
3117 if (backend == EVBACKEND_LINUXAIO) linuxaio_fork (EV_A); 3354 if (backend == EVBACKEND_LINUXAIO) linuxaio_fork (EV_A);
3118#endif 3355#endif
3119#if EV_USE_EPOLL 3356#if EV_USE_EPOLL
3120 if (backend == EVBACKEND_EPOLL ) epoll_fork (EV_A); 3357 if (backend == EVBACKEND_EPOLL ) epoll_fork (EV_A);
3121#endif 3358#endif
3122#if EV_USE_INOTIFY 3359#if EV_USE_INOTIFY
3123 infy_fork (EV_A); 3360 infy_fork (EV_A);
3124#endif 3361#endif
3125 3362
3363 if (postfork != 2)
3364 {
3365 #if EV_USE_SIGNALFD
3366 /* surprisingly, nothing needs to be done for signalfd, according to docs, it does the right thing on fork */
3367 #endif
3368
3369 #if EV_USE_TIMERFD
3370 if (ev_is_active (&timerfd_w))
3371 {
3372 ev_ref (EV_A);
3373 ev_io_stop (EV_A_ &timerfd_w);
3374
3375 close (timerfd);
3376 timerfd = -2;
3377
3378 evtimerfd_init (EV_A);
3379 /* reschedule periodics, in case we missed something */
3380 ev_feed_event (EV_A_ &timerfd_w, EV_CUSTOM);
3381 }
3382 #endif
3383
3126#if EV_SIGNAL_ENABLE || EV_ASYNC_ENABLE 3384 #if EV_SIGNAL_ENABLE || EV_ASYNC_ENABLE
3127 if (ev_is_active (&pipe_w) && postfork != 2) 3385 if (ev_is_active (&pipe_w))
3128 { 3386 {
3129 /* pipe_write_wanted must be false now, so modifying fd vars should be safe */ 3387 /* pipe_write_wanted must be false now, so modifying fd vars should be safe */
3130 3388
3131 ev_ref (EV_A); 3389 ev_ref (EV_A);
3132 ev_io_stop (EV_A_ &pipe_w); 3390 ev_io_stop (EV_A_ &pipe_w);
3133 3391
3134 if (evpipe [0] >= 0) 3392 if (evpipe [0] >= 0)
3135 EV_WIN32_CLOSE_FD (evpipe [0]); 3393 EV_WIN32_CLOSE_FD (evpipe [0]);
3136 3394
3137 evpipe_init (EV_A); 3395 evpipe_init (EV_A);
3138 /* iterate over everything, in case we missed something before */ 3396 /* iterate over everything, in case we missed something before */
3139 ev_feed_event (EV_A_ &pipe_w, EV_CUSTOM); 3397 ev_feed_event (EV_A_ &pipe_w, EV_CUSTOM);
3398 }
3399 #endif
3140 } 3400 }
3141#endif
3142 3401
3143 postfork = 0; 3402 postfork = 0;
3144} 3403}
3145 3404
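The child-side cleanup above only runs once the application has told libev about the fork. A typical embedding pattern that triggers it (a sketch, assuming the usual EV_MULTIPLICITY build):

    #include <unistd.h>
    #include <ev.h>

    static void
    fork_and_run (struct ev_loop *loop)
    {
      pid_t pid = fork ();

      if (pid == 0)
        {
          /* flags the loop as forked; loop_fork () above then re-creates the
           * backend fds, the signal pipe and the timerfd before the next poll */
          ev_loop_fork (loop);
          ev_run (loop, 0);
          _exit (0);
        }
    }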
3146#if EV_MULTIPLICITY 3405#if EV_MULTIPLICITY
3416 { 3675 {
3417 ev_at (w) += w->repeat; 3676 ev_at (w) += w->repeat;
3418 if (ev_at (w) < mn_now) 3677 if (ev_at (w) < mn_now)
3419 ev_at (w) = mn_now; 3678 ev_at (w) = mn_now;
3420 3679
3421 assert (("libev: negative ev_timer repeat value found while processing timers", w->repeat > 0.)); 3680 assert (("libev: negative ev_timer repeat value found while processing timers", w->repeat > EV_TS_CONST (0.)));
3422 3681
3423 ANHE_at_cache (timers [HEAP0]); 3682 ANHE_at_cache (timers [HEAP0]);
3424 downheap (timers, timercnt, HEAP0); 3683 downheap (timers, timercnt, HEAP0);
3425 } 3684 }
3426 else 3685 else
3557 3816
3558 mn_now = get_clock (); 3817 mn_now = get_clock ();
3559 3818
3560 /* only fetch the realtime clock every 0.5*MIN_TIMEJUMP seconds */ 3819 /* only fetch the realtime clock every 0.5*MIN_TIMEJUMP seconds */
3561 /* interpolate in the meantime */ 3820 /* interpolate in the meantime */
3562 if (ecb_expect_true (mn_now - now_floor < MIN_TIMEJUMP * .5)) 3821 if (ecb_expect_true (mn_now - now_floor < EV_TS_CONST (MIN_TIMEJUMP * .5)))
3563 { 3822 {
3564 ev_rt_now = rtmn_diff + mn_now; 3823 ev_rt_now = rtmn_diff + mn_now;
3565 return; 3824 return;
3566 } 3825 }
3567 3826
3581 ev_tstamp diff; 3840 ev_tstamp diff;
3582 rtmn_diff = ev_rt_now - mn_now; 3841 rtmn_diff = ev_rt_now - mn_now;
3583 3842
3584 diff = odiff - rtmn_diff; 3843 diff = odiff - rtmn_diff;
3585 3844
3586 if (ecb_expect_true ((diff < 0. ? -diff : diff) < MIN_TIMEJUMP)) 3845 if (ecb_expect_true ((diff < EV_TS_CONST (0.) ? -diff : diff) < EV_TS_CONST (MIN_TIMEJUMP)))
3587 return; /* all is well */ 3846 return; /* all is well */
3588 3847
3589 ev_rt_now = ev_time (); 3848 ev_rt_now = ev_time ();
3590 mn_now = get_clock (); 3849 mn_now = get_clock ();
3591 now_floor = mn_now; 3850 now_floor = mn_now;
3600 else 3859 else
3601#endif 3860#endif
3602 { 3861 {
3603 ev_rt_now = ev_time (); 3862 ev_rt_now = ev_time ();
3604 3863
3605 if (ecb_expect_false (mn_now > ev_rt_now || ev_rt_now > mn_now + max_block + MIN_TIMEJUMP)) 3864 if (ecb_expect_false (mn_now > ev_rt_now || ev_rt_now > mn_now + max_block + EV_TS_CONST (MIN_TIMEJUMP)))
3606 { 3865 {
3607 /* adjust timers. this is easy, as the offset is the same for all of them */ 3866 /* adjust timers. this is easy, as the offset is the same for all of them */
3608 timers_reschedule (EV_A_ ev_rt_now - mn_now); 3867 timers_reschedule (EV_A_ ev_rt_now - mn_now);
3609#if EV_PERIODIC_ENABLE 3868#if EV_PERIODIC_ENABLE
3610 periodics_reschedule (EV_A); 3869 periodics_reschedule (EV_A);
3679 3938
3680 /* remember old timestamp for io_blocktime calculation */ 3939 /* remember old timestamp for io_blocktime calculation */
3681 ev_tstamp prev_mn_now = mn_now; 3940 ev_tstamp prev_mn_now = mn_now;
3682 3941
3683 /* update time to cancel out callback processing overhead */ 3942 /* update time to cancel out callback processing overhead */
3684 time_update (EV_A_ 1e100); 3943 time_update (EV_A_ EV_TS_CONST (EV_TSTAMP_HUGE));
3685 3944
3686 /* from now on, we want a pipe-wake-up */ 3945 /* from now on, we want a pipe-wake-up */
3687 pipe_write_wanted = 1; 3946 pipe_write_wanted = 1;
3688 3947
3689 ECB_MEMORY_FENCE; /* make sure pipe_write_wanted is visible before we check for potential skips */ 3948 ECB_MEMORY_FENCE; /* make sure pipe_write_wanted is visible before we check for potential skips */
3690 3949
3691 if (ecb_expect_true (!(flags & EVRUN_NOWAIT || idleall || !activecnt || pipe_write_skipped))) 3950 if (ecb_expect_true (!(flags & EVRUN_NOWAIT || idleall || !activecnt || pipe_write_skipped)))
3692 { 3951 {
3693 waittime = MAX_BLOCKTIME; 3952 waittime = EV_TS_CONST (MAX_BLOCKTIME);
3694 3953
3695 if (timercnt) 3954 if (timercnt)
3696 { 3955 {
3697 ev_tstamp to = ANHE_at (timers [HEAP0]) - mn_now; 3956 ev_tstamp to = ANHE_at (timers [HEAP0]) - mn_now;
3698 if (waittime > to) waittime = to; 3957 if (waittime > to) waittime = to;
3708 3967
3709 /* don't let timeouts decrease the waittime below timeout_blocktime */ 3968 /* don't let timeouts decrease the waittime below timeout_blocktime */
3710 if (ecb_expect_false (waittime < timeout_blocktime)) 3969 if (ecb_expect_false (waittime < timeout_blocktime))
3711 waittime = timeout_blocktime; 3970 waittime = timeout_blocktime;
3712 3971
3713 /* at this point, we NEED to wait, so we have to ensure */ 3972 /* now there are two more special cases left, either we have
3714 /* to pass a minimum nonzero value to the backend */ 3973 * already-expired timers, so we should not sleep, or we have timers
3974 * that expire very soon, in which case we need to wait for a minimum
3975 * amount of time for some event loop backends.
3976 */
3715 if (ecb_expect_false (waittime < backend_mintime)) 3977 if (ecb_expect_false (waittime < backend_mintime))
3978 waittime = waittime <= EV_TS_CONST (0.)
3979 ? EV_TS_CONST (0.)
3716 waittime = backend_mintime; 3980 : backend_mintime;
3717 3981
3718 /* extra check because io_blocktime is commonly 0 */ 3982 /* extra check because io_blocktime is commonly 0 */
3719 if (ecb_expect_false (io_blocktime)) 3983 if (ecb_expect_false (io_blocktime))
3720 { 3984 {
3721 sleeptime = io_blocktime - (mn_now - prev_mn_now); 3985 sleeptime = io_blocktime - (mn_now - prev_mn_now);
3722 3986
3723 if (sleeptime > waittime - backend_mintime) 3987 if (sleeptime > waittime - backend_mintime)
3724 sleeptime = waittime - backend_mintime; 3988 sleeptime = waittime - backend_mintime;
3725 3989
3726 if (ecb_expect_true (sleeptime > 0.)) 3990 if (ecb_expect_true (sleeptime > EV_TS_CONST (0.)))
3727 { 3991 {
3728 ev_sleep (sleeptime); 3992 ev_sleep (sleeptime);
3729 waittime -= sleeptime; 3993 waittime -= sleeptime;
3730 } 3994 }
3731 } 3995 }
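The sleep logic above is driven by io_blocktime and timeout_blocktime, which applications control through the public collect-interval setters; batching wakeups this way trades a little latency for fewer loop iterations. A sketch:

    #include <ev.h>

    static void
    tune_loop (struct ev_loop *loop)
    {
      ev_set_io_collect_interval (loop, 0.01);        /* gather I/O for up to 10ms */
      ev_set_timeout_collect_interval (loop, 0.05);   /* let timers fire up to 50ms late */
    }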
3805} 4069}
3806 4070
3807void 4071void
3808ev_now_update (EV_P) EV_NOEXCEPT 4072ev_now_update (EV_P) EV_NOEXCEPT
3809{ 4073{
3810 time_update (EV_A_ 1e100); 4074 time_update (EV_A_ EV_TSTAMP_HUGE);
3811} 4075}
3812 4076
3813void 4077void
3814ev_suspend (EV_P) EV_NOEXCEPT 4078ev_suspend (EV_P) EV_NOEXCEPT
3815{ 4079{
4046} 4310}
4047 4311
4048ev_tstamp 4312ev_tstamp
4049ev_timer_remaining (EV_P_ ev_timer *w) EV_NOEXCEPT 4313ev_timer_remaining (EV_P_ ev_timer *w) EV_NOEXCEPT
4050{ 4314{
4051 return ev_at (w) - (ev_is_active (w) ? mn_now : 0.); 4315 return ev_at (w) - (ev_is_active (w) ? mn_now : EV_TS_CONST (0.));
4052} 4316}
4053 4317
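ev_timer_remaining above reports the time left relative to mn_now; a small usage sketch with a repeating watcher:

    #include <stdio.h>
    #include <ev.h>

    static ev_timer ticker;

    static void
    tick_cb (EV_P_ ev_timer *w, int revents)
    {
      /* for a repeating watcher, the core re-arms it by adding w->repeat
       * to its expiry (see the timers hunk above) */
      printf ("tick, %f s until the next one\n", ev_timer_remaining (EV_A_ w));
    }

    static void
    start_ticker (EV_P)
    {
      ev_timer_init (&ticker, tick_cb, 0.5, 0.5);   /* first after 0.5s, then every 0.5s */
      ev_timer_start (EV_A_ &ticker);
    }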
4054#if EV_PERIODIC_ENABLE 4318#if EV_PERIODIC_ENABLE
4055ecb_noinline 4319ecb_noinline
4056void 4320void
4057ev_periodic_start (EV_P_ ev_periodic *w) EV_NOEXCEPT 4321ev_periodic_start (EV_P_ ev_periodic *w) EV_NOEXCEPT
4058{ 4322{
4059 if (ecb_expect_false (ev_is_active (w))) 4323 if (ecb_expect_false (ev_is_active (w)))
4060 return; 4324 return;
4325
4326#if EV_USE_TIMERFD
4327 if (timerfd == -2)
4328 evtimerfd_init (EV_A);
4329#endif
4061 4330
4062 if (w->reschedule_cb) 4331 if (w->reschedule_cb)
4063 ev_at (w) = w->reschedule_cb (w, ev_rt_now); 4332 ev_at (w) = w->reschedule_cb (w, ev_rt_now);
4064 else if (w->interval) 4333 else if (w->interval)
4065 { 4334 {
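ev_periodic_start now lazily creates the shared timerfd on first use, so absolute-time watchers react promptly when the wall clock is changed. A sketch of such a watcher:

    #include <ev.h>

    static ev_periodic hourly;

    static void
    hourly_cb (EV_P_ ev_periodic *w, int revents)
    {
      /* runs at every full hour of wall-clock time; a clock change is picked
       * up via the timerfd path above instead of waiting for MAX_BLOCKTIME */
    }

    static void
    start_hourly (EV_P)
    {
      ev_periodic_init (&hourly, hourly_cb, 0., 3600., 0);
      ev_periodic_start (EV_A_ &hourly);   /* first start creates the timerfd lazily */
    }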
