/cvs/libev/ev.c

Comparing libev/ev.c (file contents):
Revision 1.501 by root, Mon Jul 1 21:47:42 2019 UTC vs.
Revision 1.510 by root, Wed Aug 28 09:45:49 2019 UTC

447# endif 447# endif
448#endif 448#endif
449 449
450#if EV_USE_LINUXAIO 450#if EV_USE_LINUXAIO
451# include <sys/syscall.h> 451# include <sys/syscall.h>
452# if !SYS_io_getevents || !EV_USE_EPOLL /* ev_linxaio uses ev_poll.c:ev_epoll_create */ 452# if SYS_io_getevents && EV_USE_EPOLL /* linuxaio backend requires epoll backend */
453# define EV_NEED_SYSCALL 1
454# else
453# undef EV_USE_LINUXAIO 455# undef EV_USE_LINUXAIO
454# define EV_USE_LINUXAIO 0 456# define EV_USE_LINUXAIO 0
455# else
456# define EV_NEED_SYSCALL 1
457# endif 457# endif
458#endif 458#endif
459 459
460#if EV_USE_IOURING 460#if EV_USE_IOURING
461# include <sys/syscall.h> 461# include <sys/syscall.h>
462# if !__alpha && !SYS_io_uring_setup 462# if !SYS_io_uring_setup && __linux && !__alpha
463# define SYS_io_uring_setup 425 463# define SYS_io_uring_setup 425
464# define SYS_io_uring_enter 426 464# define SYS_io_uring_enter 426
465#  define SYS_io_uring_register 427                          465#  define SYS_io_uring_register 427
466# endif 466# endif
467# if SYS_io_uring_setup 467# if SYS_io_uring_setup && EV_USE_EPOLL /* iouring backend requires epoll backend */
468# define EV_NEED_SYSCALL 1 468# define EV_NEED_SYSCALL 1
469# else 469# else
470# undef EV_USE_IOURING 470# undef EV_USE_IOURING
471# define EV_USE_IOURING 0 471# define EV_USE_IOURING 0
472# endif 472# endif
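
Note: with the reordered conditionals above, EVBACKEND_LINUXAIO and EVBACKEND_IOURING are only compiled in when the epoll backend is also available as a fallback, and both now set EV_NEED_SYSCALL. A minimal sketch of how an application might request the new backend, assuming a build with EV_USE_IOURING=1 (the fallback logic here is illustrative, not part of this change):

    #include "ev.h"

    /* prefer io_uring when this build supports it, else let libev choose */
    struct ev_loop *loop =
      ev_loop_new (ev_supported_backends () & EVBACKEND_IOURING
                   ? EVBACKEND_IOURING
                   : EVFLAG_AUTO);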
520}; 520};
521#endif 521#endif
522 522
523/*****************************************************************************/ 523/*****************************************************************************/
524 524
525#if EV_NEED_SYSCALL
526
527#include <sys/syscall.h>
528
529/*
530 * define some syscall wrappers for common architectures
531 * this is mostly for nice looks during debugging, not performance.
532 * our syscalls return < 0, not == -1, on error. which is good
533 * enough for linux aio.
534 * TODO: arm is also common nowadays, maybe even mips and x86
535 * TODO: after implementing this, it suddenly looks like overkill, but its hard to remove...
536 */
537#if __GNUC__ && __linux && ECB_AMD64 && !defined __OPTIMIZE_SIZE__
538 /* the costly errno access probably kills this for size optimisation */
539
540 #define ev_syscall(nr,narg,arg1,arg2,arg3,arg4,arg5) \
541 ({ \
542 long res; \
543 register unsigned long r5 __asm__ ("r8" ); \
544 register unsigned long r4 __asm__ ("r10"); \
545 register unsigned long r3 __asm__ ("rdx"); \
546 register unsigned long r2 __asm__ ("rsi"); \
547 register unsigned long r1 __asm__ ("rdi"); \
548 if (narg >= 5) r5 = (unsigned long)(arg5); \
549 if (narg >= 4) r4 = (unsigned long)(arg4); \
550 if (narg >= 3) r3 = (unsigned long)(arg3); \
551 if (narg >= 2) r2 = (unsigned long)(arg2); \
552 if (narg >= 1) r1 = (unsigned long)(arg1); \
553 __asm__ __volatile__ ( \
554 "syscall\n\t" \
555 : "=a" (res) \
556 : "0" (nr), "r" (r1), "r" (r2), "r" (r3), "r" (r4), "r" (r5) \
557 : "cc", "r11", "cx", "memory"); \
558 errno = -res; \
559 res; \
560 })
561
562#endif
563
564#ifdef ev_syscall
565  #define ev_syscall0(nr)                              ev_syscall (nr, 0,    0,    0,    0,    0,    0)
566 #define ev_syscall1(nr,arg1) ev_syscall (nr, 1, arg1, 0, 0, 0, 0)
567 #define ev_syscall2(nr,arg1,arg2) ev_syscall (nr, 2, arg1, arg2, 0, 0, 0)
568 #define ev_syscall3(nr,arg1,arg2,arg3) ev_syscall (nr, 3, arg1, arg2, arg3, 0, 0)
569  #define ev_syscall4(nr,arg1,arg2,arg3,arg4)          ev_syscall (nr, 4, arg1, arg2, arg3, arg4,    0)
570 #define ev_syscall5(nr,arg1,arg2,arg3,arg4,arg5) ev_syscall (nr, 5, arg1, arg2, arg3, arg4, arg5)
571#else
572 #define ev_syscall0(nr) syscall (nr)
573 #define ev_syscall1(nr,arg1) syscall (nr, arg1)
574 #define ev_syscall2(nr,arg1,arg2) syscall (nr, arg1, arg2)
575 #define ev_syscall3(nr,arg1,arg2,arg3) syscall (nr, arg1, arg2, arg3)
576 #define ev_syscall4(nr,arg1,arg2,arg3,arg4) syscall (nr, arg1, arg2, arg3, arg4)
577 #define ev_syscall5(nr,arg1,arg2,arg3,arg4,arg5) syscall (nr, arg1, arg2, arg3, arg4, arg5)
578#endif
579
580#endif
581
582/*****************************************************************************/
583
584#if EV_VERIFY >= 3 525#if EV_VERIFY >= 3
585# define EV_FREQUENT_CHECK ev_verify (EV_A) 526# define EV_FREQUENT_CHECK ev_verify (EV_A)
586#else 527#else
587# define EV_FREQUENT_CHECK do { } while (0) 528# define EV_FREQUENT_CHECK do { } while (0)
588#endif 529#endif
592 * This value is good at least till the year 4000. 533 * This value is good at least till the year 4000.
593 */ 534 */
594#define MIN_INTERVAL 0.0001220703125 /* 1/2**13, good till 4000 */ 535#define MIN_INTERVAL 0.0001220703125 /* 1/2**13, good till 4000 */
595/*#define MIN_INTERVAL 0.00000095367431640625 /* 1/2**20, good till 2200 */ 536/*#define MIN_INTERVAL 0.00000095367431640625 /* 1/2**20, good till 2200 */
596 537
597#define MIN_TIMEJUMP 1. /* minimum timejump that gets detected (if monotonic clock available) */ 538#define MIN_TIMEJUMP 1. /* minimum timejump that gets detected (if monotonic clock available) */
598#define MAX_BLOCKTIME 59.743 /* never wait longer than this time (to detect time jumps) */ 539#define MAX_BLOCKTIME 59.743 /* never wait longer than this time (to detect time jumps) */
599 540
541/* find a portable timestamp that is "always" in the future but fits into time_t.
542 * this is quite hard, and we are mostly guessing - we handle 32 bit signed/unsigned time_t,
543 * and sizes larger than 32 bit, and maybe the unlikely floating point time_t */
544#define EV_TSTAMP_HUGE \
545 (sizeof (time_t) >= 8 ? 10000000000000. \
546 : 0 < (time_t)4294967295 ? 4294967295. \
547 : 2147483647.) \
548
549#ifndef EV_TS_CONST
550# define EV_TS_CONST(nv) nv
551# define EV_TS_TO_MSEC(a) a * 1e3 + 0.9999
552# define EV_TS_FROM_USEC(us) us * 1e-6
600#define EV_TV_SET(tv,t) do { tv.tv_sec = (long)t; tv.tv_usec = (long)((t - tv.tv_sec) * 1e6); } while (0) 553# define EV_TV_SET(tv,t) do { tv.tv_sec = (long)t; tv.tv_usec = (long)((t - tv.tv_sec) * 1e6); } while (0)
601#define EV_TS_SET(ts,t) do { ts.tv_sec = (long)t; ts.tv_nsec = (long)((t - ts.tv_sec) * 1e9); } while (0) 554# define EV_TS_SET(ts,t) do { ts.tv_sec = (long)t; ts.tv_nsec = (long)((t - ts.tv_sec) * 1e9); } while (0)
555# define EV_TV_GET(tv) ((tv).tv_sec + (tv).tv_usec * 1e-6)
556# define EV_TS_GET(ts) ((ts).tv_sec + (ts).tv_nsec * 1e-9)
557#endif
602 558
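
Note: the new EV_TS_*/EV_TV_* macros centralise the conversions between libev's ev_tstamp (a double by default) and the kernel's timespec/timeval representations, behind an #ifndef so a port can override them. A small sketch of what they do for a concrete value (all names are local and illustrative):

    ev_tstamp delay = 2.5;
    struct timespec ts;
    struct timeval  tv;

    EV_TS_SET (ts, delay);          /* ts.tv_sec = 2, ts.tv_nsec = 500000000 */
    EV_TV_SET (tv, delay);          /* tv.tv_sec = 2, tv.tv_usec = 500000    */

    ev_tstamp t1 = EV_TS_GET (ts);  /* back to 2.5 */
    ev_tstamp t2 = EV_TV_GET (tv);  /* back to 2.5 */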
603/* the following is ecb.h embedded into libev - use update_ev_c to update from an external copy */ 559/* the following is ecb.h embedded into libev - use update_ev_c to update from an external copy */
604/* ECB.H BEGIN */ 560/* ECB.H BEGIN */
605/* 561/*
606 * libecb - http://software.schmorp.de/pkg/libecb 562 * libecb - http://software.schmorp.de/pkg/libecb
1643# define inline_speed ecb_inline 1599# define inline_speed ecb_inline
1644#else 1600#else
1645# define inline_speed ecb_noinline static 1601# define inline_speed ecb_noinline static
1646#endif 1602#endif
1647 1603
1604/*****************************************************************************/
1605/* raw syscall wrappers */
1606
1607#if EV_NEED_SYSCALL
1608
1609#include <sys/syscall.h>
1610
1611/*
1612 * define some syscall wrappers for common architectures
1613 * this is mostly for nice looks during debugging, not performance.
1614 * our syscalls return < 0, not == -1, on error. which is good
1615 * enough for linux aio.
1616 * TODO: arm is also common nowadays, maybe even mips and x86
1617 * TODO: after implementing this, it suddenly looks like overkill, but its hard to remove...
1618 */
1619#if __GNUC__ && __linux && ECB_AMD64 && !defined __OPTIMIZE_SIZE__
1620 /* the costly errno access probably kills this for size optimisation */
1621
1622 #define ev_syscall(nr,narg,arg1,arg2,arg3,arg4,arg5,arg6) \
1623 ({ \
1624 long res; \
1625 register unsigned long r6 __asm__ ("r9" ); \
1626 register unsigned long r5 __asm__ ("r8" ); \
1627 register unsigned long r4 __asm__ ("r10"); \
1628 register unsigned long r3 __asm__ ("rdx"); \
1629 register unsigned long r2 __asm__ ("rsi"); \
1630 register unsigned long r1 __asm__ ("rdi"); \
1631 if (narg >= 6) r6 = (unsigned long)(arg6); \
1632 if (narg >= 5) r5 = (unsigned long)(arg5); \
1633 if (narg >= 4) r4 = (unsigned long)(arg4); \
1634 if (narg >= 3) r3 = (unsigned long)(arg3); \
1635 if (narg >= 2) r2 = (unsigned long)(arg2); \
1636 if (narg >= 1) r1 = (unsigned long)(arg1); \
1637 __asm__ __volatile__ ( \
1638 "syscall\n\t" \
1639 : "=a" (res) \
1640 : "0" (nr), "r" (r1), "r" (r2), "r" (r3), "r" (r4), "r" (r5) \
1641 : "cc", "r11", "cx", "memory"); \
1642 errno = -res; \
1643 res; \
1644 })
1645
1646#endif
1647
1648#ifdef ev_syscall
1649 #define ev_syscall0(nr) ev_syscall (nr, 0, 0, 0, 0, 0, 0, 0)
1650 #define ev_syscall1(nr,arg1) ev_syscall (nr, 1, arg1, 0, 0, 0, 0, 0)
1651 #define ev_syscall2(nr,arg1,arg2) ev_syscall (nr, 2, arg1, arg2, 0, 0, 0, 0)
1652 #define ev_syscall3(nr,arg1,arg2,arg3) ev_syscall (nr, 3, arg1, arg2, arg3, 0, 0, 0)
1653  #define ev_syscall4(nr,arg1,arg2,arg3,arg4)          ev_syscall (nr, 4, arg1, arg2, arg3, arg4,    0,    0)
1654 #define ev_syscall5(nr,arg1,arg2,arg3,arg4,arg5) ev_syscall (nr, 5, arg1, arg2, arg3, arg4, arg5, 0)
1655  #define ev_syscall6(nr,arg1,arg2,arg3,arg4,arg5,arg6) ev_syscall (nr, 6, arg1, arg2, arg3, arg4, arg5, arg6)
1656#else
1657 #define ev_syscall0(nr) syscall (nr)
1658 #define ev_syscall1(nr,arg1) syscall (nr, arg1)
1659 #define ev_syscall2(nr,arg1,arg2) syscall (nr, arg1, arg2)
1660 #define ev_syscall3(nr,arg1,arg2,arg3) syscall (nr, arg1, arg2, arg3)
1661 #define ev_syscall4(nr,arg1,arg2,arg3,arg4) syscall (nr, arg1, arg2, arg3, arg4)
1662 #define ev_syscall5(nr,arg1,arg2,arg3,arg4,arg5) syscall (nr, arg1, arg2, arg3, arg4, arg5)
1663  #define ev_syscall6(nr,arg1,arg2,arg3,arg4,arg5,arg6) syscall (nr, arg1, arg2, arg3, arg4, arg5, arg6)
1664#endif
1665
1666#endif
1667
1668/*****************************************************************************/
1669
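Note: as the comment block explains, the wrappers return a negative value rather than -1 on error. A hedged sketch of a call site, assuming the linux io_uring definitions are in scope (entries and params are placeholders, not taken from this diff):

    struct io_uring_params params = { 0 };
    unsigned entries = 64;

    int fd = ev_syscall2 (SYS_io_uring_setup, entries, &params);

    if (fd < 0)
      ; /* errno has been set on both the inline-asm and the syscall(2) path */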
1648#define NUMPRI (EV_MAXPRI - EV_MINPRI + 1) 1670#define NUMPRI (EV_MAXPRI - EV_MINPRI + 1)
1649 1671
1650#if EV_MINPRI == EV_MAXPRI 1672#if EV_MINPRI == EV_MAXPRI
1651# define ABSPRI(w) (((W)w), 0) 1673# define ABSPRI(w) (((W)w), 0)
1652#else 1674#else
1711 const ev_tstamp shift = sizeof (unsigned long) >= 8 ? 10000000000000000000. : 1000000000.; 1733 const ev_tstamp shift = sizeof (unsigned long) >= 8 ? 10000000000000000000. : 1000000000.;
1712#else 1734#else
1713 const ev_tstamp shift = sizeof (unsigned long) >= 8 ? 18446744073709551616. : 4294967296.; 1735 const ev_tstamp shift = sizeof (unsigned long) >= 8 ? 18446744073709551616. : 4294967296.;
1714#endif 1736#endif
1715 1737
1738 /* special treatment for negative arguments */
1739 if (ecb_expect_false (v < 0.))
1740 {
1741 ev_tstamp f = -ev_floor (-v);
1742
1743 return f - (f == v ? 0 : 1);
1744 }
1745
1716 /* argument too large for an unsigned long? */ 1746 /* argument too large for an unsigned long? then reduce it */
1717 if (ecb_expect_false (v >= shift)) 1747 if (ecb_expect_false (v >= shift))
1718 { 1748 {
1719 ev_tstamp f; 1749 ev_tstamp f;
1720 1750
1721 if (v == v - 1.) 1751 if (v == v - 1.)
1722 return v; /* very large number */ 1752 return v; /* very large numbers are assumed to be integer */
1723 1753
1724 f = shift * ev_floor (v * (1. / shift)); 1754 f = shift * ev_floor (v * (1. / shift));
1725 return f + ev_floor (v - f); 1755 return f + ev_floor (v - f);
1726 }
1727
1728 /* special treatment for negative args? */
1729 if (ecb_expect_false (v < 0.))
1730 {
1731 ev_tstamp f = -ev_floor (-v);
1732
1733 return f - (f == v ? 0 : 1);
1734 } 1756 }
1735 1757
1736 /* fits into an unsigned long */ 1758 /* fits into an unsigned long */
1737 return (unsigned long)v; 1759 return (unsigned long)v;
1738} 1760}
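
Note: the negative-argument branch now runs before the range reduction. Two worked examples of that branch:

    /* ev_floor (-1.5): f = -ev_floor (1.5) = -1.; f != v, so return f - 1. = -2. */
    /* ev_floor (-2.0): f = -ev_floor (2.0) = -2.; f == v, so return f      = -2. */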
1882{ 1904{
1883 WL head; 1905 WL head;
1884 unsigned char events; /* the events watched for */ 1906 unsigned char events; /* the events watched for */
1885 unsigned char reify; /* flag set when this ANFD needs reification (EV_ANFD_REIFY, EV__IOFDSET) */ 1907 unsigned char reify; /* flag set when this ANFD needs reification (EV_ANFD_REIFY, EV__IOFDSET) */
1886 unsigned char emask; /* some backends store the actual kernel mask in here */ 1908 unsigned char emask; /* some backends store the actual kernel mask in here */
1887 unsigned char unused; 1909 unsigned char eflags; /* flags field for use by backends */
1888#if EV_USE_EPOLL 1910#if EV_USE_EPOLL
1889 unsigned int egen; /* generation counter to counter epoll bugs */ 1911 unsigned int egen; /* generation counter to counter epoll bugs */
1890#endif 1912#endif
1891#if EV_SELECT_IS_WINSOCKET || EV_USE_IOCP 1913#if EV_SELECT_IS_WINSOCKET || EV_USE_IOCP
1892 SOCKET handle; 1914 SOCKET handle;
1946 static struct ev_loop default_loop_struct; 1968 static struct ev_loop default_loop_struct;
1947 EV_API_DECL struct ev_loop *ev_default_loop_ptr = 0; /* needs to be initialised to make it a definition despite extern */ 1969 EV_API_DECL struct ev_loop *ev_default_loop_ptr = 0; /* needs to be initialised to make it a definition despite extern */
1948 1970
1949#else 1971#else
1950 1972
1951 EV_API_DECL ev_tstamp ev_rt_now = 0; /* needs to be initialised to make it a definition despite extern */ 1973 EV_API_DECL ev_tstamp ev_rt_now = EV_TS_CONST (0.); /* needs to be initialised to make it a definition despite extern */
1952 #define VAR(name,decl) static decl; 1974 #define VAR(name,decl) static decl;
1953 #include "ev_vars.h" 1975 #include "ev_vars.h"
1954 #undef VAR 1976 #undef VAR
1955 1977
1956 static int ev_default_loop_ptr; 1978 static int ev_default_loop_ptr;
1978#if EV_USE_REALTIME 2000#if EV_USE_REALTIME
1979 if (ecb_expect_true (have_realtime)) 2001 if (ecb_expect_true (have_realtime))
1980 { 2002 {
1981 struct timespec ts; 2003 struct timespec ts;
1982 clock_gettime (CLOCK_REALTIME, &ts); 2004 clock_gettime (CLOCK_REALTIME, &ts);
1983 return ts.tv_sec + ts.tv_nsec * 1e-9; 2005 return EV_TS_GET (ts);
1984 } 2006 }
1985#endif 2007#endif
1986 2008
2009 {
1987 struct timeval tv; 2010 struct timeval tv;
1988 gettimeofday (&tv, 0); 2011 gettimeofday (&tv, 0);
1989 return tv.tv_sec + tv.tv_usec * 1e-6; 2012 return EV_TV_GET (tv);
2013 }
1990} 2014}
1991#endif 2015#endif
1992 2016
1993inline_size ev_tstamp 2017inline_size ev_tstamp
1994get_clock (void) 2018get_clock (void)
1996#if EV_USE_MONOTONIC 2020#if EV_USE_MONOTONIC
1997 if (ecb_expect_true (have_monotonic)) 2021 if (ecb_expect_true (have_monotonic))
1998 { 2022 {
1999 struct timespec ts; 2023 struct timespec ts;
2000 clock_gettime (CLOCK_MONOTONIC, &ts); 2024 clock_gettime (CLOCK_MONOTONIC, &ts);
2001 return ts.tv_sec + ts.tv_nsec * 1e-9; 2025 return EV_TS_GET (ts);
2002 } 2026 }
2003#endif 2027#endif
2004 2028
2005 return ev_time (); 2029 return ev_time ();
2006} 2030}
2014#endif 2038#endif
2015 2039
2016void 2040void
2017ev_sleep (ev_tstamp delay) EV_NOEXCEPT 2041ev_sleep (ev_tstamp delay) EV_NOEXCEPT
2018{ 2042{
2019 if (delay > 0.) 2043 if (delay > EV_TS_CONST (0.))
2020 { 2044 {
2021#if EV_USE_NANOSLEEP 2045#if EV_USE_NANOSLEEP
2022 struct timespec ts; 2046 struct timespec ts;
2023 2047
2024 EV_TS_SET (ts, delay); 2048 EV_TS_SET (ts, delay);
2025 nanosleep (&ts, 0); 2049 nanosleep (&ts, 0);
2026#elif defined _WIN32 2050#elif defined _WIN32
2027 /* maybe this should round up, as ms is very low resolution */ 2051 /* maybe this should round up, as ms is very low resolution */
2028 /* compared to select (µs) or nanosleep (ns) */ 2052 /* compared to select (µs) or nanosleep (ns) */
2029 Sleep ((unsigned long)(delay * 1e3)); 2053 Sleep ((unsigned long)(EV_TS_TO_MSEC (delay)));
2030#else 2054#else
2031 struct timeval tv; 2055 struct timeval tv;
2032 2056
2033 /* here we rely on sys/time.h + sys/types.h + unistd.h providing select */ 2057 /* here we rely on sys/time.h + sys/types.h + unistd.h providing select */
2034 /* something not guaranteed by newer posix versions, but guaranteed */ 2058 /* something not guaranteed by newer posix versions, but guaranteed */
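Note: ev_sleep () returns immediately for delays that are not strictly positive and otherwise uses the best mechanism the build found (nanosleep, Sleep or select). A trivial usage sketch:

    ev_sleep (0.25); /* block the calling thread for roughly 250ms */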
2384 2408
2385 /* find minimum child */ 2409 /* find minimum child */
2386 if (ecb_expect_true (pos + DHEAP - 1 < E)) 2410 if (ecb_expect_true (pos + DHEAP - 1 < E))
2387 { 2411 {
2388 /* fast path */ (minpos = pos + 0), (minat = ANHE_at (*minpos)); 2412 /* fast path */ (minpos = pos + 0), (minat = ANHE_at (*minpos));
2389 if ( ANHE_at (pos [1]) < minat) (minpos = pos + 1), (minat = ANHE_at (*minpos)); 2413 if ( minat > ANHE_at (pos [1])) (minpos = pos + 1), (minat = ANHE_at (*minpos));
2390 if ( ANHE_at (pos [2]) < minat) (minpos = pos + 2), (minat = ANHE_at (*minpos)); 2414 if ( minat > ANHE_at (pos [2])) (minpos = pos + 2), (minat = ANHE_at (*minpos));
2391 if ( ANHE_at (pos [3]) < minat) (minpos = pos + 3), (minat = ANHE_at (*minpos)); 2415 if ( minat > ANHE_at (pos [3])) (minpos = pos + 3), (minat = ANHE_at (*minpos));
2392 } 2416 }
2393 else if (pos < E) 2417 else if (pos < E)
2394 { 2418 {
2395 /* slow path */ (minpos = pos + 0), (minat = ANHE_at (*minpos)); 2419 /* slow path */ (minpos = pos + 0), (minat = ANHE_at (*minpos));
2396 if (pos + 1 < E && ANHE_at (pos [1]) < minat) (minpos = pos + 1), (minat = ANHE_at (*minpos)); 2420 if (pos + 1 < E && minat > ANHE_at (pos [1])) (minpos = pos + 1), (minat = ANHE_at (*minpos));
2397 if (pos + 2 < E && ANHE_at (pos [2]) < minat) (minpos = pos + 2), (minat = ANHE_at (*minpos)); 2421 if (pos + 2 < E && minat > ANHE_at (pos [2])) (minpos = pos + 2), (minat = ANHE_at (*minpos));
2398 if (pos + 3 < E && ANHE_at (pos [3]) < minat) (minpos = pos + 3), (minat = ANHE_at (*minpos)); 2422 if (pos + 3 < E && minat > ANHE_at (pos [3])) (minpos = pos + 3), (minat = ANHE_at (*minpos));
2399 } 2423 }
2400 else 2424 else
2401 break; 2425 break;
2402 2426
2403 if (ANHE_at (he) <= minat) 2427 if (ANHE_at (he) <= minat)
2411 2435
2412 heap [k] = he; 2436 heap [k] = he;
2413 ev_active (ANHE_w (he)) = k; 2437 ev_active (ANHE_w (he)) = k;
2414} 2438}
2415 2439
2416#else /* 4HEAP */ 2440#else /* not 4HEAP */
2417 2441
2418#define HEAP0 1 2442#define HEAP0 1
2419#define HPARENT(k) ((k) >> 1) 2443#define HPARENT(k) ((k) >> 1)
2420#define UPHEAP_DONE(p,k) (!(p)) 2444#define UPHEAP_DONE(p,k) (!(p))
2421 2445
2915 2939
2916 /* epoll embeddability broken on all linux versions up to at least 2.6.23 */ 2940 /* epoll embeddability broken on all linux versions up to at least 2.6.23 */
2917 if (ev_linux_version () < 0x020620) /* disable it on linux < 2.6.32 */ 2941 if (ev_linux_version () < 0x020620) /* disable it on linux < 2.6.32 */
2918 flags &= ~EVBACKEND_EPOLL; 2942 flags &= ~EVBACKEND_EPOLL;
2919 2943
2944 /* EVBACKEND_LINUXAIO is theoretically embeddable, but suffers from a performance overhead */
2945
2946 /* EVBACKEND_IOURING is practically embeddable, but the current implementation is not
2947 * because our backend_fd is the epoll fd we need as fallback.
2948 * if the kernel ever is fixed, this might change...
2949 */
2950
2920 return flags; 2951 return flags;
2921} 2952}
2922 2953
2923unsigned int 2954unsigned int
2924ev_backend (EV_P) EV_NOEXCEPT 2955ev_backend (EV_P) EV_NOEXCEPT
3518 { 3549 {
3519 ev_at (w) += w->repeat; 3550 ev_at (w) += w->repeat;
3520 if (ev_at (w) < mn_now) 3551 if (ev_at (w) < mn_now)
3521 ev_at (w) = mn_now; 3552 ev_at (w) = mn_now;
3522 3553
3523 assert (("libev: negative ev_timer repeat value found while processing timers", w->repeat > 0.)); 3554 assert (("libev: negative ev_timer repeat value found while processing timers", w->repeat > EV_TS_CONST (0.)));
3524 3555
3525 ANHE_at_cache (timers [HEAP0]); 3556 ANHE_at_cache (timers [HEAP0]);
3526 downheap (timers, timercnt, HEAP0); 3557 downheap (timers, timercnt, HEAP0);
3527 } 3558 }
3528 else 3559 else
3659 3690
3660 mn_now = get_clock (); 3691 mn_now = get_clock ();
3661 3692
3662 /* only fetch the realtime clock every 0.5*MIN_TIMEJUMP seconds */ 3693 /* only fetch the realtime clock every 0.5*MIN_TIMEJUMP seconds */
3663 /* interpolate in the meantime */ 3694 /* interpolate in the meantime */
3664 if (ecb_expect_true (mn_now - now_floor < MIN_TIMEJUMP * .5)) 3695 if (ecb_expect_true (mn_now - now_floor < EV_TS_CONST (MIN_TIMEJUMP * .5)))
3665 { 3696 {
3666 ev_rt_now = rtmn_diff + mn_now; 3697 ev_rt_now = rtmn_diff + mn_now;
3667 return; 3698 return;
3668 } 3699 }
3669 3700
3683 ev_tstamp diff; 3714 ev_tstamp diff;
3684 rtmn_diff = ev_rt_now - mn_now; 3715 rtmn_diff = ev_rt_now - mn_now;
3685 3716
3686 diff = odiff - rtmn_diff; 3717 diff = odiff - rtmn_diff;
3687 3718
3688 if (ecb_expect_true ((diff < 0. ? -diff : diff) < MIN_TIMEJUMP)) 3719 if (ecb_expect_true ((diff < EV_TS_CONST (0.) ? -diff : diff) < EV_TS_CONST (MIN_TIMEJUMP)))
3689 return; /* all is well */ 3720 return; /* all is well */
3690 3721
3691 ev_rt_now = ev_time (); 3722 ev_rt_now = ev_time ();
3692 mn_now = get_clock (); 3723 mn_now = get_clock ();
3693 now_floor = mn_now; 3724 now_floor = mn_now;
3702 else 3733 else
3703#endif 3734#endif
3704 { 3735 {
3705 ev_rt_now = ev_time (); 3736 ev_rt_now = ev_time ();
3706 3737
3707 if (ecb_expect_false (mn_now > ev_rt_now || ev_rt_now > mn_now + max_block + MIN_TIMEJUMP)) 3738 if (ecb_expect_false (mn_now > ev_rt_now || ev_rt_now > mn_now + max_block + EV_TS_CONST (MIN_TIMEJUMP)))
3708 { 3739 {
3709 /* adjust timers. this is easy, as the offset is the same for all of them */ 3740 /* adjust timers. this is easy, as the offset is the same for all of them */
3710 timers_reschedule (EV_A_ ev_rt_now - mn_now); 3741 timers_reschedule (EV_A_ ev_rt_now - mn_now);
3711#if EV_PERIODIC_ENABLE 3742#if EV_PERIODIC_ENABLE
3712 periodics_reschedule (EV_A); 3743 periodics_reschedule (EV_A);
3781 3812
3782 /* remember old timestamp for io_blocktime calculation */ 3813 /* remember old timestamp for io_blocktime calculation */
3783 ev_tstamp prev_mn_now = mn_now; 3814 ev_tstamp prev_mn_now = mn_now;
3784 3815
3785 /* update time to cancel out callback processing overhead */ 3816 /* update time to cancel out callback processing overhead */
3786 time_update (EV_A_ 1e100); 3817 time_update (EV_A_ EV_TS_CONST (EV_TSTAMP_HUGE));
3787 3818
3788 /* from now on, we want a pipe-wake-up */ 3819 /* from now on, we want a pipe-wake-up */
3789 pipe_write_wanted = 1; 3820 pipe_write_wanted = 1;
3790 3821
3791 ECB_MEMORY_FENCE; /* make sure pipe_write_wanted is visible before we check for potential skips */ 3822 ECB_MEMORY_FENCE; /* make sure pipe_write_wanted is visible before we check for potential skips */
3792 3823
3793 if (ecb_expect_true (!(flags & EVRUN_NOWAIT || idleall || !activecnt || pipe_write_skipped))) 3824 if (ecb_expect_true (!(flags & EVRUN_NOWAIT || idleall || !activecnt || pipe_write_skipped)))
3794 { 3825 {
3795 waittime = MAX_BLOCKTIME; 3826 waittime = EV_TS_CONST (MAX_BLOCKTIME);
3796 3827
3797 if (timercnt) 3828 if (timercnt)
3798 { 3829 {
3799 ev_tstamp to = ANHE_at (timers [HEAP0]) - mn_now; 3830 ev_tstamp to = ANHE_at (timers [HEAP0]) - mn_now;
3800 if (waittime > to) waittime = to; 3831 if (waittime > to) waittime = to;
3823 sleeptime = io_blocktime - (mn_now - prev_mn_now); 3854 sleeptime = io_blocktime - (mn_now - prev_mn_now);
3824 3855
3825 if (sleeptime > waittime - backend_mintime) 3856 if (sleeptime > waittime - backend_mintime)
3826 sleeptime = waittime - backend_mintime; 3857 sleeptime = waittime - backend_mintime;
3827 3858
3828 if (ecb_expect_true (sleeptime > 0.)) 3859 if (ecb_expect_true (sleeptime > EV_TS_CONST (0.)))
3829 { 3860 {
3830 ev_sleep (sleeptime); 3861 ev_sleep (sleeptime);
3831 waittime -= sleeptime; 3862 waittime -= sleeptime;
3832 } 3863 }
3833 } 3864 }
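Note: a worked example of the io_blocktime logic above, with illustrative numbers:

    /* io_blocktime = 0.010, callbacks took 0.002s -> sleeptime = 0.008      */
    /* waittime = 0.100, backend_mintime = 0.001   -> clamp at 0.099, no hit */
    /* ev_sleep (0.008), then poll with waittime = 0.100 - 0.008 = 0.092     */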
3907} 3938}
3908 3939
3909void 3940void
3910ev_now_update (EV_P) EV_NOEXCEPT 3941ev_now_update (EV_P) EV_NOEXCEPT
3911{ 3942{
3912 time_update (EV_A_ 1e100); 3943 time_update (EV_A_ EV_TSTAMP_HUGE);
3913} 3944}
3914 3945
3915void 3946void
3916ev_suspend (EV_P) EV_NOEXCEPT 3947ev_suspend (EV_P) EV_NOEXCEPT
3917{ 3948{
4148} 4179}
4149 4180
4150ev_tstamp 4181ev_tstamp
4151ev_timer_remaining (EV_P_ ev_timer *w) EV_NOEXCEPT 4182ev_timer_remaining (EV_P_ ev_timer *w) EV_NOEXCEPT
4152{ 4183{
4153 return ev_at (w) - (ev_is_active (w) ? mn_now : 0.); 4184 return ev_at (w) - (ev_is_active (w) ? mn_now : EV_TS_CONST (0.));
4154} 4185}
4155 4186
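Note: a hedged usage sketch for ev_timer_remaining () (loop and timeout_cb are assumed to exist elsewhere):

    ev_timer w;

    ev_timer_init (&w, timeout_cb, 5., 0.);
    ev_timer_start (loop, &w);

    /* some time later: how long until timeout_cb fires? */
    ev_tstamp left = ev_timer_remaining (loop, &w); /* <= 5. while the watcher is active */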
4156#if EV_PERIODIC_ENABLE 4187#if EV_PERIODIC_ENABLE
4157ecb_noinline 4188ecb_noinline
4158void 4189void
