/cvs/libev/ev.c

Comparing libev/ev.c (file contents):
Revision 1.501 by root, Mon Jul 1 21:47:42 2019 UTC vs.
Revision 1.507 by root, Thu Jul 11 08:22:54 2019 UTC

447# endif 447# endif
448#endif 448#endif
449 449
450#if EV_USE_LINUXAIO 450#if EV_USE_LINUXAIO
451# include <sys/syscall.h> 451# include <sys/syscall.h>
452# if !SYS_io_getevents || !EV_USE_EPOLL /* ev_linxaio uses ev_poll.c:ev_epoll_create */ 452# if SYS_io_getevents && EV_USE_EPOLL /* linuxaio backend requires epoll backend */
453# define EV_NEED_SYSCALL 1
454# else
453# undef EV_USE_LINUXAIO 455# undef EV_USE_LINUXAIO
454# define EV_USE_LINUXAIO 0 456# define EV_USE_LINUXAIO 0
455# else
456# define EV_NEED_SYSCALL 1
457# endif 457# endif
458#endif 458#endif
459 459
460#if EV_USE_IOURING 460#if EV_USE_IOURING
461# include <sys/syscall.h> 461# include <sys/syscall.h>
462# if !__alpha && !SYS_io_uring_setup 462# if !SYS_io_uring_setup && __linux && !__alpha
463# define SYS_io_uring_setup 425 463# define SYS_io_uring_setup 425
464# define SYS_io_uring_enter 426 464# define SYS_io_uring_enter 426
465# define SYS_io_uring_register 427 465# define SYS_io_uring_register 427
466# endif 466# endif
467# if SYS_io_uring_setup 467# if SYS_io_uring_setup && EV_USE_EPOLL /* iouring backend requires epoll backend */
468# define EV_NEED_SYSCALL 1 468# define EV_NEED_SYSCALL 1
469# else 469# else
470# undef EV_USE_IOURING 470# undef EV_USE_IOURING
471# define EV_USE_IOURING 0 471# define EV_USE_IOURING 0
472# endif 472# endif
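
The net effect of the reworked guards: the linuxaio backend now stays compiled in only when SYS_io_getevents is known and the epoll backend is available as its fallback, and the iouring backend gets the same treatment; otherwise EV_USE_LINUXAIO/EV_USE_IOURING are forced back to 0. A hedged sketch (not taken from ev.c) of how an application might request these backends at run time, assuming a libev build whose ev.h defines both EVBACKEND_LINUXAIO and EVBACKEND_IOURING:

    #include "ev.h"

    static struct ev_loop *
    make_loop (void)
    {
      /* ask for the kernel-queue backends if this build kept them enabled */
      unsigned int wanted = EVBACKEND_LINUXAIO | EVBACKEND_IOURING;
      unsigned int have   = ev_supported_backends () & wanted;

      /* fall back to autodetection when the guards above disabled both */
      return have ? ev_loop_new (have) : ev_loop_new (EVFLAG_AUTO);
    }
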
520}; 520};
521#endif 521#endif
522 522
523/*****************************************************************************/ 523/*****************************************************************************/
524 524
525#if EV_NEED_SYSCALL
526
527#include <sys/syscall.h>
528
529/*
530 * define some syscall wrappers for common architectures
531 * this is mostly for nice looks during debugging, not performance.
532 * our syscalls return < 0, not == -1, on error, which is good 
533 * enough for linux aio. 
534 * TODO: arm is also common nowadays, maybe even mips and x86 
535 * TODO: after implementing this, it suddenly looks like overkill, but it's hard to remove... 
536 */
537#if __GNUC__ && __linux && ECB_AMD64 && !defined __OPTIMIZE_SIZE__
538 /* the costly errno access probably kills this for size optimisation */
539
540 #define ev_syscall(nr,narg,arg1,arg2,arg3,arg4,arg5) \
541 ({ \
542 long res; \
543 register unsigned long r5 __asm__ ("r8" ); \
544 register unsigned long r4 __asm__ ("r10"); \
545 register unsigned long r3 __asm__ ("rdx"); \
546 register unsigned long r2 __asm__ ("rsi"); \
547 register unsigned long r1 __asm__ ("rdi"); \
548 if (narg >= 5) r5 = (unsigned long)(arg5); \
549 if (narg >= 4) r4 = (unsigned long)(arg4); \
550 if (narg >= 3) r3 = (unsigned long)(arg3); \
551 if (narg >= 2) r2 = (unsigned long)(arg2); \
552 if (narg >= 1) r1 = (unsigned long)(arg1); \
553 __asm__ __volatile__ ( \
554 "syscall\n\t" \
555 : "=a" (res) \
556 : "0" (nr), "r" (r1), "r" (r2), "r" (r3), "r" (r4), "r" (r5) \
557 : "cc", "r11", "cx", "memory"); \
558 errno = -res; \
559 res; \
560 })
561
562#endif
563
564#ifdef ev_syscall
565 #define ev_syscall0(nr) ev_syscall (nr, 0, 0, 0, 0, 0, 0)
566 #define ev_syscall1(nr,arg1) ev_syscall (nr, 1, arg1, 0, 0, 0, 0)
567 #define ev_syscall2(nr,arg1,arg2) ev_syscall (nr, 2, arg1, arg2, 0, 0, 0)
568 #define ev_syscall3(nr,arg1,arg2,arg3) ev_syscall (nr, 3, arg1, arg2, arg3, 0, 0)
569 #define ev_syscall4(nr,arg1,arg2,arg3,arg4) ev_syscall (nr, 4, arg1, arg2, arg3, arg4, 0)
570 #define ev_syscall5(nr,arg1,arg2,arg3,arg4,arg5) ev_syscall (nr, 5, arg1, arg2, arg3, arg4, arg5)
571#else
572 #define ev_syscall0(nr) syscall (nr)
573 #define ev_syscall1(nr,arg1) syscall (nr, arg1)
574 #define ev_syscall2(nr,arg1,arg2) syscall (nr, arg1, arg2)
575 #define ev_syscall3(nr,arg1,arg2,arg3) syscall (nr, arg1, arg2, arg3)
576 #define ev_syscall4(nr,arg1,arg2,arg3,arg4) syscall (nr, arg1, arg2, arg3, arg4)
577 #define ev_syscall5(nr,arg1,arg2,arg3,arg4,arg5) syscall (nr, arg1, arg2, arg3, arg4, arg5)
578#endif
579
580#endif
581
582/*****************************************************************************/
583
584#if EV_VERIFY >= 3 525#if EV_VERIFY >= 3
585# define EV_FREQUENT_CHECK ev_verify (EV_A) 526# define EV_FREQUENT_CHECK ev_verify (EV_A)
586#else 527#else
587# define EV_FREQUENT_CHECK do { } while (0) 528# define EV_FREQUENT_CHECK do { } while (0)
588#endif 529#endif
592 * This value is good at least till the year 4000. 533 * This value is good at least till the year 4000.
593 */ 534 */
594#define MIN_INTERVAL 0.0001220703125 /* 1/2**13, good till 4000 */ 535#define MIN_INTERVAL 0.0001220703125 /* 1/2**13, good till 4000 */
595/*#define MIN_INTERVAL 0.00000095367431640625 /* 1/2**20, good till 2200 */ 536/*#define MIN_INTERVAL 0.00000095367431640625 /* 1/2**20, good till 2200 */
596 537
597#define MIN_TIMEJUMP 1. /* minimum timejump that gets detected (if monotonic clock available) */ 538#define MIN_TIMEJUMP 1. /* minimum timejump that gets detected (if monotonic clock available) */
598#define MAX_BLOCKTIME 59.743 /* never wait longer than this time (to detect time jumps) */ 539#define MAX_BLOCKTIME 59.743 /* never wait longer than this time (to detect time jumps) */
599 540
541/* find a portable timestamp that is "always" in the future but fits into time_t.
542 * this is quite hard, and we are mostly guessing - we handle 32 bit signed/unsigned time_t,
543 * and sizes larger than 32 bit, and maybe the unlikely floating point time_t */
544#define EV_TSTAMP_HUGE \
545 (sizeof (time_t) >= 8 ? 10000000000000. \
546 : 0 < (time_t)4294967295 ? 4294967295. \
547 : 2147483647.) \
548
549#define EV_TS_TO_MS(a) a * 1e3 + 0.9999
550#define EV_TS_FROM_USEC(us) us * 1e-6
600#define EV_TV_SET(tv,t) do { tv.tv_sec = (long)t; tv.tv_usec = (long)((t - tv.tv_sec) * 1e6); } while (0) 551#define EV_TV_SET(tv,t) do { tv.tv_sec = (long)t; tv.tv_usec = (long)((t - tv.tv_sec) * 1e6); } while (0)
601#define EV_TS_SET(ts,t) do { ts.tv_sec = (long)t; ts.tv_nsec = (long)((t - ts.tv_sec) * 1e9); } while (0) 552#define EV_TS_SET(ts,t) do { ts.tv_sec = (long)t; ts.tv_nsec = (long)((t - ts.tv_sec) * 1e9); } while (0)
553#define EV_TV_GET(tv) ((tv).tv_sec + (tv).tv_usec * 1e-6)
554#define EV_TS_GET(ts) ((ts).tv_sec + (ts).tv_nsec * 1e-9)
602 555
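
Taken together, the conversion macros now centralise all timestamp arithmetic: EV_TS_TO_MS biases the value up by 0.9999 so a nonzero delay never truncates to zero milliseconds after the integer cast, EV_TSTAMP_HUGE picks a "far future" value that still fits the platform's time_t, and the SET/GET pairs split and rejoin struct timeval/timespec. A hedged round-trip illustration (not taken from ev.c):

    #include <time.h>

    static void
    ts_roundtrip_sketch (double t /* an ev_tstamp, assumed >= 0 */)
    {
      struct timespec ts;
      double back;

      EV_TS_SET (ts, t);     /* split into tv_sec plus fractional tv_nsec */
      back = EV_TS_GET (ts); /* ts.tv_sec + ts.tv_nsec * 1e-9 */

      (void)back;            /* equals t up to one nanosecond of rounding */
    }
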
603/* the following is ecb.h embedded into libev - use update_ev_c to update from an external copy */ 556/* the following is ecb.h embedded into libev - use update_ev_c to update from an external copy */
604/* ECB.H BEGIN */ 557/* ECB.H BEGIN */
605/* 558/*
606 * libecb - http://software.schmorp.de/pkg/libecb 559 * libecb - http://software.schmorp.de/pkg/libecb
1643# define inline_speed ecb_inline 1596# define inline_speed ecb_inline
1644#else 1597#else
1645# define inline_speed ecb_noinline static 1598# define inline_speed ecb_noinline static
1646#endif 1599#endif
1647 1600
1601/*****************************************************************************/
1602/* raw syscall wrappers */
1603
1604#if EV_NEED_SYSCALL
1605
1606#include <sys/syscall.h>
1607
1608/*
1609 * define some syscall wrappers for common architectures
1610 * this is mostly for nice looks during debugging, not performance.
1611 * our syscalls return < 0, not == -1, on error, which is good
1612 * enough for linux aio.
1613 * TODO: arm is also common nowadays, maybe even mips and x86
1614 * TODO: after implementing this, it suddenly looks like overkill, but it's hard to remove...
1615 */
1616#if __GNUC__ && __linux && ECB_AMD64 && !defined __OPTIMIZE_SIZE__
1617 /* the costly errno access probably kills this for size optimisation */
1618
1619 #define ev_syscall(nr,narg,arg1,arg2,arg3,arg4,arg5,arg6) \
1620 ({ \
1621 long res; \
1622 register unsigned long r6 __asm__ ("r9" ); \
1623 register unsigned long r5 __asm__ ("r8" ); \
1624 register unsigned long r4 __asm__ ("r10"); \
1625 register unsigned long r3 __asm__ ("rdx"); \
1626 register unsigned long r2 __asm__ ("rsi"); \
1627 register unsigned long r1 __asm__ ("rdi"); \
1628 if (narg >= 6) r6 = (unsigned long)(arg6); \
1629 if (narg >= 5) r5 = (unsigned long)(arg5); \
1630 if (narg >= 4) r4 = (unsigned long)(arg4); \
1631 if (narg >= 3) r3 = (unsigned long)(arg3); \
1632 if (narg >= 2) r2 = (unsigned long)(arg2); \
1633 if (narg >= 1) r1 = (unsigned long)(arg1); \
1634 __asm__ __volatile__ ( \
1635 "syscall\n\t" \
1636 : "=a" (res) \
1637 : "0" (nr), "r" (r1), "r" (r2), "r" (r3), "r" (r4), "r" (r5), "r" (r6) \
1638 : "cc", "r11", "cx", "memory"); \
1639 errno = -res; \
1640 res; \
1641 })
1642
1643#endif
1644
1645#ifdef ev_syscall
1646 #define ev_syscall0(nr) ev_syscall (nr, 0, 0, 0, 0, 0, 0, 0)
1647 #define ev_syscall1(nr,arg1) ev_syscall (nr, 1, arg1, 0, 0, 0, 0, 0)
1648 #define ev_syscall2(nr,arg1,arg2) ev_syscall (nr, 2, arg1, arg2, 0, 0, 0, 0)
1649 #define ev_syscall3(nr,arg1,arg2,arg3) ev_syscall (nr, 3, arg1, arg2, arg3, 0, 0, 0)
1650 #define ev_syscall4(nr,arg1,arg2,arg3,arg4) ev_syscall (nr, 4, arg1, arg2, arg3, arg4, 0, 0)
1651 #define ev_syscall5(nr,arg1,arg2,arg3,arg4,arg5) ev_syscall (nr, 5, arg1, arg2, arg3, arg4, arg5, 0)
1652 #define ev_syscall6(nr,arg1,arg2,arg3,arg4,arg5,arg6) ev_syscall (nr, 6, arg1, arg2, arg3, arg4, arg5, arg6)
1653#else
1654 #define ev_syscall0(nr) syscall (nr)
1655 #define ev_syscall1(nr,arg1) syscall (nr, arg1)
1656 #define ev_syscall2(nr,arg1,arg2) syscall (nr, arg1, arg2)
1657 #define ev_syscall3(nr,arg1,arg2,arg3) syscall (nr, arg1, arg2, arg3)
1658 #define ev_syscall4(nr,arg1,arg2,arg3,arg4) syscall (nr, arg1, arg2, arg3, arg4)
1659 #define ev_syscall5(nr,arg1,arg2,arg3,arg4,arg5) syscall (nr, arg1, arg2, arg3, arg4, arg5)
1660 #define ev_syscall6(nr,arg1,arg2,arg3,arg4,arg5,arg6) syscall (nr, arg1, arg2, arg3, arg4, arg5, arg6)
1661#endif
1662
1663#endif
1664
1665/*****************************************************************************/
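
The wrapper block was moved down here essentially unchanged, apart from growing a sixth argument in register r9, which syscalls such as io_uring_enter need. On x86-64 the kernel takes syscall arguments in rdi, rsi, rdx, r10, r8 and r9 and returns the result in rax, which is exactly the mapping the register variables pin down. A hedged sketch (not taken from ev.c) of a call through the six-argument wrapper, assuming the macros above are in scope and the usual io_uring_enter argument order:

    static long
    iouring_enter_sketch (int fd, unsigned to_submit, unsigned min_complete, unsigned flags)
    {
      /* the wrappers return -errno directly instead of -1 plus errno */
      return ev_syscall6 (SYS_io_uring_enter, fd, to_submit,
                          min_complete, flags, /*sig*/ 0, /*sigsz*/ 0);
    }
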
1666
1648#define NUMPRI (EV_MAXPRI - EV_MINPRI + 1) 1667#define NUMPRI (EV_MAXPRI - EV_MINPRI + 1)
1649 1668
1650#if EV_MINPRI == EV_MAXPRI 1669#if EV_MINPRI == EV_MAXPRI
1651# define ABSPRI(w) (((W)w), 0) 1670# define ABSPRI(w) (((W)w), 0)
1652#else 1671#else
1711 const ev_tstamp shift = sizeof (unsigned long) >= 8 ? 10000000000000000000. : 1000000000.; 1730 const ev_tstamp shift = sizeof (unsigned long) >= 8 ? 10000000000000000000. : 1000000000.;
1712#else 1731#else
1713 const ev_tstamp shift = sizeof (unsigned long) >= 8 ? 18446744073709551616. : 4294967296.; 1732 const ev_tstamp shift = sizeof (unsigned long) >= 8 ? 18446744073709551616. : 4294967296.;
1714#endif 1733#endif
1715 1734
1735 /* special treatment for negative arguments */
1736 if (ecb_expect_false (v < 0.))
1737 {
1738 ev_tstamp f = -ev_floor (-v);
1739
1740 return f - (f == v ? 0 : 1);
1741 }
1742
1716 /* argument too large for an unsigned long? */ 1743 /* argument too large for an unsigned long? then reduce it */
1717 if (ecb_expect_false (v >= shift)) 1744 if (ecb_expect_false (v >= shift))
1718 { 1745 {
1719 ev_tstamp f; 1746 ev_tstamp f;
1720 1747
1721 if (v == v - 1.) 1748 if (v == v - 1.)
1722 return v; /* very large number */ 1749 return v; /* very large numbers are assumed to be integer */
1723 1750
1724 f = shift * ev_floor (v * (1. / shift)); 1751 f = shift * ev_floor (v * (1. / shift));
1725 return f + ev_floor (v - f); 1752 return f + ev_floor (v - f);
1726 }
1727
1728 /* special treatment for negative args? */
1729 if (ecb_expect_false (v < 0.))
1730 {
1731 ev_tstamp f = -ev_floor (-v);
1732
1733 return f - (f == v ? 0 : 1);
1734 } 1753 }
1735 1754
1736 /* fits into an unsigned long */ 1755 /* fits into an unsigned long */
1737 return (unsigned long)v; 1756 return (unsigned long)v;
1738} 1757}
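
The negative-argument test now runs first, ahead of the large-argument reduction; either way it must come before the final cast, since a negative v is not representable as an unsigned long. A hedged worked example (not taken from ev.c) of the reduction path, with shift = 2**32:

    /* v      = 3 * 2**32 + 0.75 = 12884901888.75, which exceeds shift      */
    /* f      = shift * ev_floor (v * (1. / shift)) = 4294967296. * 3.      */
    /*        = 12884901888.                                                */
    /* result = f + ev_floor (v - f) = 12884901888. + ev_floor (0.75)       */
    /*        = 12884901888., i.e. floor (v) as required                    */
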
1882{ 1901{
1883 WL head; 1902 WL head;
1884 unsigned char events; /* the events watched for */ 1903 unsigned char events; /* the events watched for */
1885 unsigned char reify; /* flag set when this ANFD needs reification (EV_ANFD_REIFY, EV__IOFDSET) */ 1904 unsigned char reify; /* flag set when this ANFD needs reification (EV_ANFD_REIFY, EV__IOFDSET) */
1886 unsigned char emask; /* some backends store the actual kernel mask in here */ 1905 unsigned char emask; /* some backends store the actual kernel mask in here */
1887 unsigned char unused; 1906 unsigned char eflags; /* flags field for use by backends */
1888#if EV_USE_EPOLL 1907#if EV_USE_EPOLL
1889 unsigned int egen; /* generation counter to counter epoll bugs */ 1908 unsigned int egen; /* generation counter to counter epoll bugs */
1890#endif 1909#endif
1891#if EV_SELECT_IS_WINSOCKET || EV_USE_IOCP 1910#if EV_SELECT_IS_WINSOCKET || EV_USE_IOCP
1892 SOCKET handle; 1911 SOCKET handle;
1978#if EV_USE_REALTIME 1997#if EV_USE_REALTIME
1979 if (ecb_expect_true (have_realtime)) 1998 if (ecb_expect_true (have_realtime))
1980 { 1999 {
1981 struct timespec ts; 2000 struct timespec ts;
1982 clock_gettime (CLOCK_REALTIME, &ts); 2001 clock_gettime (CLOCK_REALTIME, &ts);
1983 return ts.tv_sec + ts.tv_nsec * 1e-9; 2002 return EV_TS_GET (ts);
1984 } 2003 }
1985#endif 2004#endif
1986 2005
1987 struct timeval tv; 2006 struct timeval tv;
1988 gettimeofday (&tv, 0); 2007 gettimeofday (&tv, 0);
1989 return tv.tv_sec + tv.tv_usec * 1e-6; 2008 return EV_TV_GET (tv);
1990} 2009}
1991#endif 2010#endif
1992 2011
1993inline_size ev_tstamp 2012inline_size ev_tstamp
1994get_clock (void) 2013get_clock (void)
1996#if EV_USE_MONOTONIC 2015#if EV_USE_MONOTONIC
1997 if (ecb_expect_true (have_monotonic)) 2016 if (ecb_expect_true (have_monotonic))
1998 { 2017 {
1999 struct timespec ts; 2018 struct timespec ts;
2000 clock_gettime (CLOCK_MONOTONIC, &ts); 2019 clock_gettime (CLOCK_MONOTONIC, &ts);
2001 return ts.tv_sec + ts.tv_nsec * 1e-9; 2020 return EV_TS_GET (ts);
2002 } 2021 }
2003#endif 2022#endif
2004 2023
2005 return ev_time (); 2024 return ev_time ();
2006} 2025}
2024 EV_TS_SET (ts, delay); 2043 EV_TS_SET (ts, delay);
2025 nanosleep (&ts, 0); 2044 nanosleep (&ts, 0);
2026#elif defined _WIN32 2045#elif defined _WIN32
2027 /* maybe this should round up, as ms is very low resolution */ 2046 /* maybe this should round up, as ms is very low resolution */
2028 /* compared to select (µs) or nanosleep (ns) */ 2047 /* compared to select (µs) or nanosleep (ns) */
2029 Sleep ((unsigned long)(delay * 1e3)); 2048 Sleep ((unsigned long)(EV_TS_TO_MS (delay)));
2030#else 2049#else
2031 struct timeval tv; 2050 struct timeval tv;
2032 2051
2033 /* here we rely on sys/time.h + sys/types.h + unistd.h providing select */ 2052 /* here we rely on sys/time.h + sys/types.h + unistd.h providing select */
2034 /* something not guaranteed by newer posix versions, but guaranteed */ 2053 /* something not guaranteed by newer posix versions, but guaranteed */
2384 2403
2385 /* find minimum child */ 2404 /* find minimum child */
2386 if (ecb_expect_true (pos + DHEAP - 1 < E)) 2405 if (ecb_expect_true (pos + DHEAP - 1 < E))
2387 { 2406 {
2388 /* fast path */ (minpos = pos + 0), (minat = ANHE_at (*minpos)); 2407 /* fast path */ (minpos = pos + 0), (minat = ANHE_at (*minpos));
2389 if ( ANHE_at (pos [1]) < minat) (minpos = pos + 1), (minat = ANHE_at (*minpos)); 2408 if ( minat > ANHE_at (pos [1])) (minpos = pos + 1), (minat = ANHE_at (*minpos));
2390 if ( ANHE_at (pos [2]) < minat) (minpos = pos + 2), (minat = ANHE_at (*minpos)); 2409 if ( minat > ANHE_at (pos [2])) (minpos = pos + 2), (minat = ANHE_at (*minpos));
2391 if ( ANHE_at (pos [3]) < minat) (minpos = pos + 3), (minat = ANHE_at (*minpos)); 2410 if ( minat > ANHE_at (pos [3])) (minpos = pos + 3), (minat = ANHE_at (*minpos));
2392 } 2411 }
2393 else if (pos < E) 2412 else if (pos < E)
2394 { 2413 {
2395 /* slow path */ (minpos = pos + 0), (minat = ANHE_at (*minpos)); 2414 /* slow path */ (minpos = pos + 0), (minat = ANHE_at (*minpos));
2396 if (pos + 1 < E && ANHE_at (pos [1]) < minat) (minpos = pos + 1), (minat = ANHE_at (*minpos)); 2415 if (pos + 1 < E && minat > ANHE_at (pos [1])) (minpos = pos + 1), (minat = ANHE_at (*minpos));
2397 if (pos + 2 < E && ANHE_at (pos [2]) < minat) (minpos = pos + 2), (minat = ANHE_at (*minpos)); 2416 if (pos + 2 < E && minat > ANHE_at (pos [2])) (minpos = pos + 2), (minat = ANHE_at (*minpos));
2398 if (pos + 3 < E && ANHE_at (pos [3]) < minat) (minpos = pos + 3), (minat = ANHE_at (*minpos)); 2417 if (pos + 3 < E && minat > ANHE_at (pos [3])) (minpos = pos + 3), (minat = ANHE_at (*minpos));
2399 } 2418 }
2400 else 2419 else
2401 break; 2420 break;
2402 2421
2403 if (ANHE_at (he) <= minat) 2422 if (ANHE_at (he) <= minat)
2411 2430
2412 heap [k] = he; 2431 heap [k] = he;
2413 ev_active (ANHE_w (he)) = k; 2432 ev_active (ANHE_w (he)) = k;
2414} 2433}
2415 2434
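
The comparison operands were flipped (minat > ANHE_at (...) rather than ANHE_at (...) < minat); the scan itself still selects the smallest of up to DHEAP = 4 children, with the fast path skipping the per-child bounds checks whenever all four children exist. A hedged scalar equivalent (not taken from ev.c), assuming a plain array of timestamps:

    static int
    min_child_sketch (double const *at /* timestamps */, int first, int nchildren)
    {
      int best = first;
      int i;

      for (i = 1; i < nchildren; ++i)
        if (at [first + i] < at [best])
          best = first + i;

      return best;
    }
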
2416#else /* 4HEAP */ 2435#else /* not 4HEAP */
2417 2436
2418#define HEAP0 1 2437#define HEAP0 1
2419#define HPARENT(k) ((k) >> 1) 2438#define HPARENT(k) ((k) >> 1)
2420#define UPHEAP_DONE(p,k) (!(p)) 2439#define UPHEAP_DONE(p,k) (!(p))
2421 2440
2914 int flags = EVBACKEND_EPOLL | EVBACKEND_KQUEUE | EVBACKEND_PORT; 2933 int flags = EVBACKEND_EPOLL | EVBACKEND_KQUEUE | EVBACKEND_PORT;
2915 2934
2916 /* epoll embeddability broken on all linux versions up to at least 2.6.23 */ 2935 /* epoll embeddability broken on all linux versions up to at least 2.6.23 */
2917 if (ev_linux_version () < 0x020620) /* disable it on linux < 2.6.32 */ 2936 if (ev_linux_version () < 0x020620) /* disable it on linux < 2.6.32 */
2918 flags &= ~EVBACKEND_EPOLL; 2937 flags &= ~EVBACKEND_EPOLL;
2938
2939 /* EVBACKEND_LINUXAIO is theoretically embeddable, but suffers from a performance overhead */
2940
2941 /* EVBACKEND_IOURING is practically embeddable, but the current implementation is not
2942 * because our backend_fd is the epoll fd we need as fallback.
2943 * if the kernel ever is fixed, this might change...
2944 */
2919 2945
2920 return flags; 2946 return flags;
2921} 2947}
2922 2948
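
Since linuxaio and iouring are deliberately kept out of the embeddable set, ev_embeddable_backends () still returns at most epoll (kernel permitting), kqueue and port. A hedged sketch (not taken from ev.c, modelled on the ev_embed example in the libev documentation) of running an embeddable backend in a second loop:

    #include "ev.h"

    static ev_embed embed;

    static void
    embed_cb (struct ev_loop *loop, ev_embed *w, int revents)
    {
      ev_embed_sweep (loop, w); /* drain events from the embedded loop */
    }

    static struct ev_loop *
    setup_loops (void)
    {
      struct ev_loop *loop_hi = ev_default_loop (0);
      unsigned int emb = ev_embeddable_backends () & ev_supported_backends ();

      if (emb)
        {
          struct ev_loop *loop_lo = ev_loop_new (emb);

          ev_embed_init (&embed, embed_cb, loop_lo);
          ev_embed_start (loop_hi, &embed);
        }

      return loop_hi;
    }
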
2923unsigned int 2949unsigned int
