/cvs/libev/ev.c

Comparing libev/ev.c (file contents):
Revision 1.494 by root, Sun Jun 23 23:28:45 2019 UTC vs.
Revision 1.503 by root, Wed Jul 3 21:52:04 2019 UTC

332# else 332# else
333# define EV_USE_LINUXAIO 0 333# define EV_USE_LINUXAIO 0
334# endif 334# endif
335#endif 335#endif
336 336
337#ifndef EV_USE_IOURING
338# if __linux
339# define EV_USE_IOURING 0
340# else
341# define EV_USE_IOURING 0
342# endif
343#endif
344
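The new EV_USE_IOURING switch above defaults to 0 even on Linux, so the io_uring backend must be requested explicitly at compile time. A minimal embedding sketch, assuming ev.c is compiled directly into the application (as libev's embedding setup allows) and that epoll stays enabled, since the io_uring code below requires the epoll backend as fallback:

/* opt-in sketch, assumed single-file embedding - not part of the diff */
#define EV_USE_IOURING 1   /* compile in the experimental io_uring backend */
#define EV_USE_EPOLL   1   /* required: io_uring falls back on epoll */
#include "ev.c"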
337#ifndef EV_USE_INOTIFY 345#ifndef EV_USE_INOTIFY
338# if __linux && (__GLIBC__ > 2 || (__GLIBC__ == 2 && __GLIBC_MINOR__ >= 4)) 346# if __linux && (__GLIBC__ > 2 || (__GLIBC__ == 2 && __GLIBC_MINOR__ >= 4))
339# define EV_USE_INOTIFY EV_FEATURE_OS 347# define EV_USE_INOTIFY EV_FEATURE_OS
340# else 348# else
341# define EV_USE_INOTIFY 0 349# define EV_USE_INOTIFY 0
406# include <sys/syscall.h> 414# include <sys/syscall.h>
407# ifdef SYS_clock_gettime 415# ifdef SYS_clock_gettime
408# define clock_gettime(id, ts) syscall (SYS_clock_gettime, (id), (ts)) 416# define clock_gettime(id, ts) syscall (SYS_clock_gettime, (id), (ts))
409# undef EV_USE_MONOTONIC 417# undef EV_USE_MONOTONIC
410# define EV_USE_MONOTONIC 1 418# define EV_USE_MONOTONIC 1
419# define EV_NEED_SYSCALL 1
411# else 420# else
412# undef EV_USE_CLOCK_SYSCALL 421# undef EV_USE_CLOCK_SYSCALL
413# define EV_USE_CLOCK_SYSCALL 0 422# define EV_USE_CLOCK_SYSCALL 0
414# endif 423# endif
415#endif 424#endif
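When the clock syscall path is taken, the macro above routes clock_gettime through the raw syscall and now also sets EV_NEED_SYSCALL. A standalone sketch of the same pattern, assuming a Linux system that defines SYS_clock_gettime (raw_clock_gettime is an illustrative name):

#include <stdio.h>
#include <sys/syscall.h>
#include <time.h>
#include <unistd.h>

/* what clock_gettime (id, ts) expands to when the syscall path is taken */
static int
raw_clock_gettime (clockid_t id, struct timespec *ts)
{
  return syscall (SYS_clock_gettime, id, ts);
}

int
main (void)
{
  struct timespec ts;
  raw_clock_gettime (CLOCK_MONOTONIC, &ts);
  printf ("%ld.%09ld\n", (long)ts.tv_sec, ts.tv_nsec);
  return 0;
}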
438# endif 447# endif
439#endif 448#endif
440 449
441#if EV_USE_LINUXAIO 450#if EV_USE_LINUXAIO
442# include <sys/syscall.h> 451# include <sys/syscall.h>
443# if !SYS_io_getevents || !EV_USE_EPOLL 452# if SYS_io_getevents && EV_USE_EPOLL /* linuxaio backend requires epoll backend */
453# define EV_NEED_SYSCALL 1
454# else
444# undef EV_USE_LINUXAIO 455# undef EV_USE_LINUXAIO
445# define EV_USE_LINUXAIO 0 456# define EV_USE_LINUXAIO 0
457# endif
458#endif
459
460#if EV_USE_IOURING
461# include <sys/syscall.h>
462# if !SYS_io_uring_setup && __linux && !__alpha
463# define SYS_io_uring_setup 425
464# define SYS_io_uring_enter 426
465#  define SYS_io_uring_register 427
466# endif
467# if SYS_io_uring_setup && EV_USE_EPOLL /* iouring backend requires epoll backend */
468# define EV_NEED_SYSCALL 1
469# else
470# undef EV_USE_IOURING
471# define EV_USE_IOURING 0
446# endif 472# endif
447#endif 473#endif
448 474
449#if EV_USE_INOTIFY 475#if EV_USE_INOTIFY
450# include <sys/statfs.h> 476# include <sys/statfs.h>
492 uint32_t ssi_signo; 518 uint32_t ssi_signo;
493 char pad[128 - sizeof (uint32_t)]; 519 char pad[128 - sizeof (uint32_t)];
494}; 520};
495#endif 521#endif
496 522
497/**/ 523/*****************************************************************************/
498 524
499#if EV_VERIFY >= 3 525#if EV_VERIFY >= 3
500# define EV_FREQUENT_CHECK ev_verify (EV_A) 526# define EV_FREQUENT_CHECK ev_verify (EV_A)
501#else 527#else
502# define EV_FREQUENT_CHECK do { } while (0) 528# define EV_FREQUENT_CHECK do { } while (0)
507 * This value is good at least till the year 4000. 533 * This value is good at least till the year 4000.
508 */ 534 */
509#define MIN_INTERVAL 0.0001220703125 /* 1/2**13, good till 4000 */ 535#define MIN_INTERVAL 0.0001220703125 /* 1/2**13, good till 4000 */
510/*#define MIN_INTERVAL 0.00000095367431640625 /* 1/2**20, good till 2200 */ 536/*#define MIN_INTERVAL 0.00000095367431640625 /* 1/2**20, good till 2200 */
511 537
512#define MIN_TIMEJUMP 1. /* minimum timejump that gets detected (if monotonic clock available) */ 538#define MIN_TIMEJUMP 1. /* minimum timejump that gets detected (if monotonic clock available) */
513#define MAX_BLOCKTIME 59.743 /* never wait longer than this time (to detect time jumps) */ 539#define MAX_BLOCKTIME 59.743 /* never wait longer than this time (to detect time jumps) */
540
541/* find a portable timestamp that is "always" in the future but fits into time_t.
542 * this is quite hard, and we are mostly guessing - we handle 32 bit signed/unsigned time_t,
543 * and sizes larger than 32 bit, and maybe the unlikely floating point time_t */
544#define EV_TSTAMP_HUGE \
545 (sizeof (time_t) >= 8 ? 10000000000000. \
546 : 0 < (time_t)4294967295 ? 4294967295. \
547 : 2147483647.) \
514 548
515#define EV_TV_SET(tv,t) do { tv.tv_sec = (long)t; tv.tv_usec = (long)((t - tv.tv_sec) * 1e6); } while (0) 549#define EV_TV_SET(tv,t) do { tv.tv_sec = (long)t; tv.tv_usec = (long)((t - tv.tv_sec) * 1e6); } while (0)
516#define EV_TS_SET(ts,t) do { ts.tv_sec = (long)t; ts.tv_nsec = (long)((t - ts.tv_sec) * 1e9); } while (0) 550#define EV_TS_SET(ts,t) do { ts.tv_sec = (long)t; ts.tv_nsec = (long)((t - ts.tv_sec) * 1e9); } while (0)
517 551
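EV_TV_SET and EV_TS_SET split an ev_tstamp into integer seconds and a fractional microsecond or nanosecond part. A small standalone sketch of what EV_TS_SET computes for a fractional timestamp (the value 1.25 is purely illustrative):

#include <stdio.h>
#include <time.h>

int
main (void)
{
  struct timespec ts;
  double t = 1.25;                            /* some ev_tstamp */

  ts.tv_sec  = (long)t;                       /* 1 */
  ts.tv_nsec = (long)((t - ts.tv_sec) * 1e9); /* 250000000 */

  printf ("%ld s, %ld ns\n", (long)ts.tv_sec, ts.tv_nsec);
  return 0;
}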
518/* the following is ecb.h embedded into libev - use update_ev_c to update from an external copy */ 552/* the following is ecb.h embedded into libev - use update_ev_c to update from an external copy */
559 593
560#ifndef ECB_H 594#ifndef ECB_H
561#define ECB_H 595#define ECB_H
562 596
563/* 16 bits major, 16 bits minor */ 597/* 16 bits major, 16 bits minor */
564#define ECB_VERSION 0x00010005 598#define ECB_VERSION 0x00010006
565 599
566#ifdef _WIN32 600#ifdef _WIN32
567 typedef signed char int8_t; 601 typedef signed char int8_t;
568 typedef unsigned char uint8_t; 602 typedef unsigned char uint8_t;
569 typedef signed short int16_t; 603 typedef signed short int16_t;
683 #include <intrin.h> /* fence functions _ReadBarrier, also bit search functions _BitScanReverse */ 717 #include <intrin.h> /* fence functions _ReadBarrier, also bit search functions _BitScanReverse */
684#endif 718#endif
685 719
686#ifndef ECB_MEMORY_FENCE 720#ifndef ECB_MEMORY_FENCE
687 #if ECB_GCC_VERSION(2,5) || defined __INTEL_COMPILER || (__llvm__ && __GNUC__) || __SUNPRO_C >= 0x5110 || __SUNPRO_CC >= 0x5110 721 #if ECB_GCC_VERSION(2,5) || defined __INTEL_COMPILER || (__llvm__ && __GNUC__) || __SUNPRO_C >= 0x5110 || __SUNPRO_CC >= 0x5110
722 #define ECB_MEMORY_FENCE_RELAXED __asm__ __volatile__ ("" : : : "memory")
688 #if __i386 || __i386__ 723 #if __i386 || __i386__
689 #define ECB_MEMORY_FENCE __asm__ __volatile__ ("lock; orb $0, -1(%%esp)" : : : "memory") 724 #define ECB_MEMORY_FENCE __asm__ __volatile__ ("lock; orb $0, -1(%%esp)" : : : "memory")
690 #define ECB_MEMORY_FENCE_ACQUIRE __asm__ __volatile__ ("" : : : "memory") 725 #define ECB_MEMORY_FENCE_ACQUIRE __asm__ __volatile__ ("" : : : "memory")
691 #define ECB_MEMORY_FENCE_RELEASE __asm__ __volatile__ ("" : : : "memory") 726 #define ECB_MEMORY_FENCE_RELEASE __asm__ __volatile__ ("" : : : "memory")
692 #elif ECB_GCC_AMD64 727 #elif ECB_GCC_AMD64
742 #if ECB_GCC_VERSION(4,7) 777 #if ECB_GCC_VERSION(4,7)
743 /* see comment below (stdatomic.h) about the C11 memory model. */ 778 /* see comment below (stdatomic.h) about the C11 memory model. */
744 #define ECB_MEMORY_FENCE __atomic_thread_fence (__ATOMIC_SEQ_CST) 779 #define ECB_MEMORY_FENCE __atomic_thread_fence (__ATOMIC_SEQ_CST)
745 #define ECB_MEMORY_FENCE_ACQUIRE __atomic_thread_fence (__ATOMIC_ACQUIRE) 780 #define ECB_MEMORY_FENCE_ACQUIRE __atomic_thread_fence (__ATOMIC_ACQUIRE)
746 #define ECB_MEMORY_FENCE_RELEASE __atomic_thread_fence (__ATOMIC_RELEASE) 781 #define ECB_MEMORY_FENCE_RELEASE __atomic_thread_fence (__ATOMIC_RELEASE)
782 #define ECB_MEMORY_FENCE_RELAXED __atomic_thread_fence (__ATOMIC_RELAXED)
747 783
748 #elif ECB_CLANG_EXTENSION(c_atomic) 784 #elif ECB_CLANG_EXTENSION(c_atomic)
749 /* see comment below (stdatomic.h) about the C11 memory model. */ 785 /* see comment below (stdatomic.h) about the C11 memory model. */
750 #define ECB_MEMORY_FENCE __c11_atomic_thread_fence (__ATOMIC_SEQ_CST) 786 #define ECB_MEMORY_FENCE __c11_atomic_thread_fence (__ATOMIC_SEQ_CST)
751 #define ECB_MEMORY_FENCE_ACQUIRE __c11_atomic_thread_fence (__ATOMIC_ACQUIRE) 787 #define ECB_MEMORY_FENCE_ACQUIRE __c11_atomic_thread_fence (__ATOMIC_ACQUIRE)
752 #define ECB_MEMORY_FENCE_RELEASE __c11_atomic_thread_fence (__ATOMIC_RELEASE) 788 #define ECB_MEMORY_FENCE_RELEASE __c11_atomic_thread_fence (__ATOMIC_RELEASE)
789 #define ECB_MEMORY_FENCE_RELAXED __c11_atomic_thread_fence (__ATOMIC_RELAXED)
753 790
754 #elif ECB_GCC_VERSION(4,4) || defined __INTEL_COMPILER || defined __clang__ 791 #elif ECB_GCC_VERSION(4,4) || defined __INTEL_COMPILER || defined __clang__
755 #define ECB_MEMORY_FENCE __sync_synchronize () 792 #define ECB_MEMORY_FENCE __sync_synchronize ()
756 #elif _MSC_VER >= 1500 /* VC++ 2008 */ 793 #elif _MSC_VER >= 1500 /* VC++ 2008 */
757 /* apparently, microsoft broke all the memory barrier stuff in Visual Studio 2008... */ 794 /* apparently, microsoft broke all the memory barrier stuff in Visual Studio 2008... */
767 #elif defined _WIN32 804 #elif defined _WIN32
768 #include <WinNT.h> 805 #include <WinNT.h>
769 #define ECB_MEMORY_FENCE MemoryBarrier () /* actually just xchg on x86... scary */ 806 #define ECB_MEMORY_FENCE MemoryBarrier () /* actually just xchg on x86... scary */
770 #elif __SUNPRO_C >= 0x5110 || __SUNPRO_CC >= 0x5110 807 #elif __SUNPRO_C >= 0x5110 || __SUNPRO_CC >= 0x5110
771 #include <mbarrier.h> 808 #include <mbarrier.h>
772 #define ECB_MEMORY_FENCE __machine_rw_barrier () 809 #define ECB_MEMORY_FENCE __machine_rw_barrier ()
773 #define ECB_MEMORY_FENCE_ACQUIRE __machine_r_barrier () 810 #define ECB_MEMORY_FENCE_ACQUIRE __machine_acq_barrier ()
774 #define ECB_MEMORY_FENCE_RELEASE __machine_w_barrier () 811 #define ECB_MEMORY_FENCE_RELEASE __machine_rel_barrier ()
812 #define ECB_MEMORY_FENCE_RELAXED __compiler_barrier ()
775 #elif __xlC__ 813 #elif __xlC__
776 #define ECB_MEMORY_FENCE __sync () 814 #define ECB_MEMORY_FENCE __sync ()
777 #endif 815 #endif
778#endif 816#endif
779 817
780#ifndef ECB_MEMORY_FENCE 818#ifndef ECB_MEMORY_FENCE
781 #if ECB_C11 && !defined __STDC_NO_ATOMICS__ 819 #if ECB_C11 && !defined __STDC_NO_ATOMICS__
782 /* we assume that these memory fences work on all variables/all memory accesses, */ 820 /* we assume that these memory fences work on all variables/all memory accesses, */
783 /* not just C11 atomics and atomic accesses */ 821 /* not just C11 atomics and atomic accesses */
784 #include <stdatomic.h> 822 #include <stdatomic.h>
785 /* Unfortunately, neither gcc 4.7 nor clang 3.1 generate any instructions for */
786 /* any fence other than seq_cst, which isn't very efficient for us. */
787 /* Why that is, we don't know - either the C11 memory model is quite useless */
788 /* for most usages, or gcc and clang have a bug */
789 /* I *currently* lean towards the latter, and inefficiently implement */
790 /* all three of ecb's fences as a seq_cst fence */
791 /* Update, gcc-4.8 generates mfence for all c++ fences, but nothing */
792 /* for all __atomic_thread_fence's except seq_cst */
793 #define ECB_MEMORY_FENCE atomic_thread_fence (memory_order_seq_cst) 823 #define ECB_MEMORY_FENCE atomic_thread_fence (memory_order_seq_cst)
824 #define ECB_MEMORY_FENCE_ACQUIRE atomic_thread_fence (memory_order_acquire)
825 #define ECB_MEMORY_FENCE_RELEASE atomic_thread_fence (memory_order_release)
794 #endif 826 #endif
795#endif 827#endif
796 828
797#ifndef ECB_MEMORY_FENCE 829#ifndef ECB_MEMORY_FENCE
798 #if !ECB_AVOID_PTHREADS 830 #if !ECB_AVOID_PTHREADS
816 #define ECB_MEMORY_FENCE_ACQUIRE ECB_MEMORY_FENCE 848 #define ECB_MEMORY_FENCE_ACQUIRE ECB_MEMORY_FENCE
817#endif 849#endif
818 850
819#if !defined ECB_MEMORY_FENCE_RELEASE && defined ECB_MEMORY_FENCE 851#if !defined ECB_MEMORY_FENCE_RELEASE && defined ECB_MEMORY_FENCE
820 #define ECB_MEMORY_FENCE_RELEASE ECB_MEMORY_FENCE 852 #define ECB_MEMORY_FENCE_RELEASE ECB_MEMORY_FENCE
853#endif
854
855#if !defined ECB_MEMORY_FENCE_RELAXED && defined ECB_MEMORY_FENCE
856 #define ECB_MEMORY_FENCE_RELAXED ECB_MEMORY_FENCE /* very heavy-handed */
821#endif 857#endif
822 858
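The new ECB_MEMORY_FENCE_RELAXED joins the existing full, acquire and release fences; on platforms that only provide a full fence, all of them fall back to it. A minimal pairing sketch, assuming two threads that communicate through a plain flag (the names and the pattern are illustrative, not taken from ecb.h):

static int data;
static int flag;

static void
producer (void)
{
  data = 42;                  /* prepare data */
  ECB_MEMORY_FENCE_RELEASE;   /* publish data before the flag becomes visible */
  flag = 1;
}

static void
consumer (void)
{
  if (flag)
    {
      ECB_MEMORY_FENCE_ACQUIRE; /* observe data written before flag was set */
      /* use data */
    }
}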
823/*****************************************************************************/ 859/*****************************************************************************/
824 860
825#if ECB_CPP 861#if ECB_CPP
1534/* ECB.H END */ 1570/* ECB.H END */
1535 1571
1536#if ECB_MEMORY_FENCE_NEEDS_PTHREADS 1572#if ECB_MEMORY_FENCE_NEEDS_PTHREADS
1537/* if your architecture doesn't need memory fences, e.g. because it is 1573/* if your architecture doesn't need memory fences, e.g. because it is
1538 * single-cpu/core, or if you use libev in a project that doesn't use libev 1574 * single-cpu/core, or if you use libev in a project that doesn't use libev
1539 * from multiple threads, then you can define ECB_AVOID_PTHREADS when compiling 1575 * from multiple threads, then you can define ECB_NO_THREADS when compiling
1540 * libev, in which cases the memory fences become nops. 1576 * libev, in which cases the memory fences become nops.
1541 * alternatively, you can remove this #error and link against libpthread, 1577 * alternatively, you can remove this #error and link against libpthread,
1542 * which will then provide the memory fences. 1578 * which will then provide the memory fences.
1543 */ 1579 */
1544# error "memory fences not defined for your architecture, please report" 1580# error "memory fences not defined for your architecture, please report"
1548# define ECB_MEMORY_FENCE do { } while (0) 1584# define ECB_MEMORY_FENCE do { } while (0)
1549# define ECB_MEMORY_FENCE_ACQUIRE ECB_MEMORY_FENCE 1585# define ECB_MEMORY_FENCE_ACQUIRE ECB_MEMORY_FENCE
1550# define ECB_MEMORY_FENCE_RELEASE ECB_MEMORY_FENCE 1586# define ECB_MEMORY_FENCE_RELEASE ECB_MEMORY_FENCE
1551#endif 1587#endif
1552 1588
1553#define expect_false(cond) ecb_expect_false (cond)
1554#define expect_true(cond) ecb_expect_true (cond)
1555#define noinline ecb_noinline
1556
1557#define inline_size ecb_inline 1589#define inline_size ecb_inline
1558 1590
1559#if EV_FEATURE_CODE 1591#if EV_FEATURE_CODE
1560# define inline_speed ecb_inline 1592# define inline_speed ecb_inline
1561#else 1593#else
1562# define inline_speed noinline static 1594# define inline_speed ecb_noinline static
1563#endif 1595#endif
1596
1597/*****************************************************************************/
1598/* raw syscall wrappers */
1599
1600#if EV_NEED_SYSCALL
1601
1602#include <sys/syscall.h>
1603
1604/*
1605 * define some syscall wrappers for common architectures
1606 * this is mostly for nice looks during debugging, not performance.
1607 * our syscalls return < 0, not == -1, on error, which is good
1608 * enough for linux aio.
1609 * TODO: arm is also common nowadays, maybe even mips and x86
1610 * TODO: after implementing this, it suddenly looks like overkill, but it's hard to remove...
1611 */
1612#if __GNUC__ && __linux && ECB_AMD64 && !defined __OPTIMIZE_SIZE__
1613 /* the costly errno access probably kills this for size optimisation */
1614
1615 #define ev_syscall(nr,narg,arg1,arg2,arg3,arg4,arg5,arg6) \
1616 ({ \
1617 long res; \
1618 register unsigned long r6 __asm__ ("r9" ); \
1619 register unsigned long r5 __asm__ ("r8" ); \
1620 register unsigned long r4 __asm__ ("r10"); \
1621 register unsigned long r3 __asm__ ("rdx"); \
1622 register unsigned long r2 __asm__ ("rsi"); \
1623 register unsigned long r1 __asm__ ("rdi"); \
1624 if (narg >= 6) r6 = (unsigned long)(arg6); \
1625 if (narg >= 5) r5 = (unsigned long)(arg5); \
1626 if (narg >= 4) r4 = (unsigned long)(arg4); \
1627 if (narg >= 3) r3 = (unsigned long)(arg3); \
1628 if (narg >= 2) r2 = (unsigned long)(arg2); \
1629 if (narg >= 1) r1 = (unsigned long)(arg1); \
1630 __asm__ __volatile__ ( \
1631 "syscall\n\t" \
1632 : "=a" (res) \
1633 : "0" (nr), "r" (r1), "r" (r2), "r" (r3), "r" (r4), "r" (r5) \
1634 : "cc", "r11", "cx", "memory"); \
1635 errno = -res; \
1636 res; \
1637 })
1638
1639#endif
1640
1641#ifdef ev_syscall
1642 #define ev_syscall0(nr) ev_syscall (nr, 0, 0, 0, 0, 0, 0, 0)
1643 #define ev_syscall1(nr,arg1) ev_syscall (nr, 1, arg1, 0, 0, 0, 0, 0)
1644 #define ev_syscall2(nr,arg1,arg2) ev_syscall (nr, 2, arg1, arg2, 0, 0, 0, 0)
1645 #define ev_syscall3(nr,arg1,arg2,arg3) ev_syscall (nr, 3, arg1, arg2, arg3, 0, 0, 0)
1646  #define ev_syscall4(nr,arg1,arg2,arg3,arg4)           ev_syscall (nr, 4, arg1, arg2, arg3, arg4, 0, 0)
1647 #define ev_syscall5(nr,arg1,arg2,arg3,arg4,arg5) ev_syscall (nr, 5, arg1, arg2, arg3, arg4, arg5, 0)
1648 #define ev_syscall6(nr,arg1,arg2,arg3,arg4,arg5,arg6) ev_syscall (nr, 6, arg1, arg2, arg3, arg4, arg5,arg6)
1649#else
1650 #define ev_syscall0(nr) syscall (nr)
1651 #define ev_syscall1(nr,arg1) syscall (nr, arg1)
1652 #define ev_syscall2(nr,arg1,arg2) syscall (nr, arg1, arg2)
1653 #define ev_syscall3(nr,arg1,arg2,arg3) syscall (nr, arg1, arg2, arg3)
1654 #define ev_syscall4(nr,arg1,arg2,arg3,arg4) syscall (nr, arg1, arg2, arg3, arg4)
1655 #define ev_syscall5(nr,arg1,arg2,arg3,arg4,arg5) syscall (nr, arg1, arg2, arg3, arg4, arg5)
1656 #define ev_syscall6(nr,arg1,arg2,arg3,arg4,arg5,arg6) syscall (nr, arg1, arg2, arg3, arg4, arg5,arg6)
1657#endif
1658
1659#endif
1660
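With EV_NEED_SYSCALL set, the ev_syscallN macros either expand to plain syscall () calls or to the inline-assembly amd64 fast path above, which returns the negative error code and mirrors it into errno. A standalone sketch of the generic path, assuming glibc's syscall () and SYS_gettid; the error check mirrors the "return < 0" convention described in the comment above:

#include <stdio.h>
#include <sys/syscall.h>
#include <unistd.h>

int
main (void)
{
  /* ev_syscall0 (SYS_gettid) reduces to this on the generic path */
  long tid = syscall (SYS_gettid);

  if (tid < 0)                /* the wrappers report errors as res < 0 */
    perror ("gettid");
  else
    printf ("tid %ld\n", tid);

  return 0;
}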
1661/*****************************************************************************/
1564 1662
1565#define NUMPRI (EV_MAXPRI - EV_MINPRI + 1) 1663#define NUMPRI (EV_MAXPRI - EV_MINPRI + 1)
1566 1664
1567#if EV_MINPRI == EV_MAXPRI 1665#if EV_MINPRI == EV_MAXPRI
1568# define ABSPRI(w) (((W)w), 0) 1666# define ABSPRI(w) (((W)w), 0)
1617#else 1715#else
1618 1716
1619#include <float.h> 1717#include <float.h>
1620 1718
1621/* a floor() replacement function, should be independent of ev_tstamp type */ 1719/* a floor() replacement function, should be independent of ev_tstamp type */
1622noinline 1720ecb_noinline
1623static ev_tstamp 1721static ev_tstamp
1624ev_floor (ev_tstamp v) 1722ev_floor (ev_tstamp v)
1625{ 1723{
1626 /* the choice of shift factor is not terribly important */ 1724 /* the choice of shift factor is not terribly important */
1627#if FLT_RADIX != 2 /* assume FLT_RADIX == 10 */ 1725#if FLT_RADIX != 2 /* assume FLT_RADIX == 10 */
1629#else 1727#else
1630 const ev_tstamp shift = sizeof (unsigned long) >= 8 ? 18446744073709551616. : 4294967296.; 1728 const ev_tstamp shift = sizeof (unsigned long) >= 8 ? 18446744073709551616. : 4294967296.;
1631#endif 1729#endif
1632 1730
1633 /* argument too large for an unsigned long? */ 1731 /* argument too large for an unsigned long? */
1634 if (expect_false (v >= shift)) 1732 if (ecb_expect_false (v >= shift))
1635 { 1733 {
1636 ev_tstamp f; 1734 ev_tstamp f;
1637 1735
1638 if (v == v - 1.) 1736 if (v == v - 1.)
1639 return v; /* very large number */ 1737 return v; /* very large number */
1641 f = shift * ev_floor (v * (1. / shift)); 1739 f = shift * ev_floor (v * (1. / shift));
1642 return f + ev_floor (v - f); 1740 return f + ev_floor (v - f);
1643 } 1741 }
1644 1742
1645 /* special treatment for negative args? */ 1743 /* special treatment for negative args? */
1646 if (expect_false (v < 0.)) 1744 if (ecb_expect_false (v < 0.))
1647 { 1745 {
1648 ev_tstamp f = -ev_floor (-v); 1746 ev_tstamp f = -ev_floor (-v);
1649 1747
1650 return f - (f == v ? 0 : 1); 1748 return f - (f == v ? 0 : 1);
1651 } 1749 }
1660 1758
1661#ifdef __linux 1759#ifdef __linux
1662# include <sys/utsname.h> 1760# include <sys/utsname.h>
1663#endif 1761#endif
1664 1762
1665noinline ecb_cold 1763ecb_noinline ecb_cold
1666static unsigned int 1764static unsigned int
1667ev_linux_version (void) 1765ev_linux_version (void)
1668{ 1766{
1669#ifdef __linux 1767#ifdef __linux
1670 unsigned int v = 0; 1768 unsigned int v = 0;
1700} 1798}
1701 1799
1702/*****************************************************************************/ 1800/*****************************************************************************/
1703 1801
1704#if EV_AVOID_STDIO 1802#if EV_AVOID_STDIO
1705noinline ecb_cold 1803ecb_noinline ecb_cold
1706static void 1804static void
1707ev_printerr (const char *msg) 1805ev_printerr (const char *msg)
1708{ 1806{
1709 write (STDERR_FILENO, msg, strlen (msg)); 1807 write (STDERR_FILENO, msg, strlen (msg));
1710} 1808}
1717ev_set_syserr_cb (void (*cb)(const char *msg) EV_NOEXCEPT) EV_NOEXCEPT 1815ev_set_syserr_cb (void (*cb)(const char *msg) EV_NOEXCEPT) EV_NOEXCEPT
1718{ 1816{
1719 syserr_cb = cb; 1817 syserr_cb = cb;
1720} 1818}
1721 1819
1722noinline ecb_cold 1820ecb_noinline ecb_cold
1723static void 1821static void
1724ev_syserr (const char *msg) 1822ev_syserr (const char *msg)
1725{ 1823{
1726 if (!msg) 1824 if (!msg)
1727 msg = "(libev) system error"; 1825 msg = "(libev) system error";
1799{ 1897{
1800 WL head; 1898 WL head;
1801 unsigned char events; /* the events watched for */ 1899 unsigned char events; /* the events watched for */
1802 unsigned char reify; /* flag set when this ANFD needs reification (EV_ANFD_REIFY, EV__IOFDSET) */ 1900 unsigned char reify; /* flag set when this ANFD needs reification (EV_ANFD_REIFY, EV__IOFDSET) */
1803 unsigned char emask; /* some backends store the actual kernel mask in here */ 1901 unsigned char emask; /* some backends store the actual kernel mask in here */
1804 unsigned char unused; 1902 unsigned char eflags; /* flags field for use by backends */
1805#if EV_USE_EPOLL 1903#if EV_USE_EPOLL
1806 unsigned int egen; /* generation counter to counter epoll bugs */ 1904 unsigned int egen; /* generation counter to counter epoll bugs */
1807#endif 1905#endif
1808#if EV_SELECT_IS_WINSOCKET || EV_USE_IOCP 1906#if EV_SELECT_IS_WINSOCKET || EV_USE_IOCP
1809 SOCKET handle; 1907 SOCKET handle;
1873 static int ev_default_loop_ptr; 1971 static int ev_default_loop_ptr;
1874 1972
1875#endif 1973#endif
1876 1974
1877#if EV_FEATURE_API 1975#if EV_FEATURE_API
1878# define EV_RELEASE_CB if (expect_false (release_cb)) release_cb (EV_A) 1976# define EV_RELEASE_CB if (ecb_expect_false (release_cb)) release_cb (EV_A)
1879# define EV_ACQUIRE_CB if (expect_false (acquire_cb)) acquire_cb (EV_A) 1977# define EV_ACQUIRE_CB if (ecb_expect_false (acquire_cb)) acquire_cb (EV_A)
1880# define EV_INVOKE_PENDING invoke_cb (EV_A) 1978# define EV_INVOKE_PENDING invoke_cb (EV_A)
1881#else 1979#else
1882# define EV_RELEASE_CB (void)0 1980# define EV_RELEASE_CB (void)0
1883# define EV_ACQUIRE_CB (void)0 1981# define EV_ACQUIRE_CB (void)0
1884# define EV_INVOKE_PENDING ev_invoke_pending (EV_A) 1982# define EV_INVOKE_PENDING ev_invoke_pending (EV_A)
1891#ifndef EV_HAVE_EV_TIME 1989#ifndef EV_HAVE_EV_TIME
1892ev_tstamp 1990ev_tstamp
1893ev_time (void) EV_NOEXCEPT 1991ev_time (void) EV_NOEXCEPT
1894{ 1992{
1895#if EV_USE_REALTIME 1993#if EV_USE_REALTIME
1896 if (expect_true (have_realtime)) 1994 if (ecb_expect_true (have_realtime))
1897 { 1995 {
1898 struct timespec ts; 1996 struct timespec ts;
1899 clock_gettime (CLOCK_REALTIME, &ts); 1997 clock_gettime (CLOCK_REALTIME, &ts);
1900 return ts.tv_sec + ts.tv_nsec * 1e-9; 1998 return ts.tv_sec + ts.tv_nsec * 1e-9;
1901 } 1999 }
1909 2007
1910inline_size ev_tstamp 2008inline_size ev_tstamp
1911get_clock (void) 2009get_clock (void)
1912{ 2010{
1913#if EV_USE_MONOTONIC 2011#if EV_USE_MONOTONIC
1914 if (expect_true (have_monotonic)) 2012 if (ecb_expect_true (have_monotonic))
1915 { 2013 {
1916 struct timespec ts; 2014 struct timespec ts;
1917 clock_gettime (CLOCK_MONOTONIC, &ts); 2015 clock_gettime (CLOCK_MONOTONIC, &ts);
1918 return ts.tv_sec + ts.tv_nsec * 1e-9; 2016 return ts.tv_sec + ts.tv_nsec * 1e-9;
1919 } 2017 }
1981 } 2079 }
1982 2080
1983 return ncur; 2081 return ncur;
1984} 2082}
1985 2083
1986noinline ecb_cold 2084ecb_noinline ecb_cold
1987static void * 2085static void *
1988array_realloc (int elem, void *base, int *cur, int cnt) 2086array_realloc (int elem, void *base, int *cur, int cnt)
1989{ 2087{
1990 *cur = array_nextsize (elem, *cur, cnt); 2088 *cur = array_nextsize (elem, *cur, cnt);
1991 return ev_realloc (base, elem * *cur); 2089 return ev_realloc (base, elem * *cur);
1992} 2090}
1993 2091
1994#define array_needsize_noinit(base,count) 2092#define array_needsize_noinit(base,offset,count)
1995 2093
1996#define array_needsize_zerofill(base,count) \ 2094#define array_needsize_zerofill(base,offset,count) \
1997 memset ((void *)(base), 0, sizeof (*(base)) * (count)) 2095 memset ((void *)(base + offset), 0, sizeof (*(base)) * (count))
1998 2096
1999#define array_needsize(type,base,cur,cnt,init) \ 2097#define array_needsize(type,base,cur,cnt,init) \
2000 if (expect_false ((cnt) > (cur))) \ 2098 if (ecb_expect_false ((cnt) > (cur))) \
2001 { \ 2099 { \
2002 ecb_unused int ocur_ = (cur); \ 2100 ecb_unused int ocur_ = (cur); \
2003 (base) = (type *)array_realloc \ 2101 (base) = (type *)array_realloc \
2004 (sizeof (type), (base), &(cur), (cnt)); \ 2102 (sizeof (type), (base), &(cur), (cnt)); \
2005 init ((base) + (ocur_), (cur) - ocur_); \ 2103 init ((base), ocur_, ((cur) - ocur_)); \
2006 } 2104 }
2007 2105
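The initialiser macros now take (base, offset, count), which lets array_needsize zero only the newly appended elements after array_realloc has grown the array. An illustrative expansion with hypothetical sizes (4 old, 16 new elements):

#include <stdlib.h>
#include <string.h>

int
main (void)
{
  int ocur = 4, cur = 16;                 /* old and new array sizes */
  int *base = malloc (sizeof (int) * cur);

  /* array_needsize_zerofill (base, ocur, cur - ocur) expands to: */
  memset ((void *)(base + ocur), 0, sizeof (*base) * (cur - ocur));

  free (base);
  return 0;
}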
2008#if 0 2106#if 0
2009#define array_slim(type,stem) \ 2107#define array_slim(type,stem) \
2010 if (stem ## max < array_roundsize (stem ## cnt >> 2)) \ 2108 if (stem ## max < array_roundsize (stem ## cnt >> 2)) \
2019 ev_free (stem ## s idx); stem ## cnt idx = stem ## max idx = 0; stem ## s idx = 0 2117 ev_free (stem ## s idx); stem ## cnt idx = stem ## max idx = 0; stem ## s idx = 0
2020 2118
2021/*****************************************************************************/ 2119/*****************************************************************************/
2022 2120
2023/* dummy callback for pending events */ 2121/* dummy callback for pending events */
2024noinline 2122ecb_noinline
2025static void 2123static void
2026pendingcb (EV_P_ ev_prepare *w, int revents) 2124pendingcb (EV_P_ ev_prepare *w, int revents)
2027{ 2125{
2028} 2126}
2029 2127
2030noinline 2128ecb_noinline
2031void 2129void
2032ev_feed_event (EV_P_ void *w, int revents) EV_NOEXCEPT 2130ev_feed_event (EV_P_ void *w, int revents) EV_NOEXCEPT
2033{ 2131{
2034 W w_ = (W)w; 2132 W w_ = (W)w;
2035 int pri = ABSPRI (w_); 2133 int pri = ABSPRI (w_);
2036 2134
2037 if (expect_false (w_->pending)) 2135 if (ecb_expect_false (w_->pending))
2038 pendings [pri][w_->pending - 1].events |= revents; 2136 pendings [pri][w_->pending - 1].events |= revents;
2039 else 2137 else
2040 { 2138 {
2041 w_->pending = ++pendingcnt [pri]; 2139 w_->pending = ++pendingcnt [pri];
2042 array_needsize (ANPENDING, pendings [pri], pendingmax [pri], w_->pending, array_needsize_noinit); 2140 array_needsize (ANPENDING, pendings [pri], pendingmax [pri], w_->pending, array_needsize_noinit);
2093inline_speed void 2191inline_speed void
2094fd_event (EV_P_ int fd, int revents) 2192fd_event (EV_P_ int fd, int revents)
2095{ 2193{
2096 ANFD *anfd = anfds + fd; 2194 ANFD *anfd = anfds + fd;
2097 2195
2098 if (expect_true (!anfd->reify)) 2196 if (ecb_expect_true (!anfd->reify))
2099 fd_event_nocheck (EV_A_ fd, revents); 2197 fd_event_nocheck (EV_A_ fd, revents);
2100} 2198}
2101 2199
2102void 2200void
2103ev_feed_fd_event (EV_P_ int fd, int revents) EV_NOEXCEPT 2201ev_feed_fd_event (EV_P_ int fd, int revents) EV_NOEXCEPT
2145 ev_io *w; 2243 ev_io *w;
2146 2244
2147 unsigned char o_events = anfd->events; 2245 unsigned char o_events = anfd->events;
2148 unsigned char o_reify = anfd->reify; 2246 unsigned char o_reify = anfd->reify;
2149 2247
2150 anfd->reify = 0; 2248 anfd->reify = 0;
2151 2249
2152 /*if (expect_true (o_reify & EV_ANFD_REIFY)) probably a deoptimisation */ 2250 /*if (ecb_expect_true (o_reify & EV_ANFD_REIFY)) probably a deoptimisation */
2153 { 2251 {
2154 anfd->events = 0; 2252 anfd->events = 0;
2155 2253
2156 for (w = (ev_io *)anfd->head; w; w = (ev_io *)((WL)w)->next) 2254 for (w = (ev_io *)anfd->head; w; w = (ev_io *)((WL)w)->next)
2157 anfd->events |= (unsigned char)w->events; 2255 anfd->events |= (unsigned char)w->events;
2173fd_change (EV_P_ int fd, int flags) 2271fd_change (EV_P_ int fd, int flags)
2174{ 2272{
2175 unsigned char reify = anfds [fd].reify; 2273 unsigned char reify = anfds [fd].reify;
2176 anfds [fd].reify |= flags; 2274 anfds [fd].reify |= flags;
2177 2275
2178 if (expect_true (!reify)) 2276 if (ecb_expect_true (!reify))
2179 { 2277 {
2180 ++fdchangecnt; 2278 ++fdchangecnt;
2181 array_needsize (int, fdchanges, fdchangemax, fdchangecnt, array_needsize_noinit); 2279 array_needsize (int, fdchanges, fdchangemax, fdchangecnt, array_needsize_noinit);
2182 fdchanges [fdchangecnt - 1] = fd; 2280 fdchanges [fdchangecnt - 1] = fd;
2183 } 2281 }
2206 return fcntl (fd, F_GETFD) != -1; 2304 return fcntl (fd, F_GETFD) != -1;
2207#endif 2305#endif
2208} 2306}
2209 2307
2210/* called on EBADF to verify fds */ 2308/* called on EBADF to verify fds */
2211noinline ecb_cold 2309ecb_noinline ecb_cold
2212static void 2310static void
2213fd_ebadf (EV_P) 2311fd_ebadf (EV_P)
2214{ 2312{
2215 int fd; 2313 int fd;
2216 2314
2219 if (!fd_valid (fd) && errno == EBADF) 2317 if (!fd_valid (fd) && errno == EBADF)
2220 fd_kill (EV_A_ fd); 2318 fd_kill (EV_A_ fd);
2221} 2319}
2222 2320
2223/* called on ENOMEM in select/poll to kill some fds and retry */ 2321/* called on ENOMEM in select/poll to kill some fds and retry */
2224noinline ecb_cold 2322ecb_noinline ecb_cold
2225static void 2323static void
2226fd_enomem (EV_P) 2324fd_enomem (EV_P)
2227{ 2325{
2228 int fd; 2326 int fd;
2229 2327
2234 break; 2332 break;
2235 } 2333 }
2236} 2334}
2237 2335
2238/* usually called after fork if backend needs to re-arm all fds from scratch */ 2336/* usually called after fork if backend needs to re-arm all fds from scratch */
2239noinline 2337ecb_noinline
2240static void 2338static void
2241fd_rearm_all (EV_P) 2339fd_rearm_all (EV_P)
2242{ 2340{
2243 int fd; 2341 int fd;
2244 2342
2298 ev_tstamp minat; 2396 ev_tstamp minat;
2299 ANHE *minpos; 2397 ANHE *minpos;
2300 ANHE *pos = heap + DHEAP * (k - HEAP0) + HEAP0 + 1; 2398 ANHE *pos = heap + DHEAP * (k - HEAP0) + HEAP0 + 1;
2301 2399
2302 /* find minimum child */ 2400 /* find minimum child */
2303 if (expect_true (pos + DHEAP - 1 < E)) 2401 if (ecb_expect_true (pos + DHEAP - 1 < E))
2304 { 2402 {
2305 /* fast path */ (minpos = pos + 0), (minat = ANHE_at (*minpos)); 2403 /* fast path */ (minpos = pos + 0), (minat = ANHE_at (*minpos));
2306 if ( ANHE_at (pos [1]) < minat) (minpos = pos + 1), (minat = ANHE_at (*minpos)); 2404 if ( ANHE_at (pos [1]) < minat) (minpos = pos + 1), (minat = ANHE_at (*minpos));
2307 if ( ANHE_at (pos [2]) < minat) (minpos = pos + 2), (minat = ANHE_at (*minpos)); 2405 if ( ANHE_at (pos [2]) < minat) (minpos = pos + 2), (minat = ANHE_at (*minpos));
2308 if ( ANHE_at (pos [3]) < minat) (minpos = pos + 3), (minat = ANHE_at (*minpos)); 2406 if ( ANHE_at (pos [3]) < minat) (minpos = pos + 3), (minat = ANHE_at (*minpos));
2426 2524
2427/*****************************************************************************/ 2525/*****************************************************************************/
2428 2526
2429#if EV_SIGNAL_ENABLE || EV_ASYNC_ENABLE 2527#if EV_SIGNAL_ENABLE || EV_ASYNC_ENABLE
2430 2528
2431noinline ecb_cold 2529ecb_noinline ecb_cold
2432static void 2530static void
2433evpipe_init (EV_P) 2531evpipe_init (EV_P)
2434{ 2532{
2435 if (!ev_is_active (&pipe_w)) 2533 if (!ev_is_active (&pipe_w))
2436 { 2534 {
2477inline_speed void 2575inline_speed void
2478evpipe_write (EV_P_ EV_ATOMIC_T *flag) 2576evpipe_write (EV_P_ EV_ATOMIC_T *flag)
2479{ 2577{
2480 ECB_MEMORY_FENCE; /* push out the write before this function was called, acquire flag */ 2578 ECB_MEMORY_FENCE; /* push out the write before this function was called, acquire flag */
2481 2579
2482 if (expect_true (*flag)) 2580 if (ecb_expect_true (*flag))
2483 return; 2581 return;
2484 2582
2485 *flag = 1; 2583 *flag = 1;
2486 ECB_MEMORY_FENCE_RELEASE; /* make sure flag is visible before the wakeup */ 2584 ECB_MEMORY_FENCE_RELEASE; /* make sure flag is visible before the wakeup */
2487 2585
2564 sig_pending = 0; 2662 sig_pending = 0;
2565 2663
2566 ECB_MEMORY_FENCE; 2664 ECB_MEMORY_FENCE;
2567 2665
2568 for (i = EV_NSIG - 1; i--; ) 2666 for (i = EV_NSIG - 1; i--; )
2569 if (expect_false (signals [i].pending)) 2667 if (ecb_expect_false (signals [i].pending))
2570 ev_feed_signal_event (EV_A_ i + 1); 2668 ev_feed_signal_event (EV_A_ i + 1);
2571 } 2669 }
2572#endif 2670#endif
2573 2671
2574#if EV_ASYNC_ENABLE 2672#if EV_ASYNC_ENABLE
2615#endif 2713#endif
2616 2714
2617 ev_feed_signal (signum); 2715 ev_feed_signal (signum);
2618} 2716}
2619 2717
2620noinline 2718ecb_noinline
2621void 2719void
2622ev_feed_signal_event (EV_P_ int signum) EV_NOEXCEPT 2720ev_feed_signal_event (EV_P_ int signum) EV_NOEXCEPT
2623{ 2721{
2624 WL w; 2722 WL w;
2625 2723
2626 if (expect_false (signum <= 0 || signum >= EV_NSIG)) 2724 if (ecb_expect_false (signum <= 0 || signum >= EV_NSIG))
2627 return; 2725 return;
2628 2726
2629 --signum; 2727 --signum;
2630 2728
2631#if EV_MULTIPLICITY 2729#if EV_MULTIPLICITY
2632 /* it is permissible to try to feed a signal to the wrong loop */ 2730 /* it is permissible to try to feed a signal to the wrong loop */
2633 /* or, likely more useful, feeding a signal nobody is waiting for */ 2731 /* or, likely more useful, feeding a signal nobody is waiting for */
2634 2732
2635 if (expect_false (signals [signum].loop != EV_A)) 2733 if (ecb_expect_false (signals [signum].loop != EV_A))
2636 return; 2734 return;
2637#endif 2735#endif
2638 2736
2639 signals [signum].pending = 0; 2737 signals [signum].pending = 0;
2640 ECB_MEMORY_FENCE_RELEASE; 2738 ECB_MEMORY_FENCE_RELEASE;
2739# include "ev_epoll.c" 2837# include "ev_epoll.c"
2740#endif 2838#endif
2741#if EV_USE_LINUXAIO 2839#if EV_USE_LINUXAIO
2742# include "ev_linuxaio.c" 2840# include "ev_linuxaio.c"
2743#endif 2841#endif
2842#if EV_USE_IOURING
2843# include "ev_iouring.c"
2844#endif
2744#if EV_USE_POLL 2845#if EV_USE_POLL
2745# include "ev_poll.c" 2846# include "ev_poll.c"
2746#endif 2847#endif
2747#if EV_USE_SELECT 2848#if EV_USE_SELECT
2748# include "ev_select.c" 2849# include "ev_select.c"
2780 2881
2781 if (EV_USE_PORT ) flags |= EVBACKEND_PORT; 2882 if (EV_USE_PORT ) flags |= EVBACKEND_PORT;
2782 if (EV_USE_KQUEUE ) flags |= EVBACKEND_KQUEUE; 2883 if (EV_USE_KQUEUE ) flags |= EVBACKEND_KQUEUE;
2783 if (EV_USE_EPOLL ) flags |= EVBACKEND_EPOLL; 2884 if (EV_USE_EPOLL ) flags |= EVBACKEND_EPOLL;
2784 if (EV_USE_LINUXAIO) flags |= EVBACKEND_LINUXAIO; 2885 if (EV_USE_LINUXAIO) flags |= EVBACKEND_LINUXAIO;
2886 if (EV_USE_IOURING ) flags |= EVBACKEND_IOURING;
2785 if (EV_USE_POLL ) flags |= EVBACKEND_POLL; 2887 if (EV_USE_POLL ) flags |= EVBACKEND_POLL;
2786 if (EV_USE_SELECT ) flags |= EVBACKEND_SELECT; 2888 if (EV_USE_SELECT ) flags |= EVBACKEND_SELECT;
2787 2889
2788 return flags; 2890 return flags;
2789} 2891}
2810 2912
2811 /* TODO: linuxaio is very experimental */ 2913 /* TODO: linuxaio is very experimental */
2812#if !EV_RECOMMEND_LINUXAIO 2914#if !EV_RECOMMEND_LINUXAIO
2813 flags &= ~EVBACKEND_LINUXAIO; 2915 flags &= ~EVBACKEND_LINUXAIO;
2814#endif 2916#endif
2917  /* TODO: iouring is super experimental */
2918#if !EV_RECOMMEND_IOURING
2919 flags &= ~EVBACKEND_IOURING;
2920#endif
2815 2921
2816 return flags; 2922 return flags;
2817} 2923}
2818 2924
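ev_recommended_backends () keeps masking out both EVBACKEND_LINUXAIO and the new EVBACKEND_IOURING, so a default loop will not pick them up. A usage sketch of the public API for opting in anyway, falling back to the recommended set when io_uring is not compiled in (make_loop is a hypothetical helper name):

#include <ev.h>

static struct ev_loop *
make_loop (void)
{
  struct ev_loop *loop = 0;

  if (ev_supported_backends () & EVBACKEND_IOURING)
    loop = ev_loop_new (EVBACKEND_IOURING);

  if (!loop)
    loop = ev_loop_new (EVFLAG_AUTO); /* fall back to the recommended set */

  return loop;
}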
2819ecb_cold 2925ecb_cold
2824 2930
2825 /* epoll embeddability broken on all linux versions up to at least 2.6.23 */ 2931 /* epoll embeddability broken on all linux versions up to at least 2.6.23 */
2826 if (ev_linux_version () < 0x020620) /* disable it on linux < 2.6.32 */ 2932 if (ev_linux_version () < 0x020620) /* disable it on linux < 2.6.32 */
2827 flags &= ~EVBACKEND_EPOLL; 2933 flags &= ~EVBACKEND_EPOLL;
2828 2934
2935 /* EVBACKEND_LINUXAIO is theoretically embeddable, but suffers from a performance overhead */
2936
2937 /* EVBACKEND_IOURING is practically embeddable, but the current implementation is not
2938 * because our backend_fd is the epoll fd we need as fallback.
2939 * if the kernel ever is fixed, this might change...
2940 */
2941
2829 return flags; 2942 return flags;
2830} 2943}
2831 2944
2832unsigned int 2945unsigned int
2833ev_backend (EV_P) EV_NOEXCEPT 2946ev_backend (EV_P) EV_NOEXCEPT
2885 acquire_cb = acquire; 2998 acquire_cb = acquire;
2886} 2999}
2887#endif 3000#endif
2888 3001
2889/* initialise a loop structure, must be zero-initialised */ 3002/* initialise a loop structure, must be zero-initialised */
2890noinline ecb_cold 3003ecb_noinline ecb_cold
2891static void 3004static void
2892loop_init (EV_P_ unsigned int flags) EV_NOEXCEPT 3005loop_init (EV_P_ unsigned int flags) EV_NOEXCEPT
2893{ 3006{
2894 if (!backend) 3007 if (!backend)
2895 { 3008 {
2963 if (!backend && (flags & EVBACKEND_PORT )) backend = port_init (EV_A_ flags); 3076 if (!backend && (flags & EVBACKEND_PORT )) backend = port_init (EV_A_ flags);
2964#endif 3077#endif
2965#if EV_USE_KQUEUE 3078#if EV_USE_KQUEUE
2966 if (!backend && (flags & EVBACKEND_KQUEUE )) backend = kqueue_init (EV_A_ flags); 3079 if (!backend && (flags & EVBACKEND_KQUEUE )) backend = kqueue_init (EV_A_ flags);
2967#endif 3080#endif
3081#if EV_USE_IOURING
3082 if (!backend && (flags & EVBACKEND_IOURING )) backend = iouring_init (EV_A_ flags);
3083#endif
2968#if EV_USE_LINUXAIO 3084#if EV_USE_LINUXAIO
2969 if (!backend && (flags & EVBACKEND_LINUXAIO)) backend = linuxaio_init (EV_A_ flags); 3085 if (!backend && (flags & EVBACKEND_LINUXAIO)) backend = linuxaio_init (EV_A_ flags);
2970#endif 3086#endif
2971#if EV_USE_EPOLL 3087#if EV_USE_EPOLL
2972 if (!backend && (flags & EVBACKEND_EPOLL )) backend = epoll_init (EV_A_ flags); 3088 if (!backend && (flags & EVBACKEND_EPOLL )) backend = epoll_init (EV_A_ flags);
3000 return; 3116 return;
3001#endif 3117#endif
3002 3118
3003#if EV_CLEANUP_ENABLE 3119#if EV_CLEANUP_ENABLE
3004 /* queue cleanup watchers (and execute them) */ 3120 /* queue cleanup watchers (and execute them) */
3005 if (expect_false (cleanupcnt)) 3121 if (ecb_expect_false (cleanupcnt))
3006 { 3122 {
3007 queue_events (EV_A_ (W *)cleanups, cleanupcnt, EV_CLEANUP); 3123 queue_events (EV_A_ (W *)cleanups, cleanupcnt, EV_CLEANUP);
3008 EV_INVOKE_PENDING; 3124 EV_INVOKE_PENDING;
3009 } 3125 }
3010#endif 3126#endif
3045#if EV_USE_PORT 3161#if EV_USE_PORT
3046 if (backend == EVBACKEND_PORT ) port_destroy (EV_A); 3162 if (backend == EVBACKEND_PORT ) port_destroy (EV_A);
3047#endif 3163#endif
3048#if EV_USE_KQUEUE 3164#if EV_USE_KQUEUE
3049 if (backend == EVBACKEND_KQUEUE ) kqueue_destroy (EV_A); 3165 if (backend == EVBACKEND_KQUEUE ) kqueue_destroy (EV_A);
3166#endif
3167#if EV_USE_IOURING
3168 if (backend == EVBACKEND_IOURING ) iouring_destroy (EV_A);
3050#endif 3169#endif
3051#if EV_USE_LINUXAIO 3170#if EV_USE_LINUXAIO
3052 if (backend == EVBACKEND_LINUXAIO) linuxaio_destroy (EV_A); 3171 if (backend == EVBACKEND_LINUXAIO) linuxaio_destroy (EV_A);
3053#endif 3172#endif
3054#if EV_USE_EPOLL 3173#if EV_USE_EPOLL
3113 if (backend == EVBACKEND_PORT ) port_fork (EV_A); 3232 if (backend == EVBACKEND_PORT ) port_fork (EV_A);
3114#endif 3233#endif
3115#if EV_USE_KQUEUE 3234#if EV_USE_KQUEUE
3116 if (backend == EVBACKEND_KQUEUE ) kqueue_fork (EV_A); 3235 if (backend == EVBACKEND_KQUEUE ) kqueue_fork (EV_A);
3117#endif 3236#endif
3237#if EV_USE_IOURING
3238 if (backend == EVBACKEND_IOURING ) iouring_fork (EV_A);
3239#endif
3118#if EV_USE_LINUXAIO 3240#if EV_USE_LINUXAIO
3119 if (backend == EVBACKEND_LINUXAIO) linuxaio_fork (EV_A); 3241 if (backend == EVBACKEND_LINUXAIO) linuxaio_fork (EV_A);
3120#endif 3242#endif
3121#if EV_USE_EPOLL 3243#if EV_USE_EPOLL
3122 if (backend == EVBACKEND_EPOLL ) epoll_fork (EV_A); 3244 if (backend == EVBACKEND_EPOLL ) epoll_fork (EV_A);
3164} 3286}
3165 3287
3166#endif /* multiplicity */ 3288#endif /* multiplicity */
3167 3289
3168#if EV_VERIFY 3290#if EV_VERIFY
3169noinline ecb_cold 3291ecb_noinline ecb_cold
3170static void 3292static void
3171verify_watcher (EV_P_ W w) 3293verify_watcher (EV_P_ W w)
3172{ 3294{
3173 assert (("libev: watcher has invalid priority", ABSPRI (w) >= 0 && ABSPRI (w) < NUMPRI)); 3295 assert (("libev: watcher has invalid priority", ABSPRI (w) >= 0 && ABSPRI (w) < NUMPRI));
3174 3296
3175 if (w->pending) 3297 if (w->pending)
3176 assert (("libev: pending watcher not on pending queue", pendings [ABSPRI (w)][w->pending - 1].w == w)); 3298 assert (("libev: pending watcher not on pending queue", pendings [ABSPRI (w)][w->pending - 1].w == w));
3177} 3299}
3178 3300
3179noinline ecb_cold 3301ecb_noinline ecb_cold
3180static void 3302static void
3181verify_heap (EV_P_ ANHE *heap, int N) 3303verify_heap (EV_P_ ANHE *heap, int N)
3182{ 3304{
3183 int i; 3305 int i;
3184 3306
3190 3312
3191 verify_watcher (EV_A_ (W)ANHE_w (heap [i])); 3313 verify_watcher (EV_A_ (W)ANHE_w (heap [i]));
3192 } 3314 }
3193} 3315}
3194 3316
3195noinline ecb_cold 3317ecb_noinline ecb_cold
3196static void 3318static void
3197array_verify (EV_P_ W *ws, int cnt) 3319array_verify (EV_P_ W *ws, int cnt)
3198{ 3320{
3199 while (cnt--) 3321 while (cnt--)
3200 { 3322 {
3349 count += pendingcnt [pri]; 3471 count += pendingcnt [pri];
3350 3472
3351 return count; 3473 return count;
3352} 3474}
3353 3475
3354noinline 3476ecb_noinline
3355void 3477void
3356ev_invoke_pending (EV_P) 3478ev_invoke_pending (EV_P)
3357{ 3479{
3358 pendingpri = NUMPRI; 3480 pendingpri = NUMPRI;
3359 3481
3378/* make idle watchers pending. this handles the "call-idle */ 3500/* make idle watchers pending. this handles the "call-idle */
3379/* only when higher priorities are idle" logic */ 3501/* only when higher priorities are idle" logic */
3380inline_size void 3502inline_size void
3381idle_reify (EV_P) 3503idle_reify (EV_P)
3382{ 3504{
3383 if (expect_false (idleall)) 3505 if (ecb_expect_false (idleall))
3384 { 3506 {
3385 int pri; 3507 int pri;
3386 3508
3387 for (pri = NUMPRI; pri--; ) 3509 for (pri = NUMPRI; pri--; )
3388 { 3510 {
3437 } 3559 }
3438} 3560}
3439 3561
3440#if EV_PERIODIC_ENABLE 3562#if EV_PERIODIC_ENABLE
3441 3563
3442noinline 3564ecb_noinline
3443static void 3565static void
3444periodic_recalc (EV_P_ ev_periodic *w) 3566periodic_recalc (EV_P_ ev_periodic *w)
3445{ 3567{
3446 ev_tstamp interval = w->interval > MIN_INTERVAL ? w->interval : MIN_INTERVAL; 3568 ev_tstamp interval = w->interval > MIN_INTERVAL ? w->interval : MIN_INTERVAL;
3447 ev_tstamp at = w->offset + interval * ev_floor ((ev_rt_now - w->offset) / interval); 3569 ev_tstamp at = w->offset + interval * ev_floor ((ev_rt_now - w->offset) / interval);
3450 while (at <= ev_rt_now) 3572 while (at <= ev_rt_now)
3451 { 3573 {
3452 ev_tstamp nat = at + w->interval; 3574 ev_tstamp nat = at + w->interval;
3453 3575
3454 /* when resolution fails us, we use ev_rt_now */ 3576 /* when resolution fails us, we use ev_rt_now */
3455 if (expect_false (nat == at)) 3577 if (ecb_expect_false (nat == at))
3456 { 3578 {
3457 at = ev_rt_now; 3579 at = ev_rt_now;
3458 break; 3580 break;
3459 } 3581 }
3460 3582
3506 } 3628 }
3507} 3629}
3508 3630
3509/* simply recalculate all periodics */ 3631/* simply recalculate all periodics */
3510/* TODO: maybe ensure that at least one event happens when jumping forward? */ 3632/* TODO: maybe ensure that at least one event happens when jumping forward? */
3511noinline ecb_cold 3633ecb_noinline ecb_cold
3512static void 3634static void
3513periodics_reschedule (EV_P) 3635periodics_reschedule (EV_P)
3514{ 3636{
3515 int i; 3637 int i;
3516 3638
3530 reheap (periodics, periodiccnt); 3652 reheap (periodics, periodiccnt);
3531} 3653}
3532#endif 3654#endif
3533 3655
3534/* adjust all timers by a given offset */ 3656/* adjust all timers by a given offset */
3535noinline ecb_cold 3657ecb_noinline ecb_cold
3536static void 3658static void
3537timers_reschedule (EV_P_ ev_tstamp adjust) 3659timers_reschedule (EV_P_ ev_tstamp adjust)
3538{ 3660{
3539 int i; 3661 int i;
3540 3662
3550/* also detect if there was a timejump, and act accordingly */ 3672/* also detect if there was a timejump, and act accordingly */
3551inline_speed void 3673inline_speed void
3552time_update (EV_P_ ev_tstamp max_block) 3674time_update (EV_P_ ev_tstamp max_block)
3553{ 3675{
3554#if EV_USE_MONOTONIC 3676#if EV_USE_MONOTONIC
3555 if (expect_true (have_monotonic)) 3677 if (ecb_expect_true (have_monotonic))
3556 { 3678 {
3557 int i; 3679 int i;
3558 ev_tstamp odiff = rtmn_diff; 3680 ev_tstamp odiff = rtmn_diff;
3559 3681
3560 mn_now = get_clock (); 3682 mn_now = get_clock ();
3561 3683
3562 /* only fetch the realtime clock every 0.5*MIN_TIMEJUMP seconds */ 3684 /* only fetch the realtime clock every 0.5*MIN_TIMEJUMP seconds */
3563 /* interpolate in the meantime */ 3685 /* interpolate in the meantime */
3564 if (expect_true (mn_now - now_floor < MIN_TIMEJUMP * .5)) 3686 if (ecb_expect_true (mn_now - now_floor < MIN_TIMEJUMP * .5))
3565 { 3687 {
3566 ev_rt_now = rtmn_diff + mn_now; 3688 ev_rt_now = rtmn_diff + mn_now;
3567 return; 3689 return;
3568 } 3690 }
3569 3691
3583 ev_tstamp diff; 3705 ev_tstamp diff;
3584 rtmn_diff = ev_rt_now - mn_now; 3706 rtmn_diff = ev_rt_now - mn_now;
3585 3707
3586 diff = odiff - rtmn_diff; 3708 diff = odiff - rtmn_diff;
3587 3709
3588 if (expect_true ((diff < 0. ? -diff : diff) < MIN_TIMEJUMP)) 3710 if (ecb_expect_true ((diff < 0. ? -diff : diff) < MIN_TIMEJUMP))
3589 return; /* all is well */ 3711 return; /* all is well */
3590 3712
3591 ev_rt_now = ev_time (); 3713 ev_rt_now = ev_time ();
3592 mn_now = get_clock (); 3714 mn_now = get_clock ();
3593 now_floor = mn_now; 3715 now_floor = mn_now;
3602 else 3724 else
3603#endif 3725#endif
3604 { 3726 {
3605 ev_rt_now = ev_time (); 3727 ev_rt_now = ev_time ();
3606 3728
3607 if (expect_false (mn_now > ev_rt_now || ev_rt_now > mn_now + max_block + MIN_TIMEJUMP)) 3729 if (ecb_expect_false (mn_now > ev_rt_now || ev_rt_now > mn_now + max_block + MIN_TIMEJUMP))
3608 { 3730 {
3609 /* adjust timers. this is easy, as the offset is the same for all of them */ 3731 /* adjust timers. this is easy, as the offset is the same for all of them */
3610 timers_reschedule (EV_A_ ev_rt_now - mn_now); 3732 timers_reschedule (EV_A_ ev_rt_now - mn_now);
3611#if EV_PERIODIC_ENABLE 3733#if EV_PERIODIC_ENABLE
3612 periodics_reschedule (EV_A); 3734 periodics_reschedule (EV_A);
3635#if EV_VERIFY >= 2 3757#if EV_VERIFY >= 2
3636 ev_verify (EV_A); 3758 ev_verify (EV_A);
3637#endif 3759#endif
3638 3760
3639#ifndef _WIN32 3761#ifndef _WIN32
3640 if (expect_false (curpid)) /* penalise the forking check even more */ 3762 if (ecb_expect_false (curpid)) /* penalise the forking check even more */
3641 if (expect_false (getpid () != curpid)) 3763 if (ecb_expect_false (getpid () != curpid))
3642 { 3764 {
3643 curpid = getpid (); 3765 curpid = getpid ();
3644 postfork = 1; 3766 postfork = 1;
3645 } 3767 }
3646#endif 3768#endif
3647 3769
3648#if EV_FORK_ENABLE 3770#if EV_FORK_ENABLE
3649 /* we might have forked, so queue fork handlers */ 3771 /* we might have forked, so queue fork handlers */
3650 if (expect_false (postfork)) 3772 if (ecb_expect_false (postfork))
3651 if (forkcnt) 3773 if (forkcnt)
3652 { 3774 {
3653 queue_events (EV_A_ (W *)forks, forkcnt, EV_FORK); 3775 queue_events (EV_A_ (W *)forks, forkcnt, EV_FORK);
3654 EV_INVOKE_PENDING; 3776 EV_INVOKE_PENDING;
3655 } 3777 }
3656#endif 3778#endif
3657 3779
3658#if EV_PREPARE_ENABLE 3780#if EV_PREPARE_ENABLE
3659 /* queue prepare watchers (and execute them) */ 3781 /* queue prepare watchers (and execute them) */
3660 if (expect_false (preparecnt)) 3782 if (ecb_expect_false (preparecnt))
3661 { 3783 {
3662 queue_events (EV_A_ (W *)prepares, preparecnt, EV_PREPARE); 3784 queue_events (EV_A_ (W *)prepares, preparecnt, EV_PREPARE);
3663 EV_INVOKE_PENDING; 3785 EV_INVOKE_PENDING;
3664 } 3786 }
3665#endif 3787#endif
3666 3788
3667 if (expect_false (loop_done)) 3789 if (ecb_expect_false (loop_done))
3668 break; 3790 break;
3669 3791
3670 /* we might have forked, so reify kernel state if necessary */ 3792 /* we might have forked, so reify kernel state if necessary */
3671 if (expect_false (postfork)) 3793 if (ecb_expect_false (postfork))
3672 loop_fork (EV_A); 3794 loop_fork (EV_A);
3673 3795
3674 /* update fd-related kernel structures */ 3796 /* update fd-related kernel structures */
3675 fd_reify (EV_A); 3797 fd_reify (EV_A);
3676 3798
3688 /* from now on, we want a pipe-wake-up */ 3810 /* from now on, we want a pipe-wake-up */
3689 pipe_write_wanted = 1; 3811 pipe_write_wanted = 1;
3690 3812
3691 ECB_MEMORY_FENCE; /* make sure pipe_write_wanted is visible before we check for potential skips */ 3813 ECB_MEMORY_FENCE; /* make sure pipe_write_wanted is visible before we check for potential skips */
3692 3814
3693 if (expect_true (!(flags & EVRUN_NOWAIT || idleall || !activecnt || pipe_write_skipped))) 3815 if (ecb_expect_true (!(flags & EVRUN_NOWAIT || idleall || !activecnt || pipe_write_skipped)))
3694 { 3816 {
3695 waittime = MAX_BLOCKTIME; 3817 waittime = MAX_BLOCKTIME;
3696 3818
3697 if (timercnt) 3819 if (timercnt)
3698 { 3820 {
3707 if (waittime > to) waittime = to; 3829 if (waittime > to) waittime = to;
3708 } 3830 }
3709#endif 3831#endif
3710 3832
3711 /* don't let timeouts decrease the waittime below timeout_blocktime */ 3833 /* don't let timeouts decrease the waittime below timeout_blocktime */
3712 if (expect_false (waittime < timeout_blocktime)) 3834 if (ecb_expect_false (waittime < timeout_blocktime))
3713 waittime = timeout_blocktime; 3835 waittime = timeout_blocktime;
3714 3836
3715 /* at this point, we NEED to wait, so we have to ensure */ 3837 /* at this point, we NEED to wait, so we have to ensure */
3716 /* to pass a minimum nonzero value to the backend */ 3838 /* to pass a minimum nonzero value to the backend */
3717 if (expect_false (waittime < backend_mintime)) 3839 if (ecb_expect_false (waittime < backend_mintime))
3718 waittime = backend_mintime; 3840 waittime = backend_mintime;
3719 3841
3720 /* extra check because io_blocktime is commonly 0 */ 3842 /* extra check because io_blocktime is commonly 0 */
3721 if (expect_false (io_blocktime)) 3843 if (ecb_expect_false (io_blocktime))
3722 { 3844 {
3723 sleeptime = io_blocktime - (mn_now - prev_mn_now); 3845 sleeptime = io_blocktime - (mn_now - prev_mn_now);
3724 3846
3725 if (sleeptime > waittime - backend_mintime) 3847 if (sleeptime > waittime - backend_mintime)
3726 sleeptime = waittime - backend_mintime; 3848 sleeptime = waittime - backend_mintime;
3727 3849
3728 if (expect_true (sleeptime > 0.)) 3850 if (ecb_expect_true (sleeptime > 0.))
3729 { 3851 {
3730 ev_sleep (sleeptime); 3852 ev_sleep (sleeptime);
3731 waittime -= sleeptime; 3853 waittime -= sleeptime;
3732 } 3854 }
3733 } 3855 }
3747 { 3869 {
3748 assert (("libev: pipe_w not active, but pipe not written", ev_is_active (&pipe_w))); 3870 assert (("libev: pipe_w not active, but pipe not written", ev_is_active (&pipe_w)));
3749 ev_feed_event (EV_A_ &pipe_w, EV_CUSTOM); 3871 ev_feed_event (EV_A_ &pipe_w, EV_CUSTOM);
3750 } 3872 }
3751 3873
3752
3753 /* update ev_rt_now, do magic */ 3874 /* update ev_rt_now, do magic */
3754 time_update (EV_A_ waittime + sleeptime); 3875 time_update (EV_A_ waittime + sleeptime);
3755 } 3876 }
3756 3877
3757 /* queue pending timers and reschedule them */ 3878 /* queue pending timers and reschedule them */
3765 idle_reify (EV_A); 3886 idle_reify (EV_A);
3766#endif 3887#endif
3767 3888
3768#if EV_CHECK_ENABLE 3889#if EV_CHECK_ENABLE
3769 /* queue check watchers, to be executed first */ 3890 /* queue check watchers, to be executed first */
3770 if (expect_false (checkcnt)) 3891 if (ecb_expect_false (checkcnt))
3771 queue_events (EV_A_ (W *)checks, checkcnt, EV_CHECK); 3892 queue_events (EV_A_ (W *)checks, checkcnt, EV_CHECK);
3772#endif 3893#endif
3773 3894
3774 EV_INVOKE_PENDING; 3895 EV_INVOKE_PENDING;
3775 } 3896 }
3776 while (expect_true ( 3897 while (ecb_expect_true (
3777 activecnt 3898 activecnt
3778 && !loop_done 3899 && !loop_done
3779 && !(flags & (EVRUN_ONCE | EVRUN_NOWAIT)) 3900 && !(flags & (EVRUN_ONCE | EVRUN_NOWAIT))
3780 )); 3901 ));
3781 3902
3845inline_size void 3966inline_size void
3846wlist_del (WL *head, WL elem) 3967wlist_del (WL *head, WL elem)
3847{ 3968{
3848 while (*head) 3969 while (*head)
3849 { 3970 {
3850 if (expect_true (*head == elem)) 3971 if (ecb_expect_true (*head == elem))
3851 { 3972 {
3852 *head = elem->next; 3973 *head = elem->next;
3853 break; 3974 break;
3854 } 3975 }
3855 3976
3872ev_clear_pending (EV_P_ void *w) EV_NOEXCEPT 3993ev_clear_pending (EV_P_ void *w) EV_NOEXCEPT
3873{ 3994{
3874 W w_ = (W)w; 3995 W w_ = (W)w;
3875 int pending = w_->pending; 3996 int pending = w_->pending;
3876 3997
3877 if (expect_true (pending)) 3998 if (ecb_expect_true (pending))
3878 { 3999 {
3879 ANPENDING *p = pendings [ABSPRI (w_)] + pending - 1; 4000 ANPENDING *p = pendings [ABSPRI (w_)] + pending - 1;
3880 p->w = (W)&pending_w; 4001 p->w = (W)&pending_w;
3881 w_->pending = 0; 4002 w_->pending = 0;
3882 return p->events; 4003 return p->events;
3909 w->active = 0; 4030 w->active = 0;
3910} 4031}
3911 4032
3912/*****************************************************************************/ 4033/*****************************************************************************/
3913 4034
3914noinline 4035ecb_noinline
3915void 4036void
3916ev_io_start (EV_P_ ev_io *w) EV_NOEXCEPT 4037ev_io_start (EV_P_ ev_io *w) EV_NOEXCEPT
3917{ 4038{
3918 int fd = w->fd; 4039 int fd = w->fd;
3919 4040
3920 if (expect_false (ev_is_active (w))) 4041 if (ecb_expect_false (ev_is_active (w)))
3921 return; 4042 return;
3922 4043
3923 assert (("libev: ev_io_start called with negative fd", fd >= 0)); 4044 assert (("libev: ev_io_start called with negative fd", fd >= 0));
3924 assert (("libev: ev_io_start called with illegal event mask", !(w->events & ~(EV__IOFDSET | EV_READ | EV_WRITE)))); 4045 assert (("libev: ev_io_start called with illegal event mask", !(w->events & ~(EV__IOFDSET | EV_READ | EV_WRITE))));
3925 4046
4047#if EV_VERIFY >= 2
4048 assert (("libev: ev_io_start called on watcher with invalid fd", fd_valid (fd)));
4049#endif
3926 EV_FREQUENT_CHECK; 4050 EV_FREQUENT_CHECK;
3927 4051
3928 ev_start (EV_A_ (W)w, 1); 4052 ev_start (EV_A_ (W)w, 1);
3929 array_needsize (ANFD, anfds, anfdmax, fd + 1, array_needsize_zerofill); 4053 array_needsize (ANFD, anfds, anfdmax, fd + 1, array_needsize_zerofill);
3930 wlist_add (&anfds[fd].head, (WL)w); 4054 wlist_add (&anfds[fd].head, (WL)w);
3936 w->events &= ~EV__IOFDSET; 4060 w->events &= ~EV__IOFDSET;
3937 4061
3938 EV_FREQUENT_CHECK; 4062 EV_FREQUENT_CHECK;
3939} 4063}
3940 4064
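The EV_VERIFY >= 2 assertion added above makes ev_io_start reject watchers whose fd is not open. A minimal correct-usage sketch with the public API, watching fd 0 which is open by default (stdin_cb and stdin_watcher are illustrative names):

#include <ev.h>

static void
stdin_cb (EV_P_ ev_io *w, int revents)
{
  ev_io_stop (EV_A_ w);            /* stop after the first readable event */
  ev_break (EV_A_ EVBREAK_ALL);
}

int
main (void)
{
  struct ev_loop *loop = EV_DEFAULT;
  ev_io stdin_watcher;

  ev_io_init (&stdin_watcher, stdin_cb, /* fd = */ 0, EV_READ);
  ev_io_start (loop, &stdin_watcher);  /* fd 0 is valid, assertion holds */

  ev_run (loop, 0);
  return 0;
}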
3941noinline 4065ecb_noinline
3942void 4066void
3943ev_io_stop (EV_P_ ev_io *w) EV_NOEXCEPT 4067ev_io_stop (EV_P_ ev_io *w) EV_NOEXCEPT
3944{ 4068{
3945 clear_pending (EV_A_ (W)w); 4069 clear_pending (EV_A_ (W)w);
3946 if (expect_false (!ev_is_active (w))) 4070 if (ecb_expect_false (!ev_is_active (w)))
3947 return; 4071 return;
3948 4072
3949 assert (("libev: ev_io_stop called with illegal fd (must stay constant after start!)", w->fd >= 0 && w->fd < anfdmax)); 4073 assert (("libev: ev_io_stop called with illegal fd (must stay constant after start!)", w->fd >= 0 && w->fd < anfdmax));
3950 4074
4075#if EV_VERIFY >= 2
4076 assert (("libev: ev_io_stop called on watcher with invalid fd", fd_valid (w->fd)));
4077#endif
3951 EV_FREQUENT_CHECK; 4078 EV_FREQUENT_CHECK;
3952 4079
3953 wlist_del (&anfds[w->fd].head, (WL)w); 4080 wlist_del (&anfds[w->fd].head, (WL)w);
3954 ev_stop (EV_A_ (W)w); 4081 ev_stop (EV_A_ (W)w);
3955 4082
3956 fd_change (EV_A_ w->fd, EV_ANFD_REIFY); 4083 fd_change (EV_A_ w->fd, EV_ANFD_REIFY);
3957 4084
3958 EV_FREQUENT_CHECK; 4085 EV_FREQUENT_CHECK;
3959} 4086}
3960 4087
3961noinline 4088ecb_noinline
3962void 4089void
3963ev_timer_start (EV_P_ ev_timer *w) EV_NOEXCEPT 4090ev_timer_start (EV_P_ ev_timer *w) EV_NOEXCEPT
3964{ 4091{
3965 if (expect_false (ev_is_active (w))) 4092 if (ecb_expect_false (ev_is_active (w)))
3966 return; 4093 return;
3967 4094
3968 ev_at (w) += mn_now; 4095 ev_at (w) += mn_now;
3969 4096
3970 assert (("libev: ev_timer_start called with negative timer repeat value", w->repeat >= 0.)); 4097 assert (("libev: ev_timer_start called with negative timer repeat value", w->repeat >= 0.));
3981 EV_FREQUENT_CHECK; 4108 EV_FREQUENT_CHECK;
3982 4109
3983 /*assert (("libev: internal timer heap corruption", timers [ev_active (w)] == (WT)w));*/ 4110 /*assert (("libev: internal timer heap corruption", timers [ev_active (w)] == (WT)w));*/
3984} 4111}
3985 4112
3986noinline 4113ecb_noinline
3987void 4114void
3988ev_timer_stop (EV_P_ ev_timer *w) EV_NOEXCEPT 4115ev_timer_stop (EV_P_ ev_timer *w) EV_NOEXCEPT
3989{ 4116{
3990 clear_pending (EV_A_ (W)w); 4117 clear_pending (EV_A_ (W)w);
3991 if (expect_false (!ev_is_active (w))) 4118 if (ecb_expect_false (!ev_is_active (w)))
3992 return; 4119 return;
3993 4120
3994 EV_FREQUENT_CHECK; 4121 EV_FREQUENT_CHECK;
3995 4122
3996 { 4123 {
3998 4125
3999 assert (("libev: internal timer heap corruption", ANHE_w (timers [active]) == (WT)w)); 4126 assert (("libev: internal timer heap corruption", ANHE_w (timers [active]) == (WT)w));
4000 4127
4001 --timercnt; 4128 --timercnt;
4002 4129
4003 if (expect_true (active < timercnt + HEAP0)) 4130 if (ecb_expect_true (active < timercnt + HEAP0))
4004 { 4131 {
4005 timers [active] = timers [timercnt + HEAP0]; 4132 timers [active] = timers [timercnt + HEAP0];
4006 adjustheap (timers, timercnt, active); 4133 adjustheap (timers, timercnt, active);
4007 } 4134 }
4008 } 4135 }
4012 ev_stop (EV_A_ (W)w); 4139 ev_stop (EV_A_ (W)w);
4013 4140
4014 EV_FREQUENT_CHECK; 4141 EV_FREQUENT_CHECK;
4015} 4142}
4016 4143
4017noinline 4144ecb_noinline
4018void 4145void
4019ev_timer_again (EV_P_ ev_timer *w) EV_NOEXCEPT 4146ev_timer_again (EV_P_ ev_timer *w) EV_NOEXCEPT
4020{ 4147{
4021 EV_FREQUENT_CHECK; 4148 EV_FREQUENT_CHECK;
4022 4149
4047{ 4174{
4048 return ev_at (w) - (ev_is_active (w) ? mn_now : 0.); 4175 return ev_at (w) - (ev_is_active (w) ? mn_now : 0.);
4049} 4176}
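
ev_timer_again, whose fast path appears above, re-arms a repeat-only timer without a stop/start round trip. A sketch of the usual inactivity-timeout idiom follows; the names and the 10-second repeat are illustrative choices, not taken from this diff:

#include <ev.h>
#include <stdio.h>

static ev_timer timeout_watcher;

static void
timeout_cb (struct ev_loop *loop, ev_timer *w, int revents)
{
  printf ("no activity for %.1f seconds\n", w->repeat);
  ev_break (loop, EVBREAK_ONE);
}

int
main (void)
{
  struct ev_loop *loop = EV_DEFAULT;

  /* repeat-only timer: armed and re-armed exclusively via ev_timer_again () */
  ev_timer_init (&timeout_watcher, timeout_cb, 0., 10.);
  ev_timer_again (loop, &timeout_watcher);

  /* on every "activity" event elsewhere: ev_timer_again (loop, &timeout_watcher); */

  ev_run (loop, 0);
  return 0;
}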
4050 4177
4051#if EV_PERIODIC_ENABLE 4178#if EV_PERIODIC_ENABLE
4052noinline 4179ecb_noinline
4053void 4180void
4054ev_periodic_start (EV_P_ ev_periodic *w) EV_NOEXCEPT 4181ev_periodic_start (EV_P_ ev_periodic *w) EV_NOEXCEPT
4055{ 4182{
4056 if (expect_false (ev_is_active (w))) 4183 if (ecb_expect_false (ev_is_active (w)))
4057 return; 4184 return;
4058 4185
4059 if (w->reschedule_cb) 4186 if (w->reschedule_cb)
4060 ev_at (w) = w->reschedule_cb (w, ev_rt_now); 4187 ev_at (w) = w->reschedule_cb (w, ev_rt_now);
4061 else if (w->interval) 4188 else if (w->interval)
4078 EV_FREQUENT_CHECK; 4205 EV_FREQUENT_CHECK;
4079 4206
4080 /*assert (("libev: internal periodic heap corruption", ANHE_w (periodics [ev_active (w)]) == (WT)w));*/ 4207 /*assert (("libev: internal periodic heap corruption", ANHE_w (periodics [ev_active (w)]) == (WT)w));*/
4081} 4208}
4082 4209
4083noinline 4210ecb_noinline
4084void 4211void
4085ev_periodic_stop (EV_P_ ev_periodic *w) EV_NOEXCEPT 4212ev_periodic_stop (EV_P_ ev_periodic *w) EV_NOEXCEPT
4086{ 4213{
4087 clear_pending (EV_A_ (W)w); 4214 clear_pending (EV_A_ (W)w);
4088 if (expect_false (!ev_is_active (w))) 4215 if (ecb_expect_false (!ev_is_active (w)))
4089 return; 4216 return;
4090 4217
4091 EV_FREQUENT_CHECK; 4218 EV_FREQUENT_CHECK;
4092 4219
4093 { 4220 {
4095 4222
4096 assert (("libev: internal periodic heap corruption", ANHE_w (periodics [active]) == (WT)w)); 4223 assert (("libev: internal periodic heap corruption", ANHE_w (periodics [active]) == (WT)w));
4097 4224
4098 --periodiccnt; 4225 --periodiccnt;
4099 4226
4100 if (expect_true (active < periodiccnt + HEAP0)) 4227 if (ecb_expect_true (active < periodiccnt + HEAP0))
4101 { 4228 {
4102 periodics [active] = periodics [periodiccnt + HEAP0]; 4229 periodics [active] = periodics [periodiccnt + HEAP0];
4103 adjustheap (periodics, periodiccnt, active); 4230 adjustheap (periodics, periodiccnt, active);
4104 } 4231 }
4105 } 4232 }
4107 ev_stop (EV_A_ (W)w); 4234 ev_stop (EV_A_ (W)w);
4108 4235
4109 EV_FREQUENT_CHECK; 4236 EV_FREQUENT_CHECK;
4110} 4237}
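
For the interval-based branch of ev_periodic_start above, a watcher that fires at every full minute of wall-clock time could be set up as follows. This is a sketch only, assuming EV_PERIODIC_ENABLE; the callback name is a placeholder:

#include <ev.h>
#include <stdio.h>

static ev_periodic minute_watcher;

static void
minute_cb (struct ev_loop *loop, ev_periodic *w, int revents)
{
  printf ("wall clock reached the next full minute\n");
}

int
main (void)
{
  struct ev_loop *loop = EV_DEFAULT;

  /* offset 0., interval 60., no reschedule callback: fire whenever
     the real time is a multiple of 60 seconds */
  ev_periodic_init (&minute_watcher, minute_cb, 0., 60., 0);
  ev_periodic_start (loop, &minute_watcher);

  ev_run (loop, 0);
  return 0;
}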
4111 4238
4112noinline 4239ecb_noinline
4113void 4240void
4114ev_periodic_again (EV_P_ ev_periodic *w) EV_NOEXCEPT 4241ev_periodic_again (EV_P_ ev_periodic *w) EV_NOEXCEPT
4115{ 4242{
4116 /* TODO: use adjustheap and recalculation */ 4243 /* TODO: use adjustheap and recalculation */
4117 ev_periodic_stop (EV_A_ w); 4244 ev_periodic_stop (EV_A_ w);
4123# define SA_RESTART 0 4250# define SA_RESTART 0
4124#endif 4251#endif
4125 4252
4126#if EV_SIGNAL_ENABLE 4253#if EV_SIGNAL_ENABLE
4127 4254
4128noinline 4255ecb_noinline
4129void 4256void
4130ev_signal_start (EV_P_ ev_signal *w) EV_NOEXCEPT 4257ev_signal_start (EV_P_ ev_signal *w) EV_NOEXCEPT
4131{ 4258{
4132 if (expect_false (ev_is_active (w))) 4259 if (ecb_expect_false (ev_is_active (w)))
4133 return; 4260 return;
4134 4261
4135 assert (("libev: ev_signal_start called with illegal signal number", w->signum > 0 && w->signum < EV_NSIG)); 4262 assert (("libev: ev_signal_start called with illegal signal number", w->signum > 0 && w->signum < EV_NSIG));
4136 4263
4137#if EV_MULTIPLICITY 4264#if EV_MULTIPLICITY
4206 } 4333 }
4207 4334
4208 EV_FREQUENT_CHECK; 4335 EV_FREQUENT_CHECK;
4209} 4336}
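
A hedged usage sketch for the signal path above: catching SIGINT on the default loop and breaking out of ev_run. The callback name is illustrative:

#include <ev.h>
#include <stdio.h>
#include <signal.h>

static ev_signal sigint_watcher;

static void
sigint_cb (struct ev_loop *loop, ev_signal *w, int revents)
{
  printf ("caught SIGINT, leaving the loop\n");
  ev_break (loop, EVBREAK_ALL);
}

int
main (void)
{
  struct ev_loop *loop = EV_DEFAULT;

  ev_signal_init (&sigint_watcher, sigint_cb, SIGINT);
  ev_signal_start (loop, &sigint_watcher);

  ev_run (loop, 0);
  return 0;
}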
4210 4337
4211noinline 4338ecb_noinline
4212void 4339void
4213ev_signal_stop (EV_P_ ev_signal *w) EV_NOEXCEPT 4340ev_signal_stop (EV_P_ ev_signal *w) EV_NOEXCEPT
4214{ 4341{
4215 clear_pending (EV_A_ (W)w); 4342 clear_pending (EV_A_ (W)w);
4216 if (expect_false (!ev_is_active (w))) 4343 if (ecb_expect_false (!ev_is_active (w)))
4217 return; 4344 return;
4218 4345
4219 EV_FREQUENT_CHECK; 4346 EV_FREQUENT_CHECK;
4220 4347
4221 wlist_del (&signals [w->signum - 1].head, (WL)w); 4348 wlist_del (&signals [w->signum - 1].head, (WL)w);
4254ev_child_start (EV_P_ ev_child *w) EV_NOEXCEPT 4381ev_child_start (EV_P_ ev_child *w) EV_NOEXCEPT
4255{ 4382{
4256#if EV_MULTIPLICITY 4383#if EV_MULTIPLICITY
4257 assert (("libev: child watchers are only supported in the default loop", loop == ev_default_loop_ptr)); 4384 assert (("libev: child watchers are only supported in the default loop", loop == ev_default_loop_ptr));
4258#endif 4385#endif
4259 if (expect_false (ev_is_active (w))) 4386 if (ecb_expect_false (ev_is_active (w)))
4260 return; 4387 return;
4261 4388
4262 EV_FREQUENT_CHECK; 4389 EV_FREQUENT_CHECK;
4263 4390
4264 ev_start (EV_A_ (W)w, 1); 4391 ev_start (EV_A_ (W)w, 1);
4269 4396
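
Child watchers, as the assertion in ev_child_start above notes, are only supported in the default loop. A minimal sketch; the callback name and the immediately exiting child process are illustrative:

#include <ev.h>
#include <stdio.h>
#include <unistd.h>

static ev_child child_watcher;

static void
child_cb (struct ev_loop *loop, ev_child *w, int revents)
{
  printf ("pid %d exited with status %d\n", w->rpid, w->rstatus);
  ev_child_stop (loop, w);
}

int
main (void)
{
  struct ev_loop *loop = ev_default_loop (0); /* child watchers require the default loop */
  pid_t pid = fork ();

  if (pid == 0)
    _exit (0); /* child: exit immediately so the watcher has something to report */

  ev_child_init (&child_watcher, child_cb, pid, 0);
  ev_child_start (loop, &child_watcher);

  ev_run (loop, 0);
  return 0;
}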
4270void 4397void
4271ev_child_stop (EV_P_ ev_child *w) EV_NOEXCEPT 4398ev_child_stop (EV_P_ ev_child *w) EV_NOEXCEPT
4272{ 4399{
4273 clear_pending (EV_A_ (W)w); 4400 clear_pending (EV_A_ (W)w);
4274 if (expect_false (!ev_is_active (w))) 4401 if (ecb_expect_false (!ev_is_active (w)))
4275 return; 4402 return;
4276 4403
4277 EV_FREQUENT_CHECK; 4404 EV_FREQUENT_CHECK;
4278 4405
4279 wlist_del (&childs [w->pid & ((EV_PID_HASHSIZE) - 1)], (WL)w); 4406 wlist_del (&childs [w->pid & ((EV_PID_HASHSIZE) - 1)], (WL)w);
4293 4420
4294#define DEF_STAT_INTERVAL 5.0074891 4421#define DEF_STAT_INTERVAL 5.0074891
4295#define NFS_STAT_INTERVAL 30.1074891 /* for filesystems potentially failing inotify */ 4422#define NFS_STAT_INTERVAL 30.1074891 /* for filesystems potentially failing inotify */
4296#define MIN_STAT_INTERVAL 0.1074891 4423#define MIN_STAT_INTERVAL 0.1074891
4297 4424
4298noinline static void stat_timer_cb (EV_P_ ev_timer *w_, int revents); 4425ecb_noinline static void stat_timer_cb (EV_P_ ev_timer *w_, int revents);
4299 4426
4300#if EV_USE_INOTIFY 4427#if EV_USE_INOTIFY
4301 4428
4302/* the * 2 is to allow for alignment padding, which for some reason is >> 8 */ 4429/* the * 2 is to allow for alignment padding, which for some reason is >> 8 */
4303# define EV_INOTIFY_BUFSIZE (sizeof (struct inotify_event) * 2 + NAME_MAX) 4430# define EV_INOTIFY_BUFSIZE (sizeof (struct inotify_event) * 2 + NAME_MAX)
4304 4431
4305noinline 4432ecb_noinline
4306static void 4433static void
4307infy_add (EV_P_ ev_stat *w) 4434infy_add (EV_P_ ev_stat *w)
4308{ 4435{
4309 w->wd = inotify_add_watch (fs_fd, w->path, 4436 w->wd = inotify_add_watch (fs_fd, w->path,
4310 IN_ATTRIB | IN_DELETE_SELF | IN_MOVE_SELF | IN_MODIFY 4437 IN_ATTRIB | IN_DELETE_SELF | IN_MOVE_SELF | IN_MODIFY
4375 if (ev_is_active (&w->timer)) ev_ref (EV_A); 4502 if (ev_is_active (&w->timer)) ev_ref (EV_A);
4376 ev_timer_again (EV_A_ &w->timer); 4503 ev_timer_again (EV_A_ &w->timer);
4377 if (ev_is_active (&w->timer)) ev_unref (EV_A); 4504 if (ev_is_active (&w->timer)) ev_unref (EV_A);
4378} 4505}
4379 4506
4380noinline 4507ecb_noinline
4381static void 4508static void
4382infy_del (EV_P_ ev_stat *w) 4509infy_del (EV_P_ ev_stat *w)
4383{ 4510{
4384 int slot; 4511 int slot;
4385 int wd = w->wd; 4512 int wd = w->wd;
4393 4520
4394 /* remove this watcher, if others are watching it, they will rearm */ 4521 /* remove this watcher, if others are watching it, they will rearm */
4395 inotify_rm_watch (fs_fd, wd); 4522 inotify_rm_watch (fs_fd, wd);
4396} 4523}
4397 4524
4398noinline 4525ecb_noinline
4399static void 4526static void
4400infy_wd (EV_P_ int slot, int wd, struct inotify_event *ev) 4527infy_wd (EV_P_ int slot, int wd, struct inotify_event *ev)
4401{ 4528{
4402 if (slot < 0) 4529 if (slot < 0)
4403 /* overflow, need to check for all hash slots */ 4530 /* overflow, need to check for all hash slots */
4549 w->attr.st_nlink = 0; 4676 w->attr.st_nlink = 0;
4550 else if (!w->attr.st_nlink) 4677 else if (!w->attr.st_nlink)
4551 w->attr.st_nlink = 1; 4678 w->attr.st_nlink = 1;
4552} 4679}
4553 4680
4554noinline 4681ecb_noinline
4555static void 4682static void
4556stat_timer_cb (EV_P_ ev_timer *w_, int revents) 4683stat_timer_cb (EV_P_ ev_timer *w_, int revents)
4557{ 4684{
4558 ev_stat *w = (ev_stat *)(((char *)w_) - offsetof (ev_stat, timer)); 4685 ev_stat *w = (ev_stat *)(((char *)w_) - offsetof (ev_stat, timer));
4559 4686
4593} 4720}
4594 4721
4595void 4722void
4596ev_stat_start (EV_P_ ev_stat *w) EV_NOEXCEPT 4723ev_stat_start (EV_P_ ev_stat *w) EV_NOEXCEPT
4597{ 4724{
4598 if (expect_false (ev_is_active (w))) 4725 if (ecb_expect_false (ev_is_active (w)))
4599 return; 4726 return;
4600 4727
4601 ev_stat_stat (EV_A_ w); 4728 ev_stat_stat (EV_A_ w);
4602 4729
4603 if (w->interval < MIN_STAT_INTERVAL && w->interval) 4730 if (w->interval < MIN_STAT_INTERVAL && w->interval)
4625 4752
4626void 4753void
4627ev_stat_stop (EV_P_ ev_stat *w) EV_NOEXCEPT 4754ev_stat_stop (EV_P_ ev_stat *w) EV_NOEXCEPT
4628{ 4755{
4629 clear_pending (EV_A_ (W)w); 4756 clear_pending (EV_A_ (W)w);
4630 if (expect_false (!ev_is_active (w))) 4757 if (ecb_expect_false (!ev_is_active (w)))
4631 return; 4758 return;
4632 4759
4633 EV_FREQUENT_CHECK; 4760 EV_FREQUENT_CHECK;
4634 4761
4635#if EV_USE_INOTIFY 4762#if EV_USE_INOTIFY
4650 4777
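
A usage sketch for the stat machinery above: polling a path with the default interval (passing 0. selects DEF_STAT_INTERVAL) and distinguishing "changed" from "gone" via st_nlink, which ev_stat_stat normalizes as shown earlier. The path and names are illustrative:

#include <ev.h>
#include <stdio.h>

static ev_stat passwd_watcher;

static void
passwd_cb (struct ev_loop *loop, ev_stat *w, int revents)
{
  if (w->attr.st_nlink)
    printf ("%s changed (size now %ld)\n", w->path, (long)w->attr.st_size);
  else
    printf ("%s does not exist anymore\n", w->path);
}

int
main (void)
{
  struct ev_loop *loop = EV_DEFAULT;

  ev_stat_init (&passwd_watcher, passwd_cb, "/etc/passwd", 0.);
  ev_stat_start (loop, &passwd_watcher);

  ev_run (loop, 0);
  return 0;
}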
4651#if EV_IDLE_ENABLE 4778#if EV_IDLE_ENABLE
4652void 4779void
4653ev_idle_start (EV_P_ ev_idle *w) EV_NOEXCEPT 4780ev_idle_start (EV_P_ ev_idle *w) EV_NOEXCEPT
4654{ 4781{
4655 if (expect_false (ev_is_active (w))) 4782 if (ecb_expect_false (ev_is_active (w)))
4656 return; 4783 return;
4657 4784
4658 pri_adjust (EV_A_ (W)w); 4785 pri_adjust (EV_A_ (W)w);
4659 4786
4660 EV_FREQUENT_CHECK; 4787 EV_FREQUENT_CHECK;
4674 4801
4675void 4802void
4676ev_idle_stop (EV_P_ ev_idle *w) EV_NOEXCEPT 4803ev_idle_stop (EV_P_ ev_idle *w) EV_NOEXCEPT
4677{ 4804{
4678 clear_pending (EV_A_ (W)w); 4805 clear_pending (EV_A_ (W)w);
4679 if (expect_false (!ev_is_active (w))) 4806 if (ecb_expect_false (!ev_is_active (w)))
4680 return; 4807 return;
4681 4808
4682 EV_FREQUENT_CHECK; 4809 EV_FREQUENT_CHECK;
4683 4810
4684 { 4811 {
4697 4824
4698#if EV_PREPARE_ENABLE 4825#if EV_PREPARE_ENABLE
4699void 4826void
4700ev_prepare_start (EV_P_ ev_prepare *w) EV_NOEXCEPT 4827ev_prepare_start (EV_P_ ev_prepare *w) EV_NOEXCEPT
4701{ 4828{
4702 if (expect_false (ev_is_active (w))) 4829 if (ecb_expect_false (ev_is_active (w)))
4703 return; 4830 return;
4704 4831
4705 EV_FREQUENT_CHECK; 4832 EV_FREQUENT_CHECK;
4706 4833
4707 ev_start (EV_A_ (W)w, ++preparecnt); 4834 ev_start (EV_A_ (W)w, ++preparecnt);
4713 4840
4714void 4841void
4715ev_prepare_stop (EV_P_ ev_prepare *w) EV_NOEXCEPT 4842ev_prepare_stop (EV_P_ ev_prepare *w) EV_NOEXCEPT
4716{ 4843{
4717 clear_pending (EV_A_ (W)w); 4844 clear_pending (EV_A_ (W)w);
4718 if (expect_false (!ev_is_active (w))) 4845 if (ecb_expect_false (!ev_is_active (w)))
4719 return; 4846 return;
4720 4847
4721 EV_FREQUENT_CHECK; 4848 EV_FREQUENT_CHECK;
4722 4849
4723 { 4850 {
4735 4862
4736#if EV_CHECK_ENABLE 4863#if EV_CHECK_ENABLE
4737void 4864void
4738ev_check_start (EV_P_ ev_check *w) EV_NOEXCEPT 4865ev_check_start (EV_P_ ev_check *w) EV_NOEXCEPT
4739{ 4866{
4740 if (expect_false (ev_is_active (w))) 4867 if (ecb_expect_false (ev_is_active (w)))
4741 return; 4868 return;
4742 4869
4743 EV_FREQUENT_CHECK; 4870 EV_FREQUENT_CHECK;
4744 4871
4745 ev_start (EV_A_ (W)w, ++checkcnt); 4872 ev_start (EV_A_ (W)w, ++checkcnt);
4751 4878
4752void 4879void
4753ev_check_stop (EV_P_ ev_check *w) EV_NOEXCEPT 4880ev_check_stop (EV_P_ ev_check *w) EV_NOEXCEPT
4754{ 4881{
4755 clear_pending (EV_A_ (W)w); 4882 clear_pending (EV_A_ (W)w);
4756 if (expect_false (!ev_is_active (w))) 4883 if (ecb_expect_false (!ev_is_active (w)))
4757 return; 4884 return;
4758 4885
4759 EV_FREQUENT_CHECK; 4886 EV_FREQUENT_CHECK;
4760 4887
4761 { 4888 {
4770 EV_FREQUENT_CHECK; 4897 EV_FREQUENT_CHECK;
4771} 4898}
4772#endif 4899#endif
4773 4900
4774#if EV_EMBED_ENABLE 4901#if EV_EMBED_ENABLE
4775noinline 4902ecb_noinline
4776void 4903void
4777ev_embed_sweep (EV_P_ ev_embed *w) EV_NOEXCEPT 4904ev_embed_sweep (EV_P_ ev_embed *w) EV_NOEXCEPT
4778{ 4905{
4779 ev_run (w->other, EVRUN_NOWAIT); 4906 ev_run (w->other, EVRUN_NOWAIT);
4780} 4907}
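
ev_embed_sweep above runs the embedded loop once without blocking. The usual setup, shown here only as a sketch with illustrative names, creates a loop from an embeddable backend (if any) and lets the automatic sweep (callback 0) drive it from the default loop:

#include <ev.h>

static struct ev_loop *loop_hi; /* the default loop */
static struct ev_loop *loop_lo; /* embedded loop, if one could be created */
static ev_embed embed;

static void
setup_embedded_loop (void)
{
  unsigned int flags = ev_embeddable_backends () & ev_recommended_backends ();

  loop_hi = ev_default_loop (0);
  loop_lo = flags ? ev_loop_new (flags) : 0;

  if (loop_lo)
    {
      /* callback 0: the watcher sweeps the embedded loop automatically */
      ev_embed_init (&embed, 0, loop_lo);
      ev_embed_start (loop_hi, &embed);
    }
  else
    loop_lo = loop_hi; /* no embeddable backend available: use a single loop */
}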
4832#endif 4959#endif
4833 4960
4834void 4961void
4835ev_embed_start (EV_P_ ev_embed *w) EV_NOEXCEPT 4962ev_embed_start (EV_P_ ev_embed *w) EV_NOEXCEPT
4836{ 4963{
4837 if (expect_false (ev_is_active (w))) 4964 if (ecb_expect_false (ev_is_active (w)))
4838 return; 4965 return;
4839 4966
4840 { 4967 {
4841 EV_P = w->other; 4968 EV_P = w->other;
4842 assert (("libev: loop to be embedded is not embeddable", backend & ev_embeddable_backends ())); 4969 assert (("libev: loop to be embedded is not embeddable", backend & ev_embeddable_backends ()));
4864 4991
4865void 4992void
4866ev_embed_stop (EV_P_ ev_embed *w) EV_NOEXCEPT 4993ev_embed_stop (EV_P_ ev_embed *w) EV_NOEXCEPT
4867{ 4994{
4868 clear_pending (EV_A_ (W)w); 4995 clear_pending (EV_A_ (W)w);
4869 if (expect_false (!ev_is_active (w))) 4996 if (ecb_expect_false (!ev_is_active (w)))
4870 return; 4997 return;
4871 4998
4872 EV_FREQUENT_CHECK; 4999 EV_FREQUENT_CHECK;
4873 5000
4874 ev_io_stop (EV_A_ &w->io); 5001 ev_io_stop (EV_A_ &w->io);
4883 5010
4884#if EV_FORK_ENABLE 5011#if EV_FORK_ENABLE
4885void 5012void
4886ev_fork_start (EV_P_ ev_fork *w) EV_NOEXCEPT 5013ev_fork_start (EV_P_ ev_fork *w) EV_NOEXCEPT
4887{ 5014{
4888 if (expect_false (ev_is_active (w))) 5015 if (ecb_expect_false (ev_is_active (w)))
4889 return; 5016 return;
4890 5017
4891 EV_FREQUENT_CHECK; 5018 EV_FREQUENT_CHECK;
4892 5019
4893 ev_start (EV_A_ (W)w, ++forkcnt); 5020 ev_start (EV_A_ (W)w, ++forkcnt);
4899 5026
4900void 5027void
4901ev_fork_stop (EV_P_ ev_fork *w) EV_NOEXCEPT 5028ev_fork_stop (EV_P_ ev_fork *w) EV_NOEXCEPT
4902{ 5029{
4903 clear_pending (EV_A_ (W)w); 5030 clear_pending (EV_A_ (W)w);
4904 if (expect_false (!ev_is_active (w))) 5031 if (ecb_expect_false (!ev_is_active (w)))
4905 return; 5032 return;
4906 5033
4907 EV_FREQUENT_CHECK; 5034 EV_FREQUENT_CHECK;
4908 5035
4909 { 5036 {
4921 5048
4922#if EV_CLEANUP_ENABLE 5049#if EV_CLEANUP_ENABLE
4923void 5050void
4924ev_cleanup_start (EV_P_ ev_cleanup *w) EV_NOEXCEPT 5051ev_cleanup_start (EV_P_ ev_cleanup *w) EV_NOEXCEPT
4925{ 5052{
4926 if (expect_false (ev_is_active (w))) 5053 if (ecb_expect_false (ev_is_active (w)))
4927 return; 5054 return;
4928 5055
4929 EV_FREQUENT_CHECK; 5056 EV_FREQUENT_CHECK;
4930 5057
4931 ev_start (EV_A_ (W)w, ++cleanupcnt); 5058 ev_start (EV_A_ (W)w, ++cleanupcnt);
4939 5066
4940void 5067void
4941ev_cleanup_stop (EV_P_ ev_cleanup *w) EV_NOEXCEPT 5068ev_cleanup_stop (EV_P_ ev_cleanup *w) EV_NOEXCEPT
4942{ 5069{
4943 clear_pending (EV_A_ (W)w); 5070 clear_pending (EV_A_ (W)w);
4944 if (expect_false (!ev_is_active (w))) 5071 if (ecb_expect_false (!ev_is_active (w)))
4945 return; 5072 return;
4946 5073
4947 EV_FREQUENT_CHECK; 5074 EV_FREQUENT_CHECK;
4948 ev_ref (EV_A); 5075 ev_ref (EV_A);
4949 5076
4962 5089
4963#if EV_ASYNC_ENABLE 5090#if EV_ASYNC_ENABLE
4964void 5091void
4965ev_async_start (EV_P_ ev_async *w) EV_NOEXCEPT 5092ev_async_start (EV_P_ ev_async *w) EV_NOEXCEPT
4966{ 5093{
4967 if (expect_false (ev_is_active (w))) 5094 if (ecb_expect_false (ev_is_active (w)))
4968 return; 5095 return;
4969 5096
4970 w->sent = 0; 5097 w->sent = 0;
4971 5098
4972 evpipe_init (EV_A); 5099 evpipe_init (EV_A);
4982 5109
4983void 5110void
4984ev_async_stop (EV_P_ ev_async *w) EV_NOEXCEPT 5111ev_async_stop (EV_P_ ev_async *w) EV_NOEXCEPT
4985{ 5112{
4986 clear_pending (EV_A_ (W)w); 5113 clear_pending (EV_A_ (W)w);
4987 if (expect_false (!ev_is_active (w))) 5114 if (ecb_expect_false (!ev_is_active (w)))
4988 return; 5115 return;
4989 5116
4990 EV_FREQUENT_CHECK; 5117 EV_FREQUENT_CHECK;
4991 5118
4992 { 5119 {
