
Comparing libev/ev.c (file contents):
Revision 1.491 by root, Thu Jun 20 23:14:53 2019 UTC vs.
Revision 1.506 by root, Thu Jul 11 05:41:39 2019 UTC

325#ifndef EV_USE_PORT 325#ifndef EV_USE_PORT
326# define EV_USE_PORT 0 326# define EV_USE_PORT 0
327#endif 327#endif
328 328
329#ifndef EV_USE_LINUXAIO 329#ifndef EV_USE_LINUXAIO
330# if __linux /* libev currently assumes linux/aio_abi.h is always available on linux */
331# define EV_USE_LINUXAIO 1
332# else
330# define EV_USE_LINUXAIO 0 333# define EV_USE_LINUXAIO 0
334# endif
335#endif
336
337#ifndef EV_USE_IOURING
338# if __linux
339# define EV_USE_IOURING 0
340# else
341# define EV_USE_IOURING 0
342# endif
331#endif 343#endif
332 344
333#ifndef EV_USE_INOTIFY 345#ifndef EV_USE_INOTIFY
334# if __linux && (__GLIBC__ > 2 || (__GLIBC__ == 2 && __GLIBC_MINOR__ >= 4)) 346# if __linux && (__GLIBC__ > 2 || (__GLIBC__ == 2 && __GLIBC_MINOR__ >= 4))
335# define EV_USE_INOTIFY EV_FEATURE_OS 347# define EV_USE_INOTIFY EV_FEATURE_OS
392/* aix's poll.h seems to cause lots of trouble */ 404/* aix's poll.h seems to cause lots of trouble */
393#ifdef _AIX 405#ifdef _AIX
394/* AIX has a completely broken poll.h header */ 406/* AIX has a completely broken poll.h header */
395# undef EV_USE_POLL 407# undef EV_USE_POLL
396# define EV_USE_POLL 0 408# define EV_USE_POLL 0
397#endif
398
399#if EV_USE_LINUXAIO
400# include <linux/aio_abi.h> /* probably only needed for aio_context_t */
401#endif 409#endif
402 410
403/* on linux, we can use a (slow) syscall to avoid a dependency on pthread, */ 411/* on linux, we can use a (slow) syscall to avoid a dependency on pthread, */
404/* which makes programs even slower. might work on other unices, too. */ 412/* which makes programs even slower. might work on other unices, too. */
405#if EV_USE_CLOCK_SYSCALL 413#if EV_USE_CLOCK_SYSCALL
406# include <sys/syscall.h> 414# include <sys/syscall.h>
407# ifdef SYS_clock_gettime 415# ifdef SYS_clock_gettime
408# define clock_gettime(id, ts) syscall (SYS_clock_gettime, (id), (ts)) 416# define clock_gettime(id, ts) syscall (SYS_clock_gettime, (id), (ts))
409# undef EV_USE_MONOTONIC 417# undef EV_USE_MONOTONIC
410# define EV_USE_MONOTONIC 1 418# define EV_USE_MONOTONIC 1
419# define EV_NEED_SYSCALL 1
411# else 420# else
412# undef EV_USE_CLOCK_SYSCALL 421# undef EV_USE_CLOCK_SYSCALL
413# define EV_USE_CLOCK_SYSCALL 0 422# define EV_USE_CLOCK_SYSCALL 0
414# endif 423# endif
415#endif 424#endif
438# endif 447# endif
439#endif 448#endif
440 449
441#if EV_USE_LINUXAIO 450#if EV_USE_LINUXAIO
442# include <sys/syscall.h> 451# include <sys/syscall.h>
443# if !SYS_io_getevents 452# if SYS_io_getevents && EV_USE_EPOLL /* linuxaio backend requires epoll backend */
453# define EV_NEED_SYSCALL 1
454# else
444# undef EV_USE_LINUXAIO 455# undef EV_USE_LINUXAIO
445# define EV_USE_LINUXAIO 0 456# define EV_USE_LINUXAIO 0
457# endif
458#endif
459
460#if EV_USE_IOURING
461# include <sys/syscall.h>
462# if !SYS_io_uring_setup && __linux && !__alpha
463# define SYS_io_uring_setup 425
464# define SYS_io_uring_enter 426
 465#  define SYS_io_uring_register  427
466# endif
467# if SYS_io_uring_setup && EV_USE_EPOLL /* iouring backend requires epoll backend */
468# define EV_NEED_SYSCALL 1
469# else
470# undef EV_USE_IOURING
471# define EV_USE_IOURING 0
446# endif 472# endif
447#endif 473#endif
448 474
449#if EV_USE_INOTIFY 475#if EV_USE_INOTIFY
450# include <sys/statfs.h> 476# include <sys/statfs.h>
492 uint32_t ssi_signo; 518 uint32_t ssi_signo;
493 char pad[128 - sizeof (uint32_t)]; 519 char pad[128 - sizeof (uint32_t)];
494}; 520};
495#endif 521#endif
496 522
497/**/ 523/*****************************************************************************/
498 524
499#if EV_VERIFY >= 3 525#if EV_VERIFY >= 3
500# define EV_FREQUENT_CHECK ev_verify (EV_A) 526# define EV_FREQUENT_CHECK ev_verify (EV_A)
501#else 527#else
502# define EV_FREQUENT_CHECK do { } while (0) 528# define EV_FREQUENT_CHECK do { } while (0)
507 * This value is good at least till the year 4000. 533 * This value is good at least till the year 4000.
508 */ 534 */
509#define MIN_INTERVAL 0.0001220703125 /* 1/2**13, good till 4000 */ 535#define MIN_INTERVAL 0.0001220703125 /* 1/2**13, good till 4000 */
510/*#define MIN_INTERVAL 0.00000095367431640625 /* 1/2**20, good till 2200 */ 536/*#define MIN_INTERVAL 0.00000095367431640625 /* 1/2**20, good till 2200 */
511 537
512#define MIN_TIMEJUMP 1. /* minimum timejump that gets detected (if monotonic clock available) */ 538#define MIN_TIMEJUMP 1. /* minimum timejump that gets detected (if monotonic clock available) */
513#define MAX_BLOCKTIME 59.743 /* never wait longer than this time (to detect time jumps) */ 539#define MAX_BLOCKTIME 59.743 /* never wait longer than this time (to detect time jumps) */
514 540
541/* find a portable timestamp that is "always" in the future but fits into time_t.
542 * this is quite hard, and we are mostly guessing - we handle 32 bit signed/unsigned time_t,
543 * and sizes larger than 32 bit, and maybe the unlikely floating point time_t */
544#define EV_TSTAMP_HUGE \
545 (sizeof (time_t) >= 8 ? 10000000000000. \
546 : 0 < (time_t)4294967295 ? 4294967295. \
547 : 2147483647.) \
548
549#define EV_TS_TO_MS(a) a * 1e3 + 0.9999
550#define EV_TS_FROM_US(us) us * 1e-6
515#define EV_TV_SET(tv,t) do { tv.tv_sec = (long)t; tv.tv_usec = (long)((t - tv.tv_sec) * 1e6); } while (0) 551#define EV_TV_SET(tv,t) do { tv.tv_sec = (long)t; tv.tv_usec = (long)((t - tv.tv_sec) * 1e6); } while (0)
516#define EV_TS_SET(ts,t) do { ts.tv_sec = (long)t; ts.tv_nsec = (long)((t - ts.tv_sec) * 1e9); } while (0) 552#define EV_TS_SET(ts,t) do { ts.tv_sec = (long)t; ts.tv_nsec = (long)((t - ts.tv_sec) * 1e9); } while (0)
553#define EV_TV_GET(tv) ((tv).tv_sec + (tv).tv_usec * 1e-6)
554#define EV_TS_GET(ts) ((ts).tv_sec + (ts).tv_nsec * 1e-9)
517 555
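To make the new conversion helpers above concrete, here is a small sketch (a hypothetical helper, not part of ev.c, assuming the double-based ev_tstamp these macros expect) of moving between libev timestamps and the kernel-facing structures:

/* hypothetical helper showing the conversion macros above in use - not part of ev.c */
#include <time.h>       /* struct timespec */
#include <sys/time.h>   /* struct timeval, gettimeofday */

static void
example_conversions (double timeout /* relative seconds, like an ev_tstamp */)
{
  struct timeval tv;
  struct timespec ts;
  double now;
  int ms;

  gettimeofday (&tv, 0);
  now = EV_TV_GET (tv);          /* timeval -> seconds, exactly what ev_time () now returns */

  EV_TS_SET (ts, timeout);       /* seconds -> timespec, e.g. for nanosleep */
  ms = EV_TS_TO_MS (timeout);    /* seconds -> milliseconds, rounded up so we never wait too little */

  (void)now; (void)ts; (void)ms;
}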
518/* the following is ecb.h embedded into libev - use update_ev_c to update from an external copy */ 556/* the following is ecb.h embedded into libev - use update_ev_c to update from an external copy */
519/* ECB.H BEGIN */ 557/* ECB.H BEGIN */
520/* 558/*
521 * libecb - http://software.schmorp.de/pkg/libecb 559 * libecb - http://software.schmorp.de/pkg/libecb
559 597
560#ifndef ECB_H 598#ifndef ECB_H
561#define ECB_H 599#define ECB_H
562 600
563/* 16 bits major, 16 bits minor */ 601/* 16 bits major, 16 bits minor */
564#define ECB_VERSION 0x00010005 602#define ECB_VERSION 0x00010006
565 603
566#ifdef _WIN32 604#ifdef _WIN32
567 typedef signed char int8_t; 605 typedef signed char int8_t;
568 typedef unsigned char uint8_t; 606 typedef unsigned char uint8_t;
569 typedef signed short int16_t; 607 typedef signed short int16_t;
683 #include <intrin.h> /* fence functions _ReadBarrier, also bit search functions _BitScanReverse */ 721 #include <intrin.h> /* fence functions _ReadBarrier, also bit search functions _BitScanReverse */
684#endif 722#endif
685 723
686#ifndef ECB_MEMORY_FENCE 724#ifndef ECB_MEMORY_FENCE
687 #if ECB_GCC_VERSION(2,5) || defined __INTEL_COMPILER || (__llvm__ && __GNUC__) || __SUNPRO_C >= 0x5110 || __SUNPRO_CC >= 0x5110 725 #if ECB_GCC_VERSION(2,5) || defined __INTEL_COMPILER || (__llvm__ && __GNUC__) || __SUNPRO_C >= 0x5110 || __SUNPRO_CC >= 0x5110
726 #define ECB_MEMORY_FENCE_RELAXED __asm__ __volatile__ ("" : : : "memory")
688 #if __i386 || __i386__ 727 #if __i386 || __i386__
689 #define ECB_MEMORY_FENCE __asm__ __volatile__ ("lock; orb $0, -1(%%esp)" : : : "memory") 728 #define ECB_MEMORY_FENCE __asm__ __volatile__ ("lock; orb $0, -1(%%esp)" : : : "memory")
690 #define ECB_MEMORY_FENCE_ACQUIRE __asm__ __volatile__ ("" : : : "memory") 729 #define ECB_MEMORY_FENCE_ACQUIRE __asm__ __volatile__ ("" : : : "memory")
691 #define ECB_MEMORY_FENCE_RELEASE __asm__ __volatile__ ("" : : : "memory") 730 #define ECB_MEMORY_FENCE_RELEASE __asm__ __volatile__ ("" : : : "memory")
692 #elif ECB_GCC_AMD64 731 #elif ECB_GCC_AMD64
742 #if ECB_GCC_VERSION(4,7) 781 #if ECB_GCC_VERSION(4,7)
743 /* see comment below (stdatomic.h) about the C11 memory model. */ 782 /* see comment below (stdatomic.h) about the C11 memory model. */
744 #define ECB_MEMORY_FENCE __atomic_thread_fence (__ATOMIC_SEQ_CST) 783 #define ECB_MEMORY_FENCE __atomic_thread_fence (__ATOMIC_SEQ_CST)
745 #define ECB_MEMORY_FENCE_ACQUIRE __atomic_thread_fence (__ATOMIC_ACQUIRE) 784 #define ECB_MEMORY_FENCE_ACQUIRE __atomic_thread_fence (__ATOMIC_ACQUIRE)
746 #define ECB_MEMORY_FENCE_RELEASE __atomic_thread_fence (__ATOMIC_RELEASE) 785 #define ECB_MEMORY_FENCE_RELEASE __atomic_thread_fence (__ATOMIC_RELEASE)
786 #define ECB_MEMORY_FENCE_RELAXED __atomic_thread_fence (__ATOMIC_RELAXED)
747 787
748 #elif ECB_CLANG_EXTENSION(c_atomic) 788 #elif ECB_CLANG_EXTENSION(c_atomic)
749 /* see comment below (stdatomic.h) about the C11 memory model. */ 789 /* see comment below (stdatomic.h) about the C11 memory model. */
750 #define ECB_MEMORY_FENCE __c11_atomic_thread_fence (__ATOMIC_SEQ_CST) 790 #define ECB_MEMORY_FENCE __c11_atomic_thread_fence (__ATOMIC_SEQ_CST)
751 #define ECB_MEMORY_FENCE_ACQUIRE __c11_atomic_thread_fence (__ATOMIC_ACQUIRE) 791 #define ECB_MEMORY_FENCE_ACQUIRE __c11_atomic_thread_fence (__ATOMIC_ACQUIRE)
752 #define ECB_MEMORY_FENCE_RELEASE __c11_atomic_thread_fence (__ATOMIC_RELEASE) 792 #define ECB_MEMORY_FENCE_RELEASE __c11_atomic_thread_fence (__ATOMIC_RELEASE)
793 #define ECB_MEMORY_FENCE_RELAXED __c11_atomic_thread_fence (__ATOMIC_RELAXED)
753 794
754 #elif ECB_GCC_VERSION(4,4) || defined __INTEL_COMPILER || defined __clang__ 795 #elif ECB_GCC_VERSION(4,4) || defined __INTEL_COMPILER || defined __clang__
755 #define ECB_MEMORY_FENCE __sync_synchronize () 796 #define ECB_MEMORY_FENCE __sync_synchronize ()
756 #elif _MSC_VER >= 1500 /* VC++ 2008 */ 797 #elif _MSC_VER >= 1500 /* VC++ 2008 */
757 /* apparently, microsoft broke all the memory barrier stuff in Visual Studio 2008... */ 798 /* apparently, microsoft broke all the memory barrier stuff in Visual Studio 2008... */
767 #elif defined _WIN32 808 #elif defined _WIN32
768 #include <WinNT.h> 809 #include <WinNT.h>
769 #define ECB_MEMORY_FENCE MemoryBarrier () /* actually just xchg on x86... scary */ 810 #define ECB_MEMORY_FENCE MemoryBarrier () /* actually just xchg on x86... scary */
770 #elif __SUNPRO_C >= 0x5110 || __SUNPRO_CC >= 0x5110 811 #elif __SUNPRO_C >= 0x5110 || __SUNPRO_CC >= 0x5110
771 #include <mbarrier.h> 812 #include <mbarrier.h>
772 #define ECB_MEMORY_FENCE __machine_rw_barrier () 813 #define ECB_MEMORY_FENCE __machine_rw_barrier ()
773 #define ECB_MEMORY_FENCE_ACQUIRE __machine_r_barrier () 814 #define ECB_MEMORY_FENCE_ACQUIRE __machine_acq_barrier ()
774 #define ECB_MEMORY_FENCE_RELEASE __machine_w_barrier () 815 #define ECB_MEMORY_FENCE_RELEASE __machine_rel_barrier ()
816 #define ECB_MEMORY_FENCE_RELAXED __compiler_barrier ()
775 #elif __xlC__ 817 #elif __xlC__
776 #define ECB_MEMORY_FENCE __sync () 818 #define ECB_MEMORY_FENCE __sync ()
777 #endif 819 #endif
778#endif 820#endif
779 821
780#ifndef ECB_MEMORY_FENCE 822#ifndef ECB_MEMORY_FENCE
781 #if ECB_C11 && !defined __STDC_NO_ATOMICS__ 823 #if ECB_C11 && !defined __STDC_NO_ATOMICS__
782 /* we assume that these memory fences work on all variables/all memory accesses, */ 824 /* we assume that these memory fences work on all variables/all memory accesses, */
783 /* not just C11 atomics and atomic accesses */ 825 /* not just C11 atomics and atomic accesses */
784 #include <stdatomic.h> 826 #include <stdatomic.h>
785 /* Unfortunately, neither gcc 4.7 nor clang 3.1 generate any instructions for */
786 /* any fence other than seq_cst, which isn't very efficient for us. */
787 /* Why that is, we don't know - either the C11 memory model is quite useless */
788 /* for most usages, or gcc and clang have a bug */
789 /* I *currently* lean towards the latter, and inefficiently implement */
790 /* all three of ecb's fences as a seq_cst fence */
791 /* Update, gcc-4.8 generates mfence for all c++ fences, but nothing */
792 /* for all __atomic_thread_fence's except seq_cst */
793 #define ECB_MEMORY_FENCE atomic_thread_fence (memory_order_seq_cst) 827 #define ECB_MEMORY_FENCE atomic_thread_fence (memory_order_seq_cst)
828 #define ECB_MEMORY_FENCE_ACQUIRE atomic_thread_fence (memory_order_acquire)
829 #define ECB_MEMORY_FENCE_RELEASE atomic_thread_fence (memory_order_release)
794 #endif 830 #endif
795#endif 831#endif
796 832
797#ifndef ECB_MEMORY_FENCE 833#ifndef ECB_MEMORY_FENCE
798 #if !ECB_AVOID_PTHREADS 834 #if !ECB_AVOID_PTHREADS
816 #define ECB_MEMORY_FENCE_ACQUIRE ECB_MEMORY_FENCE 852 #define ECB_MEMORY_FENCE_ACQUIRE ECB_MEMORY_FENCE
817#endif 853#endif
818 854
819#if !defined ECB_MEMORY_FENCE_RELEASE && defined ECB_MEMORY_FENCE 855#if !defined ECB_MEMORY_FENCE_RELEASE && defined ECB_MEMORY_FENCE
820 #define ECB_MEMORY_FENCE_RELEASE ECB_MEMORY_FENCE 856 #define ECB_MEMORY_FENCE_RELEASE ECB_MEMORY_FENCE
857#endif
858
859#if !defined ECB_MEMORY_FENCE_RELAXED && defined ECB_MEMORY_FENCE
860 #define ECB_MEMORY_FENCE_RELAXED ECB_MEMORY_FENCE /* very heavy-handed */
821#endif 861#endif
822 862
823/*****************************************************************************/ 863/*****************************************************************************/
824 864
825#if ECB_CPP 865#if ECB_CPP
1534/* ECB.H END */ 1574/* ECB.H END */
1535 1575
1536#if ECB_MEMORY_FENCE_NEEDS_PTHREADS 1576#if ECB_MEMORY_FENCE_NEEDS_PTHREADS
1537/* if your architecture doesn't need memory fences, e.g. because it is 1577/* if your architecture doesn't need memory fences, e.g. because it is
1538 * single-cpu/core, or if you use libev in a project that doesn't use libev 1578 * single-cpu/core, or if you use libev in a project that doesn't use libev
1539 * from multiple threads, then you can define ECB_AVOID_PTHREADS when compiling 1579 * from multiple threads, then you can define ECB_NO_THREADS when compiling
1540 * libev, in which cases the memory fences become nops. 1580 * libev, in which cases the memory fences become nops.
1541 * alternatively, you can remove this #error and link against libpthread, 1581 * alternatively, you can remove this #error and link against libpthread,
1542 * which will then provide the memory fences. 1582 * which will then provide the memory fences.
1543 */ 1583 */
1544# error "memory fences not defined for your architecture, please report" 1584# error "memory fences not defined for your architecture, please report"
1548# define ECB_MEMORY_FENCE do { } while (0) 1588# define ECB_MEMORY_FENCE do { } while (0)
1549# define ECB_MEMORY_FENCE_ACQUIRE ECB_MEMORY_FENCE 1589# define ECB_MEMORY_FENCE_ACQUIRE ECB_MEMORY_FENCE
1550# define ECB_MEMORY_FENCE_RELEASE ECB_MEMORY_FENCE 1590# define ECB_MEMORY_FENCE_RELEASE ECB_MEMORY_FENCE
1551#endif 1591#endif
1552 1592
1553#define expect_false(cond) ecb_expect_false (cond)
1554#define expect_true(cond) ecb_expect_true (cond)
1555#define noinline ecb_noinline
1556
1557#define inline_size ecb_inline 1593#define inline_size ecb_inline
1558 1594
1559#if EV_FEATURE_CODE 1595#if EV_FEATURE_CODE
1560# define inline_speed ecb_inline 1596# define inline_speed ecb_inline
1561#else 1597#else
1562# define inline_speed noinline static 1598# define inline_speed ecb_noinline static
1563#endif 1599#endif
1600
1601/*****************************************************************************/
1602/* raw syscall wrappers */
1603
1604#if EV_NEED_SYSCALL
1605
1606#include <sys/syscall.h>
1607
1608/*
1609 * define some syscall wrappers for common architectures
1610 * this is mostly for nice looks during debugging, not performance.
1611 * our syscalls return < 0, not == -1, on error. which is good
1612 * enough for linux aio.
1613 * TODO: arm is also common nowadays, maybe even mips and x86
 1614 * TODO: after implementing this, it suddenly looks like overkill, but it's hard to remove...
1615 */
1616#if __GNUC__ && __linux && ECB_AMD64 && !defined __OPTIMIZE_SIZE__
1617 /* the costly errno access probably kills this for size optimisation */
1618
1619 #define ev_syscall(nr,narg,arg1,arg2,arg3,arg4,arg5,arg6) \
1620 ({ \
1621 long res; \
1622 register unsigned long r6 __asm__ ("r9" ); \
1623 register unsigned long r5 __asm__ ("r8" ); \
1624 register unsigned long r4 __asm__ ("r10"); \
1625 register unsigned long r3 __asm__ ("rdx"); \
1626 register unsigned long r2 __asm__ ("rsi"); \
1627 register unsigned long r1 __asm__ ("rdi"); \
1628 if (narg >= 6) r6 = (unsigned long)(arg6); \
1629 if (narg >= 5) r5 = (unsigned long)(arg5); \
1630 if (narg >= 4) r4 = (unsigned long)(arg4); \
1631 if (narg >= 3) r3 = (unsigned long)(arg3); \
1632 if (narg >= 2) r2 = (unsigned long)(arg2); \
1633 if (narg >= 1) r1 = (unsigned long)(arg1); \
1634 __asm__ __volatile__ ( \
1635 "syscall\n\t" \
1636 : "=a" (res) \
1637 : "0" (nr), "r" (r1), "r" (r2), "r" (r3), "r" (r4), "r" (r5) \
1638 : "cc", "r11", "cx", "memory"); \
1639 errno = -res; \
1640 res; \
1641 })
1642
1643#endif
1644
1645#ifdef ev_syscall
1646 #define ev_syscall0(nr) ev_syscall (nr, 0, 0, 0, 0, 0, 0, 0)
1647 #define ev_syscall1(nr,arg1) ev_syscall (nr, 1, arg1, 0, 0, 0, 0, 0)
1648 #define ev_syscall2(nr,arg1,arg2) ev_syscall (nr, 2, arg1, arg2, 0, 0, 0, 0)
1649 #define ev_syscall3(nr,arg1,arg2,arg3) ev_syscall (nr, 3, arg1, arg2, arg3, 0, 0, 0)
 1650 #define ev_syscall4(nr,arg1,arg2,arg3,arg4) ev_syscall (nr, 4, arg1, arg2, arg3, arg4, 0, 0)
1651 #define ev_syscall5(nr,arg1,arg2,arg3,arg4,arg5) ev_syscall (nr, 5, arg1, arg2, arg3, arg4, arg5, 0)
1652 #define ev_syscall6(nr,arg1,arg2,arg3,arg4,arg5,arg6) ev_syscall (nr, 6, arg1, arg2, arg3, arg4, arg5,arg6)
1653#else
1654 #define ev_syscall0(nr) syscall (nr)
1655 #define ev_syscall1(nr,arg1) syscall (nr, arg1)
1656 #define ev_syscall2(nr,arg1,arg2) syscall (nr, arg1, arg2)
1657 #define ev_syscall3(nr,arg1,arg2,arg3) syscall (nr, arg1, arg2, arg3)
1658 #define ev_syscall4(nr,arg1,arg2,arg3,arg4) syscall (nr, arg1, arg2, arg3, arg4)
1659 #define ev_syscall5(nr,arg1,arg2,arg3,arg4,arg5) syscall (nr, arg1, arg2, arg3, arg4, arg5)
1660 #define ev_syscall6(nr,arg1,arg2,arg3,arg4,arg5,arg6) syscall (nr, arg1, arg2, arg3, arg4, arg5,arg6)
1661#endif
1662
1663#endif
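As an illustration of the "< 0 on error" convention the comment above describes, a hypothetical call site (not part of ev.c, assuming SYS_io_setup from <sys/syscall.h> and aio_context_t from <linux/aio_abi.h>) might look like this:

/* hypothetical call site illustrating the error convention of the wrappers above */
#include <errno.h>
#include <linux/aio_abi.h>

static int
example_io_setup (unsigned nr_events, aio_context_t *ctxp)
{
  /* the inline-asm wrapper returns the negated error number and also sets errno;
   * the plain syscall () fallback returns -1 with errno set - both are < 0 on failure */
  if (ev_syscall2 (SYS_io_setup, nr_events, ctxp) < 0)
    return -1;

  return 0;
}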
1664
1665/*****************************************************************************/
1564 1666
1565#define NUMPRI (EV_MAXPRI - EV_MINPRI + 1) 1667#define NUMPRI (EV_MAXPRI - EV_MINPRI + 1)
1566 1668
1567#if EV_MINPRI == EV_MAXPRI 1669#if EV_MINPRI == EV_MAXPRI
1568# define ABSPRI(w) (((W)w), 0) 1670# define ABSPRI(w) (((W)w), 0)
1603# include "ev_win32.c" 1705# include "ev_win32.c"
1604#endif 1706#endif
1605 1707
1606/*****************************************************************************/ 1708/*****************************************************************************/
1607 1709
1710#if EV_USE_LINUXAIO
1711# include <linux/aio_abi.h> /* probably only needed for aio_context_t */
1712#endif
1713
1608/* define a suitable floor function (only used by periodics atm) */ 1714/* define a suitable floor function (only used by periodics atm) */
1609 1715
1610#if EV_USE_FLOOR 1716#if EV_USE_FLOOR
1611# include <math.h> 1717# include <math.h>
1612# define ev_floor(v) floor (v) 1718# define ev_floor(v) floor (v)
1613#else 1719#else
1614 1720
1615#include <float.h> 1721#include <float.h>
1616 1722
1617/* a floor() replacement function, should be independent of ev_tstamp type */ 1723/* a floor() replacement function, should be independent of ev_tstamp type */
1618noinline 1724ecb_noinline
1619static ev_tstamp 1725static ev_tstamp
1620ev_floor (ev_tstamp v) 1726ev_floor (ev_tstamp v)
1621{ 1727{
1622 /* the choice of shift factor is not terribly important */ 1728 /* the choice of shift factor is not terribly important */
1623#if FLT_RADIX != 2 /* assume FLT_RADIX == 10 */ 1729#if FLT_RADIX != 2 /* assume FLT_RADIX == 10 */
1624 const ev_tstamp shift = sizeof (unsigned long) >= 8 ? 10000000000000000000. : 1000000000.; 1730 const ev_tstamp shift = sizeof (unsigned long) >= 8 ? 10000000000000000000. : 1000000000.;
1625#else 1731#else
1626 const ev_tstamp shift = sizeof (unsigned long) >= 8 ? 18446744073709551616. : 4294967296.; 1732 const ev_tstamp shift = sizeof (unsigned long) >= 8 ? 18446744073709551616. : 4294967296.;
1627#endif 1733#endif
1628 1734
1735 /* special treatment for negative arguments */
1736 if (ecb_expect_false (v < 0.))
1737 {
1738 ev_tstamp f = -ev_floor (-v);
1739
1740 return f - (f == v ? 0 : 1);
1741 }
1742
1629 /* argument too large for an unsigned long? */ 1743 /* argument too large for an unsigned long? then reduce it */
1630 if (expect_false (v >= shift)) 1744 if (ecb_expect_false (v >= shift))
1631 { 1745 {
1632 ev_tstamp f; 1746 ev_tstamp f;
1633 1747
1634 if (v == v - 1.) 1748 if (v == v - 1.)
1635 return v; /* very large number */ 1749 return v; /* very large numbers are assumed to be integer */
1636 1750
1637 f = shift * ev_floor (v * (1. / shift)); 1751 f = shift * ev_floor (v * (1. / shift));
1638 return f + ev_floor (v - f); 1752 return f + ev_floor (v - f);
1639 } 1753 }
1640 1754
1641 /* special treatment for negative args? */
1642 if (expect_false (v < 0.))
1643 {
1644 ev_tstamp f = -ev_floor (-v);
1645
1646 return f - (f == v ? 0 : 1);
1647 }
1648
1649 /* fits into an unsigned long */ 1755 /* fits into an unsigned long */
1650 return (unsigned long)v; 1756 return (unsigned long)v;
1651} 1757}
1652 1758
1653#endif 1759#endif
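As a quick check of the two special-case branches above (the negative-argument test is now hoisted before the range reduction), a hypothetical sketch of the expected results, not part of ev.c:

/* hypothetical sanity checks for ev_floor's special cases */
#include <assert.h>

static void
example_ev_floor_checks (void)
{
  assert (ev_floor ( 3.7) ==  3.);   /* small non-negative values take the final cast path */
  assert (ev_floor (-3.7) == -4.);   /* negative non-integers round toward minus infinity */
  assert (ev_floor (-3.0) == -3.);   /* exact negative integers are returned unchanged */
}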
1656 1762
1657#ifdef __linux 1763#ifdef __linux
1658# include <sys/utsname.h> 1764# include <sys/utsname.h>
1659#endif 1765#endif
1660 1766
1661noinline ecb_cold 1767ecb_noinline ecb_cold
1662static unsigned int 1768static unsigned int
1663ev_linux_version (void) 1769ev_linux_version (void)
1664{ 1770{
1665#ifdef __linux 1771#ifdef __linux
1666 unsigned int v = 0; 1772 unsigned int v = 0;
1696} 1802}
1697 1803
1698/*****************************************************************************/ 1804/*****************************************************************************/
1699 1805
1700#if EV_AVOID_STDIO 1806#if EV_AVOID_STDIO
1701noinline ecb_cold 1807ecb_noinline ecb_cold
1702static void 1808static void
1703ev_printerr (const char *msg) 1809ev_printerr (const char *msg)
1704{ 1810{
1705 write (STDERR_FILENO, msg, strlen (msg)); 1811 write (STDERR_FILENO, msg, strlen (msg));
1706} 1812}
1713ev_set_syserr_cb (void (*cb)(const char *msg) EV_NOEXCEPT) EV_NOEXCEPT 1819ev_set_syserr_cb (void (*cb)(const char *msg) EV_NOEXCEPT) EV_NOEXCEPT
1714{ 1820{
1715 syserr_cb = cb; 1821 syserr_cb = cb;
1716} 1822}
1717 1823
1718noinline ecb_cold 1824ecb_noinline ecb_cold
1719static void 1825static void
1720ev_syserr (const char *msg) 1826ev_syserr (const char *msg)
1721{ 1827{
1722 if (!msg) 1828 if (!msg)
1723 msg = "(libev) system error"; 1829 msg = "(libev) system error";
1795{ 1901{
1796 WL head; 1902 WL head;
1797 unsigned char events; /* the events watched for */ 1903 unsigned char events; /* the events watched for */
1798 unsigned char reify; /* flag set when this ANFD needs reification (EV_ANFD_REIFY, EV__IOFDSET) */ 1904 unsigned char reify; /* flag set when this ANFD needs reification (EV_ANFD_REIFY, EV__IOFDSET) */
1799 unsigned char emask; /* some backends store the actual kernel mask in here */ 1905 unsigned char emask; /* some backends store the actual kernel mask in here */
1800 unsigned char unused; 1906 unsigned char eflags; /* flags field for use by backends */
1801#if EV_USE_EPOLL 1907#if EV_USE_EPOLL
1802 unsigned int egen; /* generation counter to counter epoll bugs */ 1908 unsigned int egen; /* generation counter to counter epoll bugs */
1803#endif 1909#endif
1804#if EV_SELECT_IS_WINSOCKET || EV_USE_IOCP 1910#if EV_SELECT_IS_WINSOCKET || EV_USE_IOCP
1805 SOCKET handle; 1911 SOCKET handle;
1869 static int ev_default_loop_ptr; 1975 static int ev_default_loop_ptr;
1870 1976
1871#endif 1977#endif
1872 1978
1873#if EV_FEATURE_API 1979#if EV_FEATURE_API
1874# define EV_RELEASE_CB if (expect_false (release_cb)) release_cb (EV_A) 1980# define EV_RELEASE_CB if (ecb_expect_false (release_cb)) release_cb (EV_A)
1875# define EV_ACQUIRE_CB if (expect_false (acquire_cb)) acquire_cb (EV_A) 1981# define EV_ACQUIRE_CB if (ecb_expect_false (acquire_cb)) acquire_cb (EV_A)
1876# define EV_INVOKE_PENDING invoke_cb (EV_A) 1982# define EV_INVOKE_PENDING invoke_cb (EV_A)
1877#else 1983#else
1878# define EV_RELEASE_CB (void)0 1984# define EV_RELEASE_CB (void)0
1879# define EV_ACQUIRE_CB (void)0 1985# define EV_ACQUIRE_CB (void)0
1880# define EV_INVOKE_PENDING ev_invoke_pending (EV_A) 1986# define EV_INVOKE_PENDING ev_invoke_pending (EV_A)
1887#ifndef EV_HAVE_EV_TIME 1993#ifndef EV_HAVE_EV_TIME
1888ev_tstamp 1994ev_tstamp
1889ev_time (void) EV_NOEXCEPT 1995ev_time (void) EV_NOEXCEPT
1890{ 1996{
1891#if EV_USE_REALTIME 1997#if EV_USE_REALTIME
1892 if (expect_true (have_realtime)) 1998 if (ecb_expect_true (have_realtime))
1893 { 1999 {
1894 struct timespec ts; 2000 struct timespec ts;
1895 clock_gettime (CLOCK_REALTIME, &ts); 2001 clock_gettime (CLOCK_REALTIME, &ts);
1896 return ts.tv_sec + ts.tv_nsec * 1e-9; 2002 return EV_TS_GET (ts);
1897 } 2003 }
1898#endif 2004#endif
1899 2005
1900 struct timeval tv; 2006 struct timeval tv;
1901 gettimeofday (&tv, 0); 2007 gettimeofday (&tv, 0);
1902 return tv.tv_sec + tv.tv_usec * 1e-6; 2008 return EV_TV_GET (tv);
1903} 2009}
1904#endif 2010#endif
1905 2011
1906inline_size ev_tstamp 2012inline_size ev_tstamp
1907get_clock (void) 2013get_clock (void)
1908{ 2014{
1909#if EV_USE_MONOTONIC 2015#if EV_USE_MONOTONIC
1910 if (expect_true (have_monotonic)) 2016 if (ecb_expect_true (have_monotonic))
1911 { 2017 {
1912 struct timespec ts; 2018 struct timespec ts;
1913 clock_gettime (CLOCK_MONOTONIC, &ts); 2019 clock_gettime (CLOCK_MONOTONIC, &ts);
1914 return ts.tv_sec + ts.tv_nsec * 1e-9; 2020 return EV_TS_GET (ts);
1915 } 2021 }
1916#endif 2022#endif
1917 2023
1918 return ev_time (); 2024 return ev_time ();
1919} 2025}
1937 EV_TS_SET (ts, delay); 2043 EV_TS_SET (ts, delay);
1938 nanosleep (&ts, 0); 2044 nanosleep (&ts, 0);
1939#elif defined _WIN32 2045#elif defined _WIN32
1940 /* maybe this should round up, as ms is very low resolution */ 2046 /* maybe this should round up, as ms is very low resolution */
1941 /* compared to select (µs) or nanosleep (ns) */ 2047 /* compared to select (µs) or nanosleep (ns) */
1942 Sleep ((unsigned long)(delay * 1e3)); 2048 Sleep ((unsigned long)(EV_TS_TO_MS (delay)));
1943#else 2049#else
1944 struct timeval tv; 2050 struct timeval tv;
1945 2051
1946 /* here we rely on sys/time.h + sys/types.h + unistd.h providing select */ 2052 /* here we rely on sys/time.h + sys/types.h + unistd.h providing select */
1947 /* something not guaranteed by newer posix versions, but guaranteed */ 2053 /* something not guaranteed by newer posix versions, but guaranteed */
1977 } 2083 }
1978 2084
1979 return ncur; 2085 return ncur;
1980} 2086}
1981 2087
1982noinline ecb_cold 2088ecb_noinline ecb_cold
1983static void * 2089static void *
1984array_realloc (int elem, void *base, int *cur, int cnt) 2090array_realloc (int elem, void *base, int *cur, int cnt)
1985{ 2091{
1986 *cur = array_nextsize (elem, *cur, cnt); 2092 *cur = array_nextsize (elem, *cur, cnt);
1987 return ev_realloc (base, elem * *cur); 2093 return ev_realloc (base, elem * *cur);
1988} 2094}
1989 2095
1990#define array_needsize_noinit(base,count) 2096#define array_needsize_noinit(base,offset,count)
1991 2097
1992#define array_needsize_zerofill(base,count) \ 2098#define array_needsize_zerofill(base,offset,count) \
1993 memset ((void *)(base), 0, sizeof (*(base)) * (count)) 2099 memset ((void *)(base + offset), 0, sizeof (*(base)) * (count))
1994 2100
1995#define array_needsize(type,base,cur,cnt,init) \ 2101#define array_needsize(type,base,cur,cnt,init) \
1996 if (expect_false ((cnt) > (cur))) \ 2102 if (ecb_expect_false ((cnt) > (cur))) \
1997 { \ 2103 { \
1998 ecb_unused int ocur_ = (cur); \ 2104 ecb_unused int ocur_ = (cur); \
1999 (base) = (type *)array_realloc \ 2105 (base) = (type *)array_realloc \
2000 (sizeof (type), (base), &(cur), (cnt)); \ 2106 (sizeof (type), (base), &(cur), (cnt)); \
2001 init ((base) + (ocur_), (cur) - ocur_); \ 2107 init ((base), ocur_, ((cur) - ocur_)); \
2002 } 2108 }
2003 2109
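The init hook's calling convention changes in this hunk: it now receives the (possibly reallocated) base pointer, the old size as an offset, and the number of newly added slots, instead of a pre-offset pointer and a count. A minimal sketch (hypothetical watcher array, names invented for illustration) of growing an array under the new contract:

/* hypothetical growth of a watcher array, mirroring how ev.c invokes the macro;
 * array_needsize_zerofill now clears only base[offset .. offset+count-1],
 * i.e. just the newly allocated tail of the (possibly moved) array */
static ev_prepare **example_ws;
static int example_wsmax, example_wscnt;

static void
example_push (ev_prepare *w)
{
  ++example_wscnt;
  array_needsize (ev_prepare *, example_ws, example_wsmax, example_wscnt, array_needsize_zerofill);
  example_ws [example_wscnt - 1] = w;
}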
2004#if 0 2110#if 0
2005#define array_slim(type,stem) \ 2111#define array_slim(type,stem) \
2006 if (stem ## max < array_roundsize (stem ## cnt >> 2)) \ 2112 if (stem ## max < array_roundsize (stem ## cnt >> 2)) \
2015 ev_free (stem ## s idx); stem ## cnt idx = stem ## max idx = 0; stem ## s idx = 0 2121 ev_free (stem ## s idx); stem ## cnt idx = stem ## max idx = 0; stem ## s idx = 0
2016 2122
2017/*****************************************************************************/ 2123/*****************************************************************************/
2018 2124
2019/* dummy callback for pending events */ 2125/* dummy callback for pending events */
2020noinline 2126ecb_noinline
2021static void 2127static void
2022pendingcb (EV_P_ ev_prepare *w, int revents) 2128pendingcb (EV_P_ ev_prepare *w, int revents)
2023{ 2129{
2024} 2130}
2025 2131
2026noinline 2132ecb_noinline
2027void 2133void
2028ev_feed_event (EV_P_ void *w, int revents) EV_NOEXCEPT 2134ev_feed_event (EV_P_ void *w, int revents) EV_NOEXCEPT
2029{ 2135{
2030 W w_ = (W)w; 2136 W w_ = (W)w;
2031 int pri = ABSPRI (w_); 2137 int pri = ABSPRI (w_);
2032 2138
2033 if (expect_false (w_->pending)) 2139 if (ecb_expect_false (w_->pending))
2034 pendings [pri][w_->pending - 1].events |= revents; 2140 pendings [pri][w_->pending - 1].events |= revents;
2035 else 2141 else
2036 { 2142 {
2037 w_->pending = ++pendingcnt [pri]; 2143 w_->pending = ++pendingcnt [pri];
2038 array_needsize (ANPENDING, pendings [pri], pendingmax [pri], w_->pending, array_needsize_noinit); 2144 array_needsize (ANPENDING, pendings [pri], pendingmax [pri], w_->pending, array_needsize_noinit);
2089inline_speed void 2195inline_speed void
2090fd_event (EV_P_ int fd, int revents) 2196fd_event (EV_P_ int fd, int revents)
2091{ 2197{
2092 ANFD *anfd = anfds + fd; 2198 ANFD *anfd = anfds + fd;
2093 2199
2094 if (expect_true (!anfd->reify)) 2200 if (ecb_expect_true (!anfd->reify))
2095 fd_event_nocheck (EV_A_ fd, revents); 2201 fd_event_nocheck (EV_A_ fd, revents);
2096} 2202}
2097 2203
2098void 2204void
2099ev_feed_fd_event (EV_P_ int fd, int revents) EV_NOEXCEPT 2205ev_feed_fd_event (EV_P_ int fd, int revents) EV_NOEXCEPT
2141 ev_io *w; 2247 ev_io *w;
2142 2248
2143 unsigned char o_events = anfd->events; 2249 unsigned char o_events = anfd->events;
2144 unsigned char o_reify = anfd->reify; 2250 unsigned char o_reify = anfd->reify;
2145 2251
2146 anfd->reify = 0; 2252 anfd->reify = 0;
2147 2253
2148 /*if (expect_true (o_reify & EV_ANFD_REIFY)) probably a deoptimisation */ 2254 /*if (ecb_expect_true (o_reify & EV_ANFD_REIFY)) probably a deoptimisation */
2149 { 2255 {
2150 anfd->events = 0; 2256 anfd->events = 0;
2151 2257
2152 for (w = (ev_io *)anfd->head; w; w = (ev_io *)((WL)w)->next) 2258 for (w = (ev_io *)anfd->head; w; w = (ev_io *)((WL)w)->next)
2153 anfd->events |= (unsigned char)w->events; 2259 anfd->events |= (unsigned char)w->events;
2169fd_change (EV_P_ int fd, int flags) 2275fd_change (EV_P_ int fd, int flags)
2170{ 2276{
2171 unsigned char reify = anfds [fd].reify; 2277 unsigned char reify = anfds [fd].reify;
2172 anfds [fd].reify |= flags; 2278 anfds [fd].reify |= flags;
2173 2279
2174 if (expect_true (!reify)) 2280 if (ecb_expect_true (!reify))
2175 { 2281 {
2176 ++fdchangecnt; 2282 ++fdchangecnt;
2177 array_needsize (int, fdchanges, fdchangemax, fdchangecnt, array_needsize_noinit); 2283 array_needsize (int, fdchanges, fdchangemax, fdchangecnt, array_needsize_noinit);
2178 fdchanges [fdchangecnt - 1] = fd; 2284 fdchanges [fdchangecnt - 1] = fd;
2179 } 2285 }
2202 return fcntl (fd, F_GETFD) != -1; 2308 return fcntl (fd, F_GETFD) != -1;
2203#endif 2309#endif
2204} 2310}
2205 2311
2206/* called on EBADF to verify fds */ 2312/* called on EBADF to verify fds */
2207noinline ecb_cold 2313ecb_noinline ecb_cold
2208static void 2314static void
2209fd_ebadf (EV_P) 2315fd_ebadf (EV_P)
2210{ 2316{
2211 int fd; 2317 int fd;
2212 2318
2215 if (!fd_valid (fd) && errno == EBADF) 2321 if (!fd_valid (fd) && errno == EBADF)
2216 fd_kill (EV_A_ fd); 2322 fd_kill (EV_A_ fd);
2217} 2323}
2218 2324
2219/* called on ENOMEM in select/poll to kill some fds and retry */ 2325/* called on ENOMEM in select/poll to kill some fds and retry */
2220noinline ecb_cold 2326ecb_noinline ecb_cold
2221static void 2327static void
2222fd_enomem (EV_P) 2328fd_enomem (EV_P)
2223{ 2329{
2224 int fd; 2330 int fd;
2225 2331
2230 break; 2336 break;
2231 } 2337 }
2232} 2338}
2233 2339
2234/* usually called after fork if backend needs to re-arm all fds from scratch */ 2340/* usually called after fork if backend needs to re-arm all fds from scratch */
2235noinline 2341ecb_noinline
2236static void 2342static void
2237fd_rearm_all (EV_P) 2343fd_rearm_all (EV_P)
2238{ 2344{
2239 int fd; 2345 int fd;
2240 2346
2294 ev_tstamp minat; 2400 ev_tstamp minat;
2295 ANHE *minpos; 2401 ANHE *minpos;
2296 ANHE *pos = heap + DHEAP * (k - HEAP0) + HEAP0 + 1; 2402 ANHE *pos = heap + DHEAP * (k - HEAP0) + HEAP0 + 1;
2297 2403
2298 /* find minimum child */ 2404 /* find minimum child */
2299 if (expect_true (pos + DHEAP - 1 < E)) 2405 if (ecb_expect_true (pos + DHEAP - 1 < E))
2300 { 2406 {
2301 /* fast path */ (minpos = pos + 0), (minat = ANHE_at (*minpos)); 2407 /* fast path */ (minpos = pos + 0), (minat = ANHE_at (*minpos));
2302 if ( ANHE_at (pos [1]) < minat) (minpos = pos + 1), (minat = ANHE_at (*minpos)); 2408 if ( minat > ANHE_at (pos [1])) (minpos = pos + 1), (minat = ANHE_at (*minpos));
2303 if ( ANHE_at (pos [2]) < minat) (minpos = pos + 2), (minat = ANHE_at (*minpos)); 2409 if ( minat > ANHE_at (pos [2])) (minpos = pos + 2), (minat = ANHE_at (*minpos));
2304 if ( ANHE_at (pos [3]) < minat) (minpos = pos + 3), (minat = ANHE_at (*minpos)); 2410 if ( minat > ANHE_at (pos [3])) (minpos = pos + 3), (minat = ANHE_at (*minpos));
2305 } 2411 }
2306 else if (pos < E) 2412 else if (pos < E)
2307 { 2413 {
2308 /* slow path */ (minpos = pos + 0), (minat = ANHE_at (*minpos)); 2414 /* slow path */ (minpos = pos + 0), (minat = ANHE_at (*minpos));
2309 if (pos + 1 < E && ANHE_at (pos [1]) < minat) (minpos = pos + 1), (minat = ANHE_at (*minpos)); 2415 if (pos + 1 < E && minat > ANHE_at (pos [1])) (minpos = pos + 1), (minat = ANHE_at (*minpos));
2310 if (pos + 2 < E && ANHE_at (pos [2]) < minat) (minpos = pos + 2), (minat = ANHE_at (*minpos)); 2416 if (pos + 2 < E && minat > ANHE_at (pos [2])) (minpos = pos + 2), (minat = ANHE_at (*minpos));
2311 if (pos + 3 < E && ANHE_at (pos [3]) < minat) (minpos = pos + 3), (minat = ANHE_at (*minpos)); 2417 if (pos + 3 < E && minat > ANHE_at (pos [3])) (minpos = pos + 3), (minat = ANHE_at (*minpos));
2312 } 2418 }
2313 else 2419 else
2314 break; 2420 break;
2315 2421
2316 if (ANHE_at (he) <= minat) 2422 if (ANHE_at (he) <= minat)
2324 2430
2325 heap [k] = he; 2431 heap [k] = he;
2326 ev_active (ANHE_w (he)) = k; 2432 ev_active (ANHE_w (he)) = k;
2327} 2433}
2328 2434
2329#else /* 4HEAP */ 2435#else /* not 4HEAP */
2330 2436
2331#define HEAP0 1 2437#define HEAP0 1
2332#define HPARENT(k) ((k) >> 1) 2438#define HPARENT(k) ((k) >> 1)
2333#define UPHEAP_DONE(p,k) (!(p)) 2439#define UPHEAP_DONE(p,k) (!(p))
2334 2440
2422 2528
2423/*****************************************************************************/ 2529/*****************************************************************************/
2424 2530
2425#if EV_SIGNAL_ENABLE || EV_ASYNC_ENABLE 2531#if EV_SIGNAL_ENABLE || EV_ASYNC_ENABLE
2426 2532
2427noinline ecb_cold 2533ecb_noinline ecb_cold
2428static void 2534static void
2429evpipe_init (EV_P) 2535evpipe_init (EV_P)
2430{ 2536{
2431 if (!ev_is_active (&pipe_w)) 2537 if (!ev_is_active (&pipe_w))
2432 { 2538 {
2473inline_speed void 2579inline_speed void
2474evpipe_write (EV_P_ EV_ATOMIC_T *flag) 2580evpipe_write (EV_P_ EV_ATOMIC_T *flag)
2475{ 2581{
2476 ECB_MEMORY_FENCE; /* push out the write before this function was called, acquire flag */ 2582 ECB_MEMORY_FENCE; /* push out the write before this function was called, acquire flag */
2477 2583
2478 if (expect_true (*flag)) 2584 if (ecb_expect_true (*flag))
2479 return; 2585 return;
2480 2586
2481 *flag = 1; 2587 *flag = 1;
2482 ECB_MEMORY_FENCE_RELEASE; /* make sure flag is visible before the wakeup */ 2588 ECB_MEMORY_FENCE_RELEASE; /* make sure flag is visible before the wakeup */
2483 2589
2560 sig_pending = 0; 2666 sig_pending = 0;
2561 2667
2562 ECB_MEMORY_FENCE; 2668 ECB_MEMORY_FENCE;
2563 2669
2564 for (i = EV_NSIG - 1; i--; ) 2670 for (i = EV_NSIG - 1; i--; )
2565 if (expect_false (signals [i].pending)) 2671 if (ecb_expect_false (signals [i].pending))
2566 ev_feed_signal_event (EV_A_ i + 1); 2672 ev_feed_signal_event (EV_A_ i + 1);
2567 } 2673 }
2568#endif 2674#endif
2569 2675
2570#if EV_ASYNC_ENABLE 2676#if EV_ASYNC_ENABLE
2611#endif 2717#endif
2612 2718
2613 ev_feed_signal (signum); 2719 ev_feed_signal (signum);
2614} 2720}
2615 2721
2616noinline 2722ecb_noinline
2617void 2723void
2618ev_feed_signal_event (EV_P_ int signum) EV_NOEXCEPT 2724ev_feed_signal_event (EV_P_ int signum) EV_NOEXCEPT
2619{ 2725{
2620 WL w; 2726 WL w;
2621 2727
2622 if (expect_false (signum <= 0 || signum >= EV_NSIG)) 2728 if (ecb_expect_false (signum <= 0 || signum >= EV_NSIG))
2623 return; 2729 return;
2624 2730
2625 --signum; 2731 --signum;
2626 2732
2627#if EV_MULTIPLICITY 2733#if EV_MULTIPLICITY
2628 /* it is permissible to try to feed a signal to the wrong loop */ 2734 /* it is permissible to try to feed a signal to the wrong loop */
2629 /* or, likely more useful, feeding a signal nobody is waiting for */ 2735 /* or, likely more useful, feeding a signal nobody is waiting for */
2630 2736
2631 if (expect_false (signals [signum].loop != EV_A)) 2737 if (ecb_expect_false (signals [signum].loop != EV_A))
2632 return; 2738 return;
2633#endif 2739#endif
2634 2740
2635 signals [signum].pending = 0; 2741 signals [signum].pending = 0;
2636 ECB_MEMORY_FENCE_RELEASE; 2742 ECB_MEMORY_FENCE_RELEASE;
2729# include "ev_port.c" 2835# include "ev_port.c"
2730#endif 2836#endif
2731#if EV_USE_KQUEUE 2837#if EV_USE_KQUEUE
2732# include "ev_kqueue.c" 2838# include "ev_kqueue.c"
2733#endif 2839#endif
2840#if EV_USE_EPOLL
2841# include "ev_epoll.c"
2842#endif
2734#if EV_USE_LINUXAIO 2843#if EV_USE_LINUXAIO
2735# include "ev_linuxaio.c" 2844# include "ev_linuxaio.c"
2736#endif 2845#endif
2737#if EV_USE_EPOLL 2846#if EV_USE_IOURING
2738# include "ev_epoll.c" 2847# include "ev_iouring.c"
2739#endif 2848#endif
2740#if EV_USE_POLL 2849#if EV_USE_POLL
2741# include "ev_poll.c" 2850# include "ev_poll.c"
2742#endif 2851#endif
2743#if EV_USE_SELECT 2852#if EV_USE_SELECT
2776 2885
2777 if (EV_USE_PORT ) flags |= EVBACKEND_PORT; 2886 if (EV_USE_PORT ) flags |= EVBACKEND_PORT;
2778 if (EV_USE_KQUEUE ) flags |= EVBACKEND_KQUEUE; 2887 if (EV_USE_KQUEUE ) flags |= EVBACKEND_KQUEUE;
2779 if (EV_USE_EPOLL ) flags |= EVBACKEND_EPOLL; 2888 if (EV_USE_EPOLL ) flags |= EVBACKEND_EPOLL;
2780 if (EV_USE_LINUXAIO) flags |= EVBACKEND_LINUXAIO; 2889 if (EV_USE_LINUXAIO) flags |= EVBACKEND_LINUXAIO;
2890 if (EV_USE_IOURING ) flags |= EVBACKEND_IOURING;
2781 if (EV_USE_POLL ) flags |= EVBACKEND_POLL; 2891 if (EV_USE_POLL ) flags |= EVBACKEND_POLL;
2782 if (EV_USE_SELECT ) flags |= EVBACKEND_SELECT; 2892 if (EV_USE_SELECT ) flags |= EVBACKEND_SELECT;
2783 2893
2784 return flags; 2894 return flags;
2785} 2895}
2803#ifdef __FreeBSD__ 2913#ifdef __FreeBSD__
2804 flags &= ~EVBACKEND_POLL; /* poll return value is unusable (http://forums.freebsd.org/archive/index.php/t-10270.html) */ 2914 flags &= ~EVBACKEND_POLL; /* poll return value is unusable (http://forums.freebsd.org/archive/index.php/t-10270.html) */
2805#endif 2915#endif
2806 2916
2807 /* TODO: linuxaio is very experimental */ 2917 /* TODO: linuxaio is very experimental */
2918#if !EV_RECOMMEND_LINUXAIO
2808 flags &= ~EVBACKEND_LINUXAIO; 2919 flags &= ~EVBACKEND_LINUXAIO;
2920#endif
2921 /* TODO: linuxaio is super experimental */
2922#if !EV_RECOMMEND_IOURING
2923 flags &= ~EVBACKEND_IOURING;
2924#endif
2809 2925
2810 return flags; 2926 return flags;
2811} 2927}
2812 2928
2813ecb_cold 2929ecb_cold
2818 2934
2819 /* epoll embeddability broken on all linux versions up to at least 2.6.23 */ 2935 /* epoll embeddability broken on all linux versions up to at least 2.6.23 */
2820 if (ev_linux_version () < 0x020620) /* disable it on linux < 2.6.32 */ 2936 if (ev_linux_version () < 0x020620) /* disable it on linux < 2.6.32 */
2821 flags &= ~EVBACKEND_EPOLL; 2937 flags &= ~EVBACKEND_EPOLL;
2822 2938
2939 /* EVBACKEND_LINUXAIO is theoretically embeddable, but suffers from a performance overhead */
2940
2941 /* EVBACKEND_IOURING is practically embeddable, but the current implementation is not
2942 * because our backend_fd is the epoll fd we need as fallback.
2943 * if the kernel ever is fixed, this might change...
2944 */
2945
2823 return flags; 2946 return flags;
2824} 2947}
2825 2948
2826unsigned int 2949unsigned int
2827ev_backend (EV_P) EV_NOEXCEPT 2950ev_backend (EV_P) EV_NOEXCEPT
2879 acquire_cb = acquire; 3002 acquire_cb = acquire;
2880} 3003}
2881#endif 3004#endif
2882 3005
2883/* initialise a loop structure, must be zero-initialised */ 3006/* initialise a loop structure, must be zero-initialised */
2884noinline ecb_cold 3007ecb_noinline ecb_cold
2885static void 3008static void
2886loop_init (EV_P_ unsigned int flags) EV_NOEXCEPT 3009loop_init (EV_P_ unsigned int flags) EV_NOEXCEPT
2887{ 3010{
2888 if (!backend) 3011 if (!backend)
2889 { 3012 {
2957 if (!backend && (flags & EVBACKEND_PORT )) backend = port_init (EV_A_ flags); 3080 if (!backend && (flags & EVBACKEND_PORT )) backend = port_init (EV_A_ flags);
2958#endif 3081#endif
2959#if EV_USE_KQUEUE 3082#if EV_USE_KQUEUE
2960 if (!backend && (flags & EVBACKEND_KQUEUE )) backend = kqueue_init (EV_A_ flags); 3083 if (!backend && (flags & EVBACKEND_KQUEUE )) backend = kqueue_init (EV_A_ flags);
2961#endif 3084#endif
3085#if EV_USE_IOURING
3086 if (!backend && (flags & EVBACKEND_IOURING )) backend = iouring_init (EV_A_ flags);
3087#endif
2962#if EV_USE_LINUXAIO 3088#if EV_USE_LINUXAIO
2963 if (!backend && (flags & EVBACKEND_LINUXAIO)) backend = linuxaio_init (EV_A_ flags); 3089 if (!backend && (flags & EVBACKEND_LINUXAIO)) backend = linuxaio_init (EV_A_ flags);
2964#endif 3090#endif
2965#if EV_USE_EPOLL 3091#if EV_USE_EPOLL
2966 if (!backend && (flags & EVBACKEND_EPOLL )) backend = epoll_init (EV_A_ flags); 3092 if (!backend && (flags & EVBACKEND_EPOLL )) backend = epoll_init (EV_A_ flags);
2994 return; 3120 return;
2995#endif 3121#endif
2996 3122
2997#if EV_CLEANUP_ENABLE 3123#if EV_CLEANUP_ENABLE
2998 /* queue cleanup watchers (and execute them) */ 3124 /* queue cleanup watchers (and execute them) */
2999 if (expect_false (cleanupcnt)) 3125 if (ecb_expect_false (cleanupcnt))
3000 { 3126 {
3001 queue_events (EV_A_ (W *)cleanups, cleanupcnt, EV_CLEANUP); 3127 queue_events (EV_A_ (W *)cleanups, cleanupcnt, EV_CLEANUP);
3002 EV_INVOKE_PENDING; 3128 EV_INVOKE_PENDING;
3003 } 3129 }
3004#endif 3130#endif
3039#if EV_USE_PORT 3165#if EV_USE_PORT
3040 if (backend == EVBACKEND_PORT ) port_destroy (EV_A); 3166 if (backend == EVBACKEND_PORT ) port_destroy (EV_A);
3041#endif 3167#endif
3042#if EV_USE_KQUEUE 3168#if EV_USE_KQUEUE
3043 if (backend == EVBACKEND_KQUEUE ) kqueue_destroy (EV_A); 3169 if (backend == EVBACKEND_KQUEUE ) kqueue_destroy (EV_A);
3170#endif
3171#if EV_USE_IOURING
3172 if (backend == EVBACKEND_IOURING ) iouring_destroy (EV_A);
3044#endif 3173#endif
3045#if EV_USE_LINUXAIO 3174#if EV_USE_LINUXAIO
3046 if (backend == EVBACKEND_LINUXAIO) linuxaio_destroy (EV_A); 3175 if (backend == EVBACKEND_LINUXAIO) linuxaio_destroy (EV_A);
3047#endif 3176#endif
3048#if EV_USE_EPOLL 3177#if EV_USE_EPOLL
3107 if (backend == EVBACKEND_PORT ) port_fork (EV_A); 3236 if (backend == EVBACKEND_PORT ) port_fork (EV_A);
3108#endif 3237#endif
3109#if EV_USE_KQUEUE 3238#if EV_USE_KQUEUE
3110 if (backend == EVBACKEND_KQUEUE ) kqueue_fork (EV_A); 3239 if (backend == EVBACKEND_KQUEUE ) kqueue_fork (EV_A);
3111#endif 3240#endif
3241#if EV_USE_IOURING
3242 if (backend == EVBACKEND_IOURING ) iouring_fork (EV_A);
3243#endif
3112#if EV_USE_LINUXAIO 3244#if EV_USE_LINUXAIO
3113 if (backend == EVBACKEND_LINUXAIO) linuxaio_fork (EV_A); 3245 if (backend == EVBACKEND_LINUXAIO) linuxaio_fork (EV_A);
3114#endif 3246#endif
3115#if EV_USE_EPOLL 3247#if EV_USE_EPOLL
3116 if (backend == EVBACKEND_EPOLL ) epoll_fork (EV_A); 3248 if (backend == EVBACKEND_EPOLL ) epoll_fork (EV_A);
3158} 3290}
3159 3291
3160#endif /* multiplicity */ 3292#endif /* multiplicity */
3161 3293
3162#if EV_VERIFY 3294#if EV_VERIFY
3163noinline ecb_cold 3295ecb_noinline ecb_cold
3164static void 3296static void
3165verify_watcher (EV_P_ W w) 3297verify_watcher (EV_P_ W w)
3166{ 3298{
3167 assert (("libev: watcher has invalid priority", ABSPRI (w) >= 0 && ABSPRI (w) < NUMPRI)); 3299 assert (("libev: watcher has invalid priority", ABSPRI (w) >= 0 && ABSPRI (w) < NUMPRI));
3168 3300
3169 if (w->pending) 3301 if (w->pending)
3170 assert (("libev: pending watcher not on pending queue", pendings [ABSPRI (w)][w->pending - 1].w == w)); 3302 assert (("libev: pending watcher not on pending queue", pendings [ABSPRI (w)][w->pending - 1].w == w));
3171} 3303}
3172 3304
3173noinline ecb_cold 3305ecb_noinline ecb_cold
3174static void 3306static void
3175verify_heap (EV_P_ ANHE *heap, int N) 3307verify_heap (EV_P_ ANHE *heap, int N)
3176{ 3308{
3177 int i; 3309 int i;
3178 3310
3184 3316
3185 verify_watcher (EV_A_ (W)ANHE_w (heap [i])); 3317 verify_watcher (EV_A_ (W)ANHE_w (heap [i]));
3186 } 3318 }
3187} 3319}
3188 3320
3189noinline ecb_cold 3321ecb_noinline ecb_cold
3190static void 3322static void
3191array_verify (EV_P_ W *ws, int cnt) 3323array_verify (EV_P_ W *ws, int cnt)
3192{ 3324{
3193 while (cnt--) 3325 while (cnt--)
3194 { 3326 {
3343 count += pendingcnt [pri]; 3475 count += pendingcnt [pri];
3344 3476
3345 return count; 3477 return count;
3346} 3478}
3347 3479
3348noinline 3480ecb_noinline
3349void 3481void
3350ev_invoke_pending (EV_P) 3482ev_invoke_pending (EV_P)
3351{ 3483{
3352 pendingpri = NUMPRI; 3484 pendingpri = NUMPRI;
3353 3485
3372/* make idle watchers pending. this handles the "call-idle */ 3504/* make idle watchers pending. this handles the "call-idle */
3373/* only when higher priorities are idle" logic */ 3505/* only when higher priorities are idle" logic */
3374inline_size void 3506inline_size void
3375idle_reify (EV_P) 3507idle_reify (EV_P)
3376{ 3508{
3377 if (expect_false (idleall)) 3509 if (ecb_expect_false (idleall))
3378 { 3510 {
3379 int pri; 3511 int pri;
3380 3512
3381 for (pri = NUMPRI; pri--; ) 3513 for (pri = NUMPRI; pri--; )
3382 { 3514 {
3431 } 3563 }
3432} 3564}
3433 3565
3434#if EV_PERIODIC_ENABLE 3566#if EV_PERIODIC_ENABLE
3435 3567
3436noinline 3568ecb_noinline
3437static void 3569static void
3438periodic_recalc (EV_P_ ev_periodic *w) 3570periodic_recalc (EV_P_ ev_periodic *w)
3439{ 3571{
3440 ev_tstamp interval = w->interval > MIN_INTERVAL ? w->interval : MIN_INTERVAL; 3572 ev_tstamp interval = w->interval > MIN_INTERVAL ? w->interval : MIN_INTERVAL;
3441 ev_tstamp at = w->offset + interval * ev_floor ((ev_rt_now - w->offset) / interval); 3573 ev_tstamp at = w->offset + interval * ev_floor ((ev_rt_now - w->offset) / interval);
3444 while (at <= ev_rt_now) 3576 while (at <= ev_rt_now)
3445 { 3577 {
3446 ev_tstamp nat = at + w->interval; 3578 ev_tstamp nat = at + w->interval;
3447 3579
3448 /* when resolution fails us, we use ev_rt_now */ 3580 /* when resolution fails us, we use ev_rt_now */
3449 if (expect_false (nat == at)) 3581 if (ecb_expect_false (nat == at))
3450 { 3582 {
3451 at = ev_rt_now; 3583 at = ev_rt_now;
3452 break; 3584 break;
3453 } 3585 }
3454 3586
3500 } 3632 }
3501} 3633}
3502 3634
3503/* simply recalculate all periodics */ 3635/* simply recalculate all periodics */
3504/* TODO: maybe ensure that at least one event happens when jumping forward? */ 3636/* TODO: maybe ensure that at least one event happens when jumping forward? */
3505noinline ecb_cold 3637ecb_noinline ecb_cold
3506static void 3638static void
3507periodics_reschedule (EV_P) 3639periodics_reschedule (EV_P)
3508{ 3640{
3509 int i; 3641 int i;
3510 3642
3524 reheap (periodics, periodiccnt); 3656 reheap (periodics, periodiccnt);
3525} 3657}
3526#endif 3658#endif
3527 3659
3528/* adjust all timers by a given offset */ 3660/* adjust all timers by a given offset */
3529noinline ecb_cold 3661ecb_noinline ecb_cold
3530static void 3662static void
3531timers_reschedule (EV_P_ ev_tstamp adjust) 3663timers_reschedule (EV_P_ ev_tstamp adjust)
3532{ 3664{
3533 int i; 3665 int i;
3534 3666
3544/* also detect if there was a timejump, and act accordingly */ 3676/* also detect if there was a timejump, and act accordingly */
3545inline_speed void 3677inline_speed void
3546time_update (EV_P_ ev_tstamp max_block) 3678time_update (EV_P_ ev_tstamp max_block)
3547{ 3679{
3548#if EV_USE_MONOTONIC 3680#if EV_USE_MONOTONIC
3549 if (expect_true (have_monotonic)) 3681 if (ecb_expect_true (have_monotonic))
3550 { 3682 {
3551 int i; 3683 int i;
3552 ev_tstamp odiff = rtmn_diff; 3684 ev_tstamp odiff = rtmn_diff;
3553 3685
3554 mn_now = get_clock (); 3686 mn_now = get_clock ();
3555 3687
3556 /* only fetch the realtime clock every 0.5*MIN_TIMEJUMP seconds */ 3688 /* only fetch the realtime clock every 0.5*MIN_TIMEJUMP seconds */
3557 /* interpolate in the meantime */ 3689 /* interpolate in the meantime */
3558 if (expect_true (mn_now - now_floor < MIN_TIMEJUMP * .5)) 3690 if (ecb_expect_true (mn_now - now_floor < MIN_TIMEJUMP * .5))
3559 { 3691 {
3560 ev_rt_now = rtmn_diff + mn_now; 3692 ev_rt_now = rtmn_diff + mn_now;
3561 return; 3693 return;
3562 } 3694 }
3563 3695
3577 ev_tstamp diff; 3709 ev_tstamp diff;
3578 rtmn_diff = ev_rt_now - mn_now; 3710 rtmn_diff = ev_rt_now - mn_now;
3579 3711
3580 diff = odiff - rtmn_diff; 3712 diff = odiff - rtmn_diff;
3581 3713
3582 if (expect_true ((diff < 0. ? -diff : diff) < MIN_TIMEJUMP)) 3714 if (ecb_expect_true ((diff < 0. ? -diff : diff) < MIN_TIMEJUMP))
3583 return; /* all is well */ 3715 return; /* all is well */
3584 3716
3585 ev_rt_now = ev_time (); 3717 ev_rt_now = ev_time ();
3586 mn_now = get_clock (); 3718 mn_now = get_clock ();
3587 now_floor = mn_now; 3719 now_floor = mn_now;
3596 else 3728 else
3597#endif 3729#endif
3598 { 3730 {
3599 ev_rt_now = ev_time (); 3731 ev_rt_now = ev_time ();
3600 3732
3601 if (expect_false (mn_now > ev_rt_now || ev_rt_now > mn_now + max_block + MIN_TIMEJUMP)) 3733 if (ecb_expect_false (mn_now > ev_rt_now || ev_rt_now > mn_now + max_block + MIN_TIMEJUMP))
3602 { 3734 {
3603 /* adjust timers. this is easy, as the offset is the same for all of them */ 3735 /* adjust timers. this is easy, as the offset is the same for all of them */
3604 timers_reschedule (EV_A_ ev_rt_now - mn_now); 3736 timers_reschedule (EV_A_ ev_rt_now - mn_now);
3605#if EV_PERIODIC_ENABLE 3737#if EV_PERIODIC_ENABLE
3606 periodics_reschedule (EV_A); 3738 periodics_reschedule (EV_A);
3629#if EV_VERIFY >= 2 3761#if EV_VERIFY >= 2
3630 ev_verify (EV_A); 3762 ev_verify (EV_A);
3631#endif 3763#endif
3632 3764
3633#ifndef _WIN32 3765#ifndef _WIN32
3634 if (expect_false (curpid)) /* penalise the forking check even more */ 3766 if (ecb_expect_false (curpid)) /* penalise the forking check even more */
3635 if (expect_false (getpid () != curpid)) 3767 if (ecb_expect_false (getpid () != curpid))
3636 { 3768 {
3637 curpid = getpid (); 3769 curpid = getpid ();
3638 postfork = 1; 3770 postfork = 1;
3639 } 3771 }
3640#endif 3772#endif
3641 3773
3642#if EV_FORK_ENABLE 3774#if EV_FORK_ENABLE
3643 /* we might have forked, so queue fork handlers */ 3775 /* we might have forked, so queue fork handlers */
3644 if (expect_false (postfork)) 3776 if (ecb_expect_false (postfork))
3645 if (forkcnt) 3777 if (forkcnt)
3646 { 3778 {
3647 queue_events (EV_A_ (W *)forks, forkcnt, EV_FORK); 3779 queue_events (EV_A_ (W *)forks, forkcnt, EV_FORK);
3648 EV_INVOKE_PENDING; 3780 EV_INVOKE_PENDING;
3649 } 3781 }
3650#endif 3782#endif
3651 3783
3652#if EV_PREPARE_ENABLE 3784#if EV_PREPARE_ENABLE
3653 /* queue prepare watchers (and execute them) */ 3785 /* queue prepare watchers (and execute them) */
3654 if (expect_false (preparecnt)) 3786 if (ecb_expect_false (preparecnt))
3655 { 3787 {
3656 queue_events (EV_A_ (W *)prepares, preparecnt, EV_PREPARE); 3788 queue_events (EV_A_ (W *)prepares, preparecnt, EV_PREPARE);
3657 EV_INVOKE_PENDING; 3789 EV_INVOKE_PENDING;
3658 } 3790 }
3659#endif 3791#endif
3660 3792
3661 if (expect_false (loop_done)) 3793 if (ecb_expect_false (loop_done))
3662 break; 3794 break;
3663 3795
3664 /* we might have forked, so reify kernel state if necessary */ 3796 /* we might have forked, so reify kernel state if necessary */
3665 if (expect_false (postfork)) 3797 if (ecb_expect_false (postfork))
3666 loop_fork (EV_A); 3798 loop_fork (EV_A);
3667 3799
3668 /* update fd-related kernel structures */ 3800 /* update fd-related kernel structures */
3669 fd_reify (EV_A); 3801 fd_reify (EV_A);
3670 3802
3682 /* from now on, we want a pipe-wake-up */ 3814 /* from now on, we want a pipe-wake-up */
3683 pipe_write_wanted = 1; 3815 pipe_write_wanted = 1;
3684 3816
3685 ECB_MEMORY_FENCE; /* make sure pipe_write_wanted is visible before we check for potential skips */ 3817 ECB_MEMORY_FENCE; /* make sure pipe_write_wanted is visible before we check for potential skips */
3686 3818
3687 if (expect_true (!(flags & EVRUN_NOWAIT || idleall || !activecnt || pipe_write_skipped))) 3819 if (ecb_expect_true (!(flags & EVRUN_NOWAIT || idleall || !activecnt || pipe_write_skipped)))
3688 { 3820 {
3689 waittime = MAX_BLOCKTIME; 3821 waittime = MAX_BLOCKTIME;
3690 3822
3691 if (timercnt) 3823 if (timercnt)
3692 { 3824 {
3701 if (waittime > to) waittime = to; 3833 if (waittime > to) waittime = to;
3702 } 3834 }
3703#endif 3835#endif
3704 3836
3705 /* don't let timeouts decrease the waittime below timeout_blocktime */ 3837 /* don't let timeouts decrease the waittime below timeout_blocktime */
3706 if (expect_false (waittime < timeout_blocktime)) 3838 if (ecb_expect_false (waittime < timeout_blocktime))
3707 waittime = timeout_blocktime; 3839 waittime = timeout_blocktime;
3708 3840
3709 /* at this point, we NEED to wait, so we have to ensure */ 3841 /* at this point, we NEED to wait, so we have to ensure */
3710 /* to pass a minimum nonzero value to the backend */ 3842 /* to pass a minimum nonzero value to the backend */
3711 if (expect_false (waittime < backend_mintime)) 3843 if (ecb_expect_false (waittime < backend_mintime))
3712 waittime = backend_mintime; 3844 waittime = backend_mintime;
3713 3845
3714 /* extra check because io_blocktime is commonly 0 */ 3846 /* extra check because io_blocktime is commonly 0 */
3715 if (expect_false (io_blocktime)) 3847 if (ecb_expect_false (io_blocktime))
3716 { 3848 {
3717 sleeptime = io_blocktime - (mn_now - prev_mn_now); 3849 sleeptime = io_blocktime - (mn_now - prev_mn_now);
3718 3850
3719 if (sleeptime > waittime - backend_mintime) 3851 if (sleeptime > waittime - backend_mintime)
3720 sleeptime = waittime - backend_mintime; 3852 sleeptime = waittime - backend_mintime;
3721 3853
3722 if (expect_true (sleeptime > 0.)) 3854 if (ecb_expect_true (sleeptime > 0.))
3723 { 3855 {
3724 ev_sleep (sleeptime); 3856 ev_sleep (sleeptime);
3725 waittime -= sleeptime; 3857 waittime -= sleeptime;
3726 } 3858 }
3727 } 3859 }
3741 { 3873 {
3742 assert (("libev: pipe_w not active, but pipe not written", ev_is_active (&pipe_w))); 3874 assert (("libev: pipe_w not active, but pipe not written", ev_is_active (&pipe_w)));
3743 ev_feed_event (EV_A_ &pipe_w, EV_CUSTOM); 3875 ev_feed_event (EV_A_ &pipe_w, EV_CUSTOM);
3744 } 3876 }
3745 3877
3746
3747 /* update ev_rt_now, do magic */ 3878 /* update ev_rt_now, do magic */
3748 time_update (EV_A_ waittime + sleeptime); 3879 time_update (EV_A_ waittime + sleeptime);
3749 } 3880 }
3750 3881
3751 /* queue pending timers and reschedule them */ 3882 /* queue pending timers and reschedule them */
3759 idle_reify (EV_A); 3890 idle_reify (EV_A);
3760#endif 3891#endif
3761 3892
3762#if EV_CHECK_ENABLE 3893#if EV_CHECK_ENABLE
3763 /* queue check watchers, to be executed first */ 3894 /* queue check watchers, to be executed first */
3764 if (expect_false (checkcnt)) 3895 if (ecb_expect_false (checkcnt))
3765 queue_events (EV_A_ (W *)checks, checkcnt, EV_CHECK); 3896 queue_events (EV_A_ (W *)checks, checkcnt, EV_CHECK);
3766#endif 3897#endif
3767 3898
3768 EV_INVOKE_PENDING; 3899 EV_INVOKE_PENDING;
3769 } 3900 }
3770 while (expect_true ( 3901 while (ecb_expect_true (
3771 activecnt 3902 activecnt
3772 && !loop_done 3903 && !loop_done
3773 && !(flags & (EVRUN_ONCE | EVRUN_NOWAIT)) 3904 && !(flags & (EVRUN_ONCE | EVRUN_NOWAIT))
3774 )); 3905 ));
3775 3906
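Note (not part of this diff): the block above computes how long the backend may sleep (waittime), clamped by the next timer deadline, timeout_blocktime, backend_mintime and io_blocktime. A minimal sketch of how an application steers these knobs through the public API, assuming #include <ev.h> and the standard interface:

    struct ev_loop *loop = ev_default_loop (0);

    /* let libev sleep up to 10ms extra to batch I/O events and timeouts,
       trading a little latency for fewer wakeups (these map to io_blocktime
       and timeout_blocktime in the code above) */
    ev_set_io_collect_interval      (loop, 0.01);
    ev_set_timeout_collect_interval (loop, 0.01);

    ev_run (loop, 0);               /* block until ev_break () or no active watchers */
    /* ev_run (loop, EVRUN_NOWAIT);    poll once, never block                        */
    /* ev_run (loop, EVRUN_ONCE);      wait once, invoke pending watchers, return    */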
3839inline_size void 3970inline_size void
3840wlist_del (WL *head, WL elem) 3971wlist_del (WL *head, WL elem)
3841{ 3972{
3842 while (*head) 3973 while (*head)
3843 { 3974 {
3844 if (expect_true (*head == elem)) 3975 if (ecb_expect_true (*head == elem))
3845 { 3976 {
3846 *head = elem->next; 3977 *head = elem->next;
3847 break; 3978 break;
3848 } 3979 }
3849 3980
3866ev_clear_pending (EV_P_ void *w) EV_NOEXCEPT 3997ev_clear_pending (EV_P_ void *w) EV_NOEXCEPT
3867{ 3998{
3868 W w_ = (W)w; 3999 W w_ = (W)w;
3869 int pending = w_->pending; 4000 int pending = w_->pending;
3870 4001
3871 if (expect_true (pending)) 4002 if (ecb_expect_true (pending))
3872 { 4003 {
3873 ANPENDING *p = pendings [ABSPRI (w_)] + pending - 1; 4004 ANPENDING *p = pendings [ABSPRI (w_)] + pending - 1;
3874 p->w = (W)&pending_w; 4005 p->w = (W)&pending_w;
3875 w_->pending = 0; 4006 w_->pending = 0;
3876 return p->events; 4007 return p->events;
3903 w->active = 0; 4034 w->active = 0;
3904} 4035}
3905 4036
3906/*****************************************************************************/ 4037/*****************************************************************************/
3907 4038
3908noinline 4039ecb_noinline
3909void 4040void
3910ev_io_start (EV_P_ ev_io *w) EV_NOEXCEPT 4041ev_io_start (EV_P_ ev_io *w) EV_NOEXCEPT
3911{ 4042{
3912 int fd = w->fd; 4043 int fd = w->fd;
3913 4044
3914 if (expect_false (ev_is_active (w))) 4045 if (ecb_expect_false (ev_is_active (w)))
3915 return; 4046 return;
3916 4047
3917 assert (("libev: ev_io_start called with negative fd", fd >= 0)); 4048 assert (("libev: ev_io_start called with negative fd", fd >= 0));
3918 assert (("libev: ev_io_start called with illegal event mask", !(w->events & ~(EV__IOFDSET | EV_READ | EV_WRITE)))); 4049 assert (("libev: ev_io_start called with illegal event mask", !(w->events & ~(EV__IOFDSET | EV_READ | EV_WRITE))));
3919 4050
4051#if EV_VERIFY >= 2
4052 assert (("libev: ev_io_start called on watcher with invalid fd", fd_valid (fd)));
4053#endif
3920 EV_FREQUENT_CHECK; 4054 EV_FREQUENT_CHECK;
3921 4055
3922 ev_start (EV_A_ (W)w, 1); 4056 ev_start (EV_A_ (W)w, 1);
3923 array_needsize (ANFD, anfds, anfdmax, fd + 1, array_needsize_zerofill); 4057 array_needsize (ANFD, anfds, anfdmax, fd + 1, array_needsize_zerofill);
3924 wlist_add (&anfds[fd].head, (WL)w); 4058 wlist_add (&anfds[fd].head, (WL)w);
3930 w->events &= ~EV__IOFDSET; 4064 w->events &= ~EV__IOFDSET;
3931 4065
3932 EV_FREQUENT_CHECK; 4066 EV_FREQUENT_CHECK;
3933} 4067}
3934 4068
3935noinline 4069ecb_noinline
3936void 4070void
3937ev_io_stop (EV_P_ ev_io *w) EV_NOEXCEPT 4071ev_io_stop (EV_P_ ev_io *w) EV_NOEXCEPT
3938{ 4072{
3939 clear_pending (EV_A_ (W)w); 4073 clear_pending (EV_A_ (W)w);
3940 if (expect_false (!ev_is_active (w))) 4074 if (ecb_expect_false (!ev_is_active (w)))
3941 return; 4075 return;
3942 4076
3943 assert (("libev: ev_io_stop called with illegal fd (must stay constant after start!)", w->fd >= 0 && w->fd < anfdmax)); 4077 assert (("libev: ev_io_stop called with illegal fd (must stay constant after start!)", w->fd >= 0 && w->fd < anfdmax));
3944 4078
4079#if EV_VERIFY >= 2
4080 assert (("libev: ev_io_stop called on watcher with invalid fd", fd_valid (w->fd)));
4081#endif
3945 EV_FREQUENT_CHECK; 4082 EV_FREQUENT_CHECK;
3946 4083
3947 wlist_del (&anfds[w->fd].head, (WL)w); 4084 wlist_del (&anfds[w->fd].head, (WL)w);
3948 ev_stop (EV_A_ (W)w); 4085 ev_stop (EV_A_ (W)w);
3949 4086
3950 fd_change (EV_A_ w->fd, EV_ANFD_REIFY); 4087 fd_change (EV_A_ w->fd, EV_ANFD_REIFY);
3951 4088
3952 EV_FREQUENT_CHECK; 4089 EV_FREQUENT_CHECK;
3953} 4090}
3954 4091
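Note (illustration only, not part of this diff): a minimal ev_io usage sketch matching the start/stop pair above, assuming #include <ev.h> and a struct ev_loop *loop from ev_default_loop (0); fd 0 (stdin) is just an example:

    static void
    stdin_cb (struct ev_loop *loop, ev_io *w, int revents)
    {
      /* fd 0 became readable; stop the one-shot watcher and leave the loop */
      ev_io_stop (loop, w);
      ev_break (loop, EVBREAK_ALL);
    }

    ev_io stdin_watcher;
    ev_io_init (&stdin_watcher, stdin_cb, 0, EV_READ);
    ev_io_start (loop, &stdin_watcher);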
3955noinline 4092ecb_noinline
3956void 4093void
3957ev_timer_start (EV_P_ ev_timer *w) EV_NOEXCEPT 4094ev_timer_start (EV_P_ ev_timer *w) EV_NOEXCEPT
3958{ 4095{
3959 if (expect_false (ev_is_active (w))) 4096 if (ecb_expect_false (ev_is_active (w)))
3960 return; 4097 return;
3961 4098
3962 ev_at (w) += mn_now; 4099 ev_at (w) += mn_now;
3963 4100
3964 assert (("libev: ev_timer_start called with negative timer repeat value", w->repeat >= 0.)); 4101 assert (("libev: ev_timer_start called with negative timer repeat value", w->repeat >= 0.));
3975 EV_FREQUENT_CHECK; 4112 EV_FREQUENT_CHECK;
3976 4113
3977 /*assert (("libev: internal timer heap corruption", timers [ev_active (w)] == (WT)w));*/ 4114 /*assert (("libev: internal timer heap corruption", timers [ev_active (w)] == (WT)w));*/
3978} 4115}
3979 4116
3980noinline 4117ecb_noinline
3981void 4118void
3982ev_timer_stop (EV_P_ ev_timer *w) EV_NOEXCEPT 4119ev_timer_stop (EV_P_ ev_timer *w) EV_NOEXCEPT
3983{ 4120{
3984 clear_pending (EV_A_ (W)w); 4121 clear_pending (EV_A_ (W)w);
3985 if (expect_false (!ev_is_active (w))) 4122 if (ecb_expect_false (!ev_is_active (w)))
3986 return; 4123 return;
3987 4124
3988 EV_FREQUENT_CHECK; 4125 EV_FREQUENT_CHECK;
3989 4126
3990 { 4127 {
3992 4129
3993 assert (("libev: internal timer heap corruption", ANHE_w (timers [active]) == (WT)w)); 4130 assert (("libev: internal timer heap corruption", ANHE_w (timers [active]) == (WT)w));
3994 4131
3995 --timercnt; 4132 --timercnt;
3996 4133
3997 if (expect_true (active < timercnt + HEAP0)) 4134 if (ecb_expect_true (active < timercnt + HEAP0))
3998 { 4135 {
3999 timers [active] = timers [timercnt + HEAP0]; 4136 timers [active] = timers [timercnt + HEAP0];
4000 adjustheap (timers, timercnt, active); 4137 adjustheap (timers, timercnt, active);
4001 } 4138 }
4002 } 4139 }
4006 ev_stop (EV_A_ (W)w); 4143 ev_stop (EV_A_ (W)w);
4007 4144
4008 EV_FREQUENT_CHECK; 4145 EV_FREQUENT_CHECK;
4009} 4146}
4010 4147
4011noinline 4148ecb_noinline
4012void 4149void
4013ev_timer_again (EV_P_ ev_timer *w) EV_NOEXCEPT 4150ev_timer_again (EV_P_ ev_timer *w) EV_NOEXCEPT
4014{ 4151{
4015 EV_FREQUENT_CHECK; 4152 EV_FREQUENT_CHECK;
4016 4153
4041{ 4178{
4042 return ev_at (w) - (ev_is_active (w) ? mn_now : 0.); 4179 return ev_at (w) - (ev_is_active (w) ? mn_now : 0.);
4043} 4180}
4044 4181
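Note (illustration only): ev_timer_again above re-arms a repeating timer without a stop/start round trip; the canonical use is an inactivity timeout. Sketch, assuming #include <ev.h> and a struct ev_loop *loop:

    static void
    timeout_cb (struct ev_loop *loop, ev_timer *w, int revents)
    {
      /* 60 seconds without activity */
      ev_break (loop, EVBREAK_ONE);
    }

    ev_timer timeout_watcher;
    ev_timer_init (&timeout_watcher, timeout_cb, 0., 60.);
    ev_timer_again (loop, &timeout_watcher);   /* arm using the repeat value */

    /* ... later, whenever there is activity on the connection: */
    ev_timer_again (loop, &timeout_watcher);   /* push the deadline out again */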
4045#if EV_PERIODIC_ENABLE 4182#if EV_PERIODIC_ENABLE
4046noinline 4183ecb_noinline
4047void 4184void
4048ev_periodic_start (EV_P_ ev_periodic *w) EV_NOEXCEPT 4185ev_periodic_start (EV_P_ ev_periodic *w) EV_NOEXCEPT
4049{ 4186{
4050 if (expect_false (ev_is_active (w))) 4187 if (ecb_expect_false (ev_is_active (w)))
4051 return; 4188 return;
4052 4189
4053 if (w->reschedule_cb) 4190 if (w->reschedule_cb)
4054 ev_at (w) = w->reschedule_cb (w, ev_rt_now); 4191 ev_at (w) = w->reschedule_cb (w, ev_rt_now);
4055 else if (w->interval) 4192 else if (w->interval)
4072 EV_FREQUENT_CHECK; 4209 EV_FREQUENT_CHECK;
4073 4210
4074 /*assert (("libev: internal periodic heap corruption", ANHE_w (periodics [ev_active (w)]) == (WT)w));*/ 4211 /*assert (("libev: internal periodic heap corruption", ANHE_w (periodics [ev_active (w)]) == (WT)w));*/
4075} 4212}
4076 4213
4077noinline 4214ecb_noinline
4078void 4215void
4079ev_periodic_stop (EV_P_ ev_periodic *w) EV_NOEXCEPT 4216ev_periodic_stop (EV_P_ ev_periodic *w) EV_NOEXCEPT
4080{ 4217{
4081 clear_pending (EV_A_ (W)w); 4218 clear_pending (EV_A_ (W)w);
4082 if (expect_false (!ev_is_active (w))) 4219 if (ecb_expect_false (!ev_is_active (w)))
4083 return; 4220 return;
4084 4221
4085 EV_FREQUENT_CHECK; 4222 EV_FREQUENT_CHECK;
4086 4223
4087 { 4224 {
4089 4226
4090 assert (("libev: internal periodic heap corruption", ANHE_w (periodics [active]) == (WT)w)); 4227 assert (("libev: internal periodic heap corruption", ANHE_w (periodics [active]) == (WT)w));
4091 4228
4092 --periodiccnt; 4229 --periodiccnt;
4093 4230
4094 if (expect_true (active < periodiccnt + HEAP0)) 4231 if (ecb_expect_true (active < periodiccnt + HEAP0))
4095 { 4232 {
4096 periodics [active] = periodics [periodiccnt + HEAP0]; 4233 periodics [active] = periodics [periodiccnt + HEAP0];
4097 adjustheap (periodics, periodiccnt, active); 4234 adjustheap (periodics, periodiccnt, active);
4098 } 4235 }
4099 } 4236 }
4101 ev_stop (EV_A_ (W)w); 4238 ev_stop (EV_A_ (W)w);
4102 4239
4103 EV_FREQUENT_CHECK; 4240 EV_FREQUENT_CHECK;
4104} 4241}
4105 4242
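Note (illustration only): unlike an ev_timer, a periodic watcher schedules against wall-clock time (ev_rt_now). A sketch of the common interval mode (offset 0, interval 3600, no reschedule_cb), assuming a struct ev_loop *loop:

    static void
    hourly_cb (struct ev_loop *loop, ev_periodic *w, int revents)
    {
      /* invoked close to every full hour of wall-clock time */
    }

    ev_periodic hourly;
    ev_periodic_init (&hourly, hourly_cb, 0., 3600., 0);
    ev_periodic_start (loop, &hourly);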
4106noinline 4243ecb_noinline
4107void 4244void
4108ev_periodic_again (EV_P_ ev_periodic *w) EV_NOEXCEPT 4245ev_periodic_again (EV_P_ ev_periodic *w) EV_NOEXCEPT
4109{ 4246{
4110 /* TODO: use adjustheap and recalculation */ 4247 /* TODO: use adjustheap and recalculation */
4111 ev_periodic_stop (EV_A_ w); 4248 ev_periodic_stop (EV_A_ w);
4117# define SA_RESTART 0 4254# define SA_RESTART 0
4118#endif 4255#endif
4119 4256
4120#if EV_SIGNAL_ENABLE 4257#if EV_SIGNAL_ENABLE
4121 4258
4122noinline 4259ecb_noinline
4123void 4260void
4124ev_signal_start (EV_P_ ev_signal *w) EV_NOEXCEPT 4261ev_signal_start (EV_P_ ev_signal *w) EV_NOEXCEPT
4125{ 4262{
4126 if (expect_false (ev_is_active (w))) 4263 if (ecb_expect_false (ev_is_active (w)))
4127 return; 4264 return;
4128 4265
4129 assert (("libev: ev_signal_start called with illegal signal number", w->signum > 0 && w->signum < EV_NSIG)); 4266 assert (("libev: ev_signal_start called with illegal signal number", w->signum > 0 && w->signum < EV_NSIG));
4130 4267
4131#if EV_MULTIPLICITY 4268#if EV_MULTIPLICITY
4200 } 4337 }
4201 4338
4202 EV_FREQUENT_CHECK; 4339 EV_FREQUENT_CHECK;
4203} 4340}
4204 4341
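Note (illustration only): typical use of the signal watcher started above is turning SIGINT into a clean loop shutdown. Sketch, assuming #include <ev.h> and a struct ev_loop *loop:

    static void
    sigint_cb (struct ev_loop *loop, ev_signal *w, int revents)
    {
      ev_break (loop, EVBREAK_ALL);   /* leave ev_run cleanly on Ctrl-C */
    }

    ev_signal sigint_watcher;
    ev_signal_init (&sigint_watcher, sigint_cb, SIGINT);
    ev_signal_start (loop, &sigint_watcher);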
4205noinline 4342ecb_noinline
4206void 4343void
4207ev_signal_stop (EV_P_ ev_signal *w) EV_NOEXCEPT 4344ev_signal_stop (EV_P_ ev_signal *w) EV_NOEXCEPT
4208{ 4345{
4209 clear_pending (EV_A_ (W)w); 4346 clear_pending (EV_A_ (W)w);
4210 if (expect_false (!ev_is_active (w))) 4347 if (ecb_expect_false (!ev_is_active (w)))
4211 return; 4348 return;
4212 4349
4213 EV_FREQUENT_CHECK; 4350 EV_FREQUENT_CHECK;
4214 4351
4215 wlist_del (&signals [w->signum - 1].head, (WL)w); 4352 wlist_del (&signals [w->signum - 1].head, (WL)w);
4248ev_child_start (EV_P_ ev_child *w) EV_NOEXCEPT 4385ev_child_start (EV_P_ ev_child *w) EV_NOEXCEPT
4249{ 4386{
4250#if EV_MULTIPLICITY 4387#if EV_MULTIPLICITY
4251 assert (("libev: child watchers are only supported in the default loop", loop == ev_default_loop_ptr)); 4388 assert (("libev: child watchers are only supported in the default loop", loop == ev_default_loop_ptr));
4252#endif 4389#endif
4253 if (expect_false (ev_is_active (w))) 4390 if (ecb_expect_false (ev_is_active (w)))
4254 return; 4391 return;
4255 4392
4256 EV_FREQUENT_CHECK; 4393 EV_FREQUENT_CHECK;
4257 4394
4258 ev_start (EV_A_ (W)w, 1); 4395 ev_start (EV_A_ (W)w, 1);
4263 4400
4264void 4401void
4265ev_child_stop (EV_P_ ev_child *w) EV_NOEXCEPT 4402ev_child_stop (EV_P_ ev_child *w) EV_NOEXCEPT
4266{ 4403{
4267 clear_pending (EV_A_ (W)w); 4404 clear_pending (EV_A_ (W)w);
4268 if (expect_false (!ev_is_active (w))) 4405 if (ecb_expect_false (!ev_is_active (w)))
4269 return; 4406 return;
4270 4407
4271 EV_FREQUENT_CHECK; 4408 EV_FREQUENT_CHECK;
4272 4409
4273 wlist_del (&childs [w->pid & ((EV_PID_HASHSIZE) - 1)], (WL)w); 4410 wlist_del (&childs [w->pid & ((EV_PID_HASHSIZE) - 1)], (WL)w);
4287 4424
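Note (illustration only): child watchers, which as asserted above are only supported in the default loop, are usually combined with fork roughly like this (error handling elided, printf assumes <stdio.h>):

    static void
    child_cb (struct ev_loop *loop, ev_child *w, int revents)
    {
      ev_child_stop (loop, w);
      printf ("pid %d exited with status %x\n", w->rpid, w->rstatus);
    }

    pid_t pid = fork ();
    if (pid == 0)
      _exit (0);                                /* child */
    else
      {
        static ev_child cw;
        ev_child_init (&cw, child_cb, pid, 0);  /* trace exit of this pid only */
        ev_child_start (EV_DEFAULT_ &cw);
      }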
4288#define DEF_STAT_INTERVAL 5.0074891 4425#define DEF_STAT_INTERVAL 5.0074891
4289#define NFS_STAT_INTERVAL 30.1074891 /* for filesystems potentially failing inotify */ 4426#define NFS_STAT_INTERVAL 30.1074891 /* for filesystems potentially failing inotify */
4290#define MIN_STAT_INTERVAL 0.1074891 4427#define MIN_STAT_INTERVAL 0.1074891
4291 4428
4292noinline static void stat_timer_cb (EV_P_ ev_timer *w_, int revents); 4429ecb_noinline static void stat_timer_cb (EV_P_ ev_timer *w_, int revents);
4293 4430
4294#if EV_USE_INOTIFY 4431#if EV_USE_INOTIFY
4295 4432
4296/* the * 2 is to allow for alignment padding, which for some reason is >> 8 */ 4433/* the * 2 is to allow for alignment padding, which for some reason is >> 8 */
4297# define EV_INOTIFY_BUFSIZE (sizeof (struct inotify_event) * 2 + NAME_MAX) 4434# define EV_INOTIFY_BUFSIZE (sizeof (struct inotify_event) * 2 + NAME_MAX)
4298 4435
4299noinline 4436ecb_noinline
4300static void 4437static void
4301infy_add (EV_P_ ev_stat *w) 4438infy_add (EV_P_ ev_stat *w)
4302{ 4439{
4303 w->wd = inotify_add_watch (fs_fd, w->path, 4440 w->wd = inotify_add_watch (fs_fd, w->path,
4304 IN_ATTRIB | IN_DELETE_SELF | IN_MOVE_SELF | IN_MODIFY 4441 IN_ATTRIB | IN_DELETE_SELF | IN_MOVE_SELF | IN_MODIFY
4369 if (ev_is_active (&w->timer)) ev_ref (EV_A); 4506 if (ev_is_active (&w->timer)) ev_ref (EV_A);
4370 ev_timer_again (EV_A_ &w->timer); 4507 ev_timer_again (EV_A_ &w->timer);
4371 if (ev_is_active (&w->timer)) ev_unref (EV_A); 4508 if (ev_is_active (&w->timer)) ev_unref (EV_A);
4372} 4509}
4373 4510
4374noinline 4511ecb_noinline
4375static void 4512static void
4376infy_del (EV_P_ ev_stat *w) 4513infy_del (EV_P_ ev_stat *w)
4377{ 4514{
4378 int slot; 4515 int slot;
4379 int wd = w->wd; 4516 int wd = w->wd;
4387 4524
4388 /* remove this watcher, if others are watching it, they will rearm */ 4525 /* remove this watcher, if others are watching it, they will rearm */
4389 inotify_rm_watch (fs_fd, wd); 4526 inotify_rm_watch (fs_fd, wd);
4390} 4527}
4391 4528
4392noinline 4529ecb_noinline
4393static void 4530static void
4394infy_wd (EV_P_ int slot, int wd, struct inotify_event *ev) 4531infy_wd (EV_P_ int slot, int wd, struct inotify_event *ev)
4395{ 4532{
4396 if (slot < 0) 4533 if (slot < 0)
4397 /* overflow, need to check for all hash slots */ 4534 /* overflow, need to check for all hash slots */
4543 w->attr.st_nlink = 0; 4680 w->attr.st_nlink = 0;
4544 else if (!w->attr.st_nlink) 4681 else if (!w->attr.st_nlink)
4545 w->attr.st_nlink = 1; 4682 w->attr.st_nlink = 1;
4546} 4683}
4547 4684
4548noinline 4685ecb_noinline
4549static void 4686static void
4550stat_timer_cb (EV_P_ ev_timer *w_, int revents) 4687stat_timer_cb (EV_P_ ev_timer *w_, int revents)
4551{ 4688{
4552 ev_stat *w = (ev_stat *)(((char *)w_) - offsetof (ev_stat, timer)); 4689 ev_stat *w = (ev_stat *)(((char *)w_) - offsetof (ev_stat, timer));
4553 4690
4587} 4724}
4588 4725
4589void 4726void
4590ev_stat_start (EV_P_ ev_stat *w) EV_NOEXCEPT 4727ev_stat_start (EV_P_ ev_stat *w) EV_NOEXCEPT
4591{ 4728{
4592 if (expect_false (ev_is_active (w))) 4729 if (ecb_expect_false (ev_is_active (w)))
4593 return; 4730 return;
4594 4731
4595 ev_stat_stat (EV_A_ w); 4732 ev_stat_stat (EV_A_ w);
4596 4733
4597 if (w->interval < MIN_STAT_INTERVAL && w->interval) 4734 if (w->interval < MIN_STAT_INTERVAL && w->interval)
4619 4756
4620void 4757void
4621ev_stat_stop (EV_P_ ev_stat *w) EV_NOEXCEPT 4758ev_stat_stop (EV_P_ ev_stat *w) EV_NOEXCEPT
4622{ 4759{
4623 clear_pending (EV_A_ (W)w); 4760 clear_pending (EV_A_ (W)w);
4624 if (expect_false (!ev_is_active (w))) 4761 if (ecb_expect_false (!ev_is_active (w)))
4625 return; 4762 return;
4626 4763
4627 EV_FREQUENT_CHECK; 4764 EV_FREQUENT_CHECK;
4628 4765
4629#if EV_USE_INOTIFY 4766#if EV_USE_INOTIFY
4644 4781
4645#if EV_IDLE_ENABLE 4782#if EV_IDLE_ENABLE
4646void 4783void
4647ev_idle_start (EV_P_ ev_idle *w) EV_NOEXCEPT 4784ev_idle_start (EV_P_ ev_idle *w) EV_NOEXCEPT
4648{ 4785{
4649 if (expect_false (ev_is_active (w))) 4786 if (ecb_expect_false (ev_is_active (w)))
4650 return; 4787 return;
4651 4788
4652 pri_adjust (EV_A_ (W)w); 4789 pri_adjust (EV_A_ (W)w);
4653 4790
4654 EV_FREQUENT_CHECK; 4791 EV_FREQUENT_CHECK;
4668 4805
4669void 4806void
4670ev_idle_stop (EV_P_ ev_idle *w) EV_NOEXCEPT 4807ev_idle_stop (EV_P_ ev_idle *w) EV_NOEXCEPT
4671{ 4808{
4672 clear_pending (EV_A_ (W)w); 4809 clear_pending (EV_A_ (W)w);
4673 if (expect_false (!ev_is_active (w))) 4810 if (ecb_expect_false (!ev_is_active (w)))
4674 return; 4811 return;
4675 4812
4676 EV_FREQUENT_CHECK; 4813 EV_FREQUENT_CHECK;
4677 4814
4678 { 4815 {
4691 4828
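Note (illustration only): idle watchers run only when nothing else is pending; a common pattern is to do one slice of background work per invocation and stop again so the loop does not spin. Sketch, assuming a struct ev_loop *loop:

    static void
    idle_cb (struct ev_loop *loop, ev_idle *w, int revents)
    {
      /* do one chunk of low-priority work, then stop until re-started */
      ev_idle_stop (loop, w);
    }

    ev_idle idle_watcher;
    ev_idle_init (&idle_watcher, idle_cb);
    ev_idle_start (loop, &idle_watcher);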
4692#if EV_PREPARE_ENABLE 4829#if EV_PREPARE_ENABLE
4693void 4830void
4694ev_prepare_start (EV_P_ ev_prepare *w) EV_NOEXCEPT 4831ev_prepare_start (EV_P_ ev_prepare *w) EV_NOEXCEPT
4695{ 4832{
4696 if (expect_false (ev_is_active (w))) 4833 if (ecb_expect_false (ev_is_active (w)))
4697 return; 4834 return;
4698 4835
4699 EV_FREQUENT_CHECK; 4836 EV_FREQUENT_CHECK;
4700 4837
4701 ev_start (EV_A_ (W)w, ++preparecnt); 4838 ev_start (EV_A_ (W)w, ++preparecnt);
4707 4844
4708void 4845void
4709ev_prepare_stop (EV_P_ ev_prepare *w) EV_NOEXCEPT 4846ev_prepare_stop (EV_P_ ev_prepare *w) EV_NOEXCEPT
4710{ 4847{
4711 clear_pending (EV_A_ (W)w); 4848 clear_pending (EV_A_ (W)w);
4712 if (expect_false (!ev_is_active (w))) 4849 if (ecb_expect_false (!ev_is_active (w)))
4713 return; 4850 return;
4714 4851
4715 EV_FREQUENT_CHECK; 4852 EV_FREQUENT_CHECK;
4716 4853
4717 { 4854 {
4729 4866
4730#if EV_CHECK_ENABLE 4867#if EV_CHECK_ENABLE
4731void 4868void
4732ev_check_start (EV_P_ ev_check *w) EV_NOEXCEPT 4869ev_check_start (EV_P_ ev_check *w) EV_NOEXCEPT
4733{ 4870{
4734 if (expect_false (ev_is_active (w))) 4871 if (ecb_expect_false (ev_is_active (w)))
4735 return; 4872 return;
4736 4873
4737 EV_FREQUENT_CHECK; 4874 EV_FREQUENT_CHECK;
4738 4875
4739 ev_start (EV_A_ (W)w, ++checkcnt); 4876 ev_start (EV_A_ (W)w, ++checkcnt);
4745 4882
4746void 4883void
4747ev_check_stop (EV_P_ ev_check *w) EV_NOEXCEPT 4884ev_check_stop (EV_P_ ev_check *w) EV_NOEXCEPT
4748{ 4885{
4749 clear_pending (EV_A_ (W)w); 4886 clear_pending (EV_A_ (W)w);
4750 if (expect_false (!ev_is_active (w))) 4887 if (ecb_expect_false (!ev_is_active (w)))
4751 return; 4888 return;
4752 4889
4753 EV_FREQUENT_CHECK; 4890 EV_FREQUENT_CHECK;
4754 4891
4755 { 4892 {
4764 EV_FREQUENT_CHECK; 4901 EV_FREQUENT_CHECK;
4765} 4902}
4766#endif 4903#endif
4767 4904
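Note (illustration only): prepare and check watchers bracket the blocking call (prepare runs just before the loop sleeps, check right after it wakes) and are commonly used as a pair to integrate a foreign event library. Rough sketch, assuming a struct ev_loop *loop:

    static void
    prepare_cb (struct ev_loop *loop, ev_prepare *w, int revents)
    {
      /* about to block: register/adjust watchers for the foreign library here */
    }

    static void
    check_cb (struct ev_loop *loop, ev_check *w, int revents)
    {
      /* just woke up: dispatch whatever the foreign library has ready */
    }

    ev_prepare prep;
    ev_check   chk;
    ev_prepare_init (&prep, prepare_cb);
    ev_check_init   (&chk,  check_cb);
    ev_prepare_start (loop, &prep);
    ev_check_start   (loop, &chk);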
4768#if EV_EMBED_ENABLE 4905#if EV_EMBED_ENABLE
4769noinline 4906ecb_noinline
4770void 4907void
4771ev_embed_sweep (EV_P_ ev_embed *w) EV_NOEXCEPT 4908ev_embed_sweep (EV_P_ ev_embed *w) EV_NOEXCEPT
4772{ 4909{
4773 ev_run (w->other, EVRUN_NOWAIT); 4910 ev_run (w->other, EVRUN_NOWAIT);
4774} 4911}
4826#endif 4963#endif
4827 4964
4828void 4965void
4829ev_embed_start (EV_P_ ev_embed *w) EV_NOEXCEPT 4966ev_embed_start (EV_P_ ev_embed *w) EV_NOEXCEPT
4830{ 4967{
4831 if (expect_false (ev_is_active (w))) 4968 if (ecb_expect_false (ev_is_active (w)))
4832 return; 4969 return;
4833 4970
4834 { 4971 {
4835 EV_P = w->other; 4972 EV_P = w->other;
4836 assert (("libev: loop to be embedded is not embeddable", backend & ev_embeddable_backends ())); 4973 assert (("libev: loop to be embedded is not embeddable", backend & ev_embeddable_backends ()));
4858 4995
4859void 4996void
4860ev_embed_stop (EV_P_ ev_embed *w) EV_NOEXCEPT 4997ev_embed_stop (EV_P_ ev_embed *w) EV_NOEXCEPT
4861{ 4998{
4862 clear_pending (EV_A_ (W)w); 4999 clear_pending (EV_A_ (W)w);
4863 if (expect_false (!ev_is_active (w))) 5000 if (ecb_expect_false (!ev_is_active (w)))
4864 return; 5001 return;
4865 5002
4866 EV_FREQUENT_CHECK; 5003 EV_FREQUENT_CHECK;
4867 5004
4868 ev_io_stop (EV_A_ &w->io); 5005 ev_io_stop (EV_A_ &w->io);
4877 5014
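Note (illustration only): embed watchers run an embeddable backend (see the ev_embeddable_backends () assertion above) inside another loop. A hedged sketch of the usual autodetection idiom, with a 0 callback so the embedded loop is swept automatically:

    struct ev_loop *loop_hi = ev_default_loop (0);
    struct ev_loop *loop_lo = 0;
    ev_embed embed;

    /* create a second loop only if an embeddable backend is available */
    if (ev_embeddable_backends () & ev_recommended_backends ())
      loop_lo = ev_loop_new (ev_embeddable_backends () & ev_recommended_backends ());

    if (loop_lo)
      {
        ev_embed_init (&embed, 0, loop_lo);   /* 0: let libev sweep it for us */
        ev_embed_start (loop_hi, &embed);
      }
    else
      loop_lo = loop_hi;                      /* fall back to a single loop */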
4878#if EV_FORK_ENABLE 5015#if EV_FORK_ENABLE
4879void 5016void
4880ev_fork_start (EV_P_ ev_fork *w) EV_NOEXCEPT 5017ev_fork_start (EV_P_ ev_fork *w) EV_NOEXCEPT
4881{ 5018{
4882 if (expect_false (ev_is_active (w))) 5019 if (ecb_expect_false (ev_is_active (w)))
4883 return; 5020 return;
4884 5021
4885 EV_FREQUENT_CHECK; 5022 EV_FREQUENT_CHECK;
4886 5023
4887 ev_start (EV_A_ (W)w, ++forkcnt); 5024 ev_start (EV_A_ (W)w, ++forkcnt);
4893 5030
4894void 5031void
4895ev_fork_stop (EV_P_ ev_fork *w) EV_NOEXCEPT 5032ev_fork_stop (EV_P_ ev_fork *w) EV_NOEXCEPT
4896{ 5033{
4897 clear_pending (EV_A_ (W)w); 5034 clear_pending (EV_A_ (W)w);
4898 if (expect_false (!ev_is_active (w))) 5035 if (ecb_expect_false (!ev_is_active (w)))
4899 return; 5036 return;
4900 5037
4901 EV_FREQUENT_CHECK; 5038 EV_FREQUENT_CHECK;
4902 5039
4903 { 5040 {
4915 5052
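Note (illustration only): fork watchers fire in the child after the fork has been signalled to the loop (via ev_loop_fork / ev_default_fork). Minimal sketch, assuming a struct ev_loop *loop:

    static void
    fork_cb (struct ev_loop *loop, ev_fork *w, int revents)
    {
      /* running in the child, after the loop has been told about the fork;
         re-create per-process resources (pipes, fds) here */
    }

    ev_fork fork_watcher;
    ev_fork_init (&fork_watcher, fork_cb);
    ev_fork_start (loop, &fork_watcher);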
4916#if EV_CLEANUP_ENABLE 5053#if EV_CLEANUP_ENABLE
4917void 5054void
4918ev_cleanup_start (EV_P_ ev_cleanup *w) EV_NOEXCEPT 5055ev_cleanup_start (EV_P_ ev_cleanup *w) EV_NOEXCEPT
4919{ 5056{
4920 if (expect_false (ev_is_active (w))) 5057 if (ecb_expect_false (ev_is_active (w)))
4921 return; 5058 return;
4922 5059
4923 EV_FREQUENT_CHECK; 5060 EV_FREQUENT_CHECK;
4924 5061
4925 ev_start (EV_A_ (W)w, ++cleanupcnt); 5062 ev_start (EV_A_ (W)w, ++cleanupcnt);
4933 5070
4934void 5071void
4935ev_cleanup_stop (EV_P_ ev_cleanup *w) EV_NOEXCEPT 5072ev_cleanup_stop (EV_P_ ev_cleanup *w) EV_NOEXCEPT
4936{ 5073{
4937 clear_pending (EV_A_ (W)w); 5074 clear_pending (EV_A_ (W)w);
4938 if (expect_false (!ev_is_active (w))) 5075 if (ecb_expect_false (!ev_is_active (w)))
4939 return; 5076 return;
4940 5077
4941 EV_FREQUENT_CHECK; 5078 EV_FREQUENT_CHECK;
4942 ev_ref (EV_A); 5079 ev_ref (EV_A);
4943 5080
4956 5093
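Note (illustration only): cleanup watchers are invoked just before their loop is destroyed, which makes them a convenient place to release per-loop resources. Sketch, assuming a struct ev_loop *loop:

    static void
    cleanup_cb (struct ev_loop *loop, ev_cleanup *w, int revents)
    {
      /* the loop is about to be destroyed: free anything tied to it */
    }

    ev_cleanup cleanup_watcher;
    ev_cleanup_init (&cleanup_watcher, cleanup_cb);
    ev_cleanup_start (loop, &cleanup_watcher);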
4957#if EV_ASYNC_ENABLE 5094#if EV_ASYNC_ENABLE
4958void 5095void
4959ev_async_start (EV_P_ ev_async *w) EV_NOEXCEPT 5096ev_async_start (EV_P_ ev_async *w) EV_NOEXCEPT
4960{ 5097{
4961 if (expect_false (ev_is_active (w))) 5098 if (ecb_expect_false (ev_is_active (w)))
4962 return; 5099 return;
4963 5100
4964 w->sent = 0; 5101 w->sent = 0;
4965 5102
4966 evpipe_init (EV_A); 5103 evpipe_init (EV_A);
4976 5113
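Note (illustration only): async watchers are the thread-safe wake-up primitive built on the pipe machinery initialised above (evpipe_init); ev_async_send may be called from any thread, everything else only from the loop thread. Sketch, assuming a struct ev_loop *loop:

    static ev_async async_watcher;

    static void
    async_cb (struct ev_loop *loop, ev_async *w, int revents)
    {
      /* running in the loop thread after some other thread called ev_async_send */
    }

    /* in the loop thread: */
    ev_async_init (&async_watcher, async_cb);
    ev_async_start (loop, &async_watcher);

    /* from any other thread: */
    ev_async_send (loop, &async_watcher);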
4977void 5114void
4978ev_async_stop (EV_P_ ev_async *w) EV_NOEXCEPT 5115ev_async_stop (EV_P_ ev_async *w) EV_NOEXCEPT
4979{ 5116{
4980 clear_pending (EV_A_ (W)w); 5117 clear_pending (EV_A_ (W)w);
4981 if (expect_false (!ev_is_active (w))) 5118 if (ecb_expect_false (!ev_is_active (w)))
4982 return; 5119 return;
4983 5120
4984 EV_FREQUENT_CHECK; 5121 EV_FREQUENT_CHECK;
4985 5122
4986 { 5123 {
