/cvs/libev/ev.c

Comparing libev/ev.c (file contents):
Revision 1.494 by root, Sun Jun 23 23:28:45 2019 UTC vs.
Revision 1.515 by root, Fri Dec 20 20:51:46 2019 UTC

124# else 124# else
125# undef EV_USE_LINUXAIO 125# undef EV_USE_LINUXAIO
126# define EV_USE_LINUXAIO 0 126# define EV_USE_LINUXAIO 0
127# endif 127# endif
128 128
129# if HAVE_LINUX_FS_H && HAVE_SYS_TIMERFD_H && HAVE_KERNEL_RWF_T
130# ifndef EV_USE_IOURING
131# define EV_USE_IOURING EV_FEATURE_BACKENDS
132# endif
133# else
134# undef EV_USE_IOURING
135# define EV_USE_IOURING 0
136# endif
137
129# if HAVE_KQUEUE && HAVE_SYS_EVENT_H 138# if HAVE_KQUEUE && HAVE_SYS_EVENT_H
130# ifndef EV_USE_KQUEUE 139# ifndef EV_USE_KQUEUE
131# define EV_USE_KQUEUE EV_FEATURE_BACKENDS 140# define EV_USE_KQUEUE EV_FEATURE_BACKENDS
132# endif 141# endif
133# else 142# else
168# endif 177# endif
169# else 178# else
170# undef EV_USE_EVENTFD 179# undef EV_USE_EVENTFD
171# define EV_USE_EVENTFD 0 180# define EV_USE_EVENTFD 0
172# endif 181# endif
173 182
183# if HAVE_SYS_TIMERFD_H
184# ifndef EV_USE_TIMERFD
185# define EV_USE_TIMERFD EV_FEATURE_OS
186# endif
187# else
188# undef EV_USE_TIMERFD
189# define EV_USE_TIMERFD 0
190# endif
191
174#endif 192#endif
175 193
176/* OS X, in its infinite idiocy, actually HARDCODES 194/* OS X, in its infinite idiocy, actually HARDCODES
177 * a limit of 1024 into their select. Where people have brains, 195 * a limit of 1024 into their select. Where people have brains,
178 * OS X engineers apparently have a vacuum. Or maybe they were 196 * OS X engineers apparently have a vacuum. Or maybe they were
332# else 350# else
333# define EV_USE_LINUXAIO 0 351# define EV_USE_LINUXAIO 0
334# endif 352# endif
335#endif 353#endif
336 354
355#ifndef EV_USE_IOURING
356# if __linux /* later checks might disable again */
357# define EV_USE_IOURING 1
358# else
359# define EV_USE_IOURING 0
360# endif
361#endif
362
337#ifndef EV_USE_INOTIFY 363#ifndef EV_USE_INOTIFY
338# if __linux && (__GLIBC__ > 2 || (__GLIBC__ == 2 && __GLIBC_MINOR__ >= 4)) 364# if __linux && (__GLIBC__ > 2 || (__GLIBC__ == 2 && __GLIBC_MINOR__ >= 4))
339# define EV_USE_INOTIFY EV_FEATURE_OS 365# define EV_USE_INOTIFY EV_FEATURE_OS
340# else 366# else
341# define EV_USE_INOTIFY 0 367# define EV_USE_INOTIFY 0
361#ifndef EV_USE_SIGNALFD 387#ifndef EV_USE_SIGNALFD
362# if __linux && (__GLIBC__ > 2 || (__GLIBC__ == 2 && __GLIBC_MINOR__ >= 7)) 388# if __linux && (__GLIBC__ > 2 || (__GLIBC__ == 2 && __GLIBC_MINOR__ >= 7))
363# define EV_USE_SIGNALFD EV_FEATURE_OS 389# define EV_USE_SIGNALFD EV_FEATURE_OS
364# else 390# else
365# define EV_USE_SIGNALFD 0 391# define EV_USE_SIGNALFD 0
392# endif
393#endif
394
395#ifndef EV_USE_TIMERFD
396# if __linux && (__GLIBC__ > 2 || (__GLIBC__ == 2 && __GLIBC_MINOR__ >= 8))
397# define EV_USE_TIMERFD EV_FEATURE_OS
398# else
399# define EV_USE_TIMERFD 0
366# endif 400# endif
367#endif 401#endif
368 402
369#if 0 /* debugging */ 403#if 0 /* debugging */
370# define EV_VERIFY 3 404# define EV_VERIFY 3
406# include <sys/syscall.h> 440# include <sys/syscall.h>
407# ifdef SYS_clock_gettime 441# ifdef SYS_clock_gettime
408# define clock_gettime(id, ts) syscall (SYS_clock_gettime, (id), (ts)) 442# define clock_gettime(id, ts) syscall (SYS_clock_gettime, (id), (ts))
409# undef EV_USE_MONOTONIC 443# undef EV_USE_MONOTONIC
410# define EV_USE_MONOTONIC 1 444# define EV_USE_MONOTONIC 1
445# define EV_NEED_SYSCALL 1
411# else 446# else
412# undef EV_USE_CLOCK_SYSCALL 447# undef EV_USE_CLOCK_SYSCALL
413# define EV_USE_CLOCK_SYSCALL 0 448# define EV_USE_CLOCK_SYSCALL 0
414# endif 449# endif
415#endif 450#endif
427#endif 462#endif
428 463
429#if !EV_STAT_ENABLE 464#if !EV_STAT_ENABLE
430# undef EV_USE_INOTIFY 465# undef EV_USE_INOTIFY
431# define EV_USE_INOTIFY 0 466# define EV_USE_INOTIFY 0
467#endif
468
469#if __linux && EV_USE_IOURING
470# include <linux/version.h>
471# if LINUX_VERSION_CODE < KERNEL_VERSION(4,14,0)
472# undef EV_USE_IOURING
473# define EV_USE_IOURING 0
474# endif
432#endif 475#endif
433 476
434#if !EV_USE_NANOSLEEP 477#if !EV_USE_NANOSLEEP
435/* hp-ux has it in sys/time.h, which we unconditionally include above */ 478/* hp-ux has it in sys/time.h, which we unconditionally include above */
436# if !defined _WIN32 && !defined __hpux 479# if !defined _WIN32 && !defined __hpux
438# endif 481# endif
439#endif 482#endif
440 483
441#if EV_USE_LINUXAIO 484#if EV_USE_LINUXAIO
442# include <sys/syscall.h> 485# include <sys/syscall.h>
443# if !SYS_io_getevents || !EV_USE_EPOLL 486# if SYS_io_getevents && EV_USE_EPOLL /* linuxaio backend requires epoll backend */
487# define EV_NEED_SYSCALL 1
488# else
444# undef EV_USE_LINUXAIO 489# undef EV_USE_LINUXAIO
445# define EV_USE_LINUXAIO 0 490# define EV_USE_LINUXAIO 0
491# endif
492#endif
493
494#if EV_USE_IOURING
495# include <sys/syscall.h>
496# if !SYS_io_uring_setup && __linux && !__alpha
497# define SYS_io_uring_setup 425
498# define SYS_io_uring_enter 426
 499# define SYS_io_uring_register 427
500# endif
501# if SYS_io_uring_setup && EV_USE_EPOLL /* iouring backend requires epoll backend */
502# define EV_NEED_SYSCALL 1
503# else
504# undef EV_USE_IOURING
505# define EV_USE_IOURING 0
446# endif 506# endif
447#endif 507#endif
448 508
449#if EV_USE_INOTIFY 509#if EV_USE_INOTIFY
450# include <sys/statfs.h> 510# include <sys/statfs.h>
455# define EV_USE_INOTIFY 0 515# define EV_USE_INOTIFY 0
456# endif 516# endif
457#endif 517#endif
458 518
459#if EV_USE_EVENTFD 519#if EV_USE_EVENTFD
460/* our minimum requirement is glibc 2.7 which has the stub, but not the header */ 520/* our minimum requirement is glibc 2.7 which has the stub, but not the full header */
461# include <stdint.h> 521# include <stdint.h>
462# ifndef EFD_NONBLOCK 522# ifndef EFD_NONBLOCK
463# define EFD_NONBLOCK O_NONBLOCK 523# define EFD_NONBLOCK O_NONBLOCK
464# endif 524# endif
465# ifndef EFD_CLOEXEC 525# ifndef EFD_CLOEXEC
471# endif 531# endif
472EV_CPP(extern "C") int (eventfd) (unsigned int initval, int flags); 532EV_CPP(extern "C") int (eventfd) (unsigned int initval, int flags);
473#endif 533#endif
474 534
475#if EV_USE_SIGNALFD 535#if EV_USE_SIGNALFD
476/* our minimum requirement is glibc 2.7 which has the stub, but not the header */ 536/* our minimum requirement is glibc 2.7 which has the stub, but not the full header */
477# include <stdint.h> 537# include <stdint.h>
478# ifndef SFD_NONBLOCK 538# ifndef SFD_NONBLOCK
479# define SFD_NONBLOCK O_NONBLOCK 539# define SFD_NONBLOCK O_NONBLOCK
480# endif 540# endif
481# ifndef SFD_CLOEXEC 541# ifndef SFD_CLOEXEC
483# define SFD_CLOEXEC O_CLOEXEC 543# define SFD_CLOEXEC O_CLOEXEC
484# else 544# else
485# define SFD_CLOEXEC 02000000 545# define SFD_CLOEXEC 02000000
486# endif 546# endif
487# endif 547# endif
488EV_CPP (extern "C") int signalfd (int fd, const sigset_t *mask, int flags); 548EV_CPP (extern "C") int (signalfd) (int fd, const sigset_t *mask, int flags);
489 549
490struct signalfd_siginfo 550struct signalfd_siginfo
491{ 551{
492 uint32_t ssi_signo; 552 uint32_t ssi_signo;
493 char pad[128 - sizeof (uint32_t)]; 553 char pad[128 - sizeof (uint32_t)];
494}; 554};
495#endif 555#endif
496 556
497/**/ 557/* for timerfd, libev core requires TFD_TIMER_CANCEL_ON_SET &c */
558#if EV_USE_TIMERFD
559# include <sys/timerfd.h>
560/* timerfd is only used for periodics */
561# if !(defined (TFD_TIMER_CANCEL_ON_SET) && defined (TFD_CLOEXEC) && defined (TFD_NONBLOCK)) || !EV_PERIODIC_ENABLE
562# undef EV_USE_TIMERFD
563# define EV_USE_TIMERFD 0
564# endif
565#endif
566
567/*****************************************************************************/
498 568
499#if EV_VERIFY >= 3 569#if EV_VERIFY >= 3
500# define EV_FREQUENT_CHECK ev_verify (EV_A) 570# define EV_FREQUENT_CHECK ev_verify (EV_A)
501#else 571#else
502# define EV_FREQUENT_CHECK do { } while (0) 572# define EV_FREQUENT_CHECK do { } while (0)
507 * This value is good at least till the year 4000. 577 * This value is good at least till the year 4000.
508 */ 578 */
509#define MIN_INTERVAL 0.0001220703125 /* 1/2**13, good till 4000 */ 579#define MIN_INTERVAL 0.0001220703125 /* 1/2**13, good till 4000 */
510/*#define MIN_INTERVAL 0.00000095367431640625 /* 1/2**20, good till 2200 */ 580/*#define MIN_INTERVAL 0.00000095367431640625 /* 1/2**20, good till 2200 */
511 581
512#define MIN_TIMEJUMP 1. /* minimum timejump that gets detected (if monotonic clock available) */ 582#define MIN_TIMEJUMP 1. /* minimum timejump that gets detected (if monotonic clock available) */
513#define MAX_BLOCKTIME 59.743 /* never wait longer than this time (to detect time jumps) */ 583#define MAX_BLOCKTIME 59.743 /* never wait longer than this time (to detect time jumps) */
514 584
585/* find a portable timestamp that is "always" in the future but fits into time_t.
586 * this is quite hard, and we are mostly guessing - we handle 32 bit signed/unsigned time_t,
587 * and sizes larger than 32 bit, and maybe the unlikely floating point time_t */
588#define EV_TSTAMP_HUGE \
589 (sizeof (time_t) >= 8 ? 10000000000000. \
590 : 0 < (time_t)4294967295 ? 4294967295. \
591 : 2147483647.) \
592
593#ifndef EV_TS_CONST
594# define EV_TS_CONST(nv) nv
595# define EV_TS_TO_MSEC(a) a * 1e3 + 0.9999
596# define EV_TS_FROM_USEC(us) us * 1e-6
515#define EV_TV_SET(tv,t) do { tv.tv_sec = (long)t; tv.tv_usec = (long)((t - tv.tv_sec) * 1e6); } while (0) 597# define EV_TV_SET(tv,t) do { tv.tv_sec = (long)t; tv.tv_usec = (long)((t - tv.tv_sec) * 1e6); } while (0)
516#define EV_TS_SET(ts,t) do { ts.tv_sec = (long)t; ts.tv_nsec = (long)((t - ts.tv_sec) * 1e9); } while (0) 598# define EV_TS_SET(ts,t) do { ts.tv_sec = (long)t; ts.tv_nsec = (long)((t - ts.tv_sec) * 1e9); } while (0)
599# define EV_TV_GET(tv) ((tv).tv_sec + (tv).tv_usec * 1e-6)
600# define EV_TS_GET(ts) ((ts).tv_sec + (ts).tv_nsec * 1e-9)
601#endif
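/* Illustrative sketch, not part of ev.c: how the timestamp helpers above are meant
 * to be used in the default configuration where ev_tstamp is a double. The macro
 * definitions are copied from the hunk above so the example is self-contained;
 * EV_TSTAMP_HUGE picks a "far future" value that still fits the platform's time_t. */
#include <stdio.h>
#include <time.h>

typedef double ev_tstamp;

#define EV_TSTAMP_HUGE \
  (sizeof (time_t) >= 8 ? 10000000000000. \
   : 0 < (time_t)4294967295 ? 4294967295. \
   : 2147483647.)

#define EV_TS_TO_MSEC(a) a * 1e3 + 0.9999   /* round up to whole milliseconds */
#define EV_TS_SET(ts,t) do { ts.tv_sec = (long)t; ts.tv_nsec = (long)((t - ts.tv_sec) * 1e9); } while (0)
#define EV_TS_GET(ts) ((ts).tv_sec + (ts).tv_nsec * 1e-9)

int
main (void)
{
  ev_tstamp timeout = 2.5;                   /* 2.5 seconds */
  struct timespec ts;

  EV_TS_SET (ts, timeout);                   /* split into whole seconds + nanoseconds */
  printf ("%ld s + %ld ns\n", (long)ts.tv_sec, ts.tv_nsec);        /* 2 s + 500000000 ns */
  printf ("round trip: %.9f\n", EV_TS_GET (ts));                   /* 2.500000000 */
  printf ("as msec:    %d\n", (int)(EV_TS_TO_MSEC (timeout)));     /* 2500 */
  printf ("huge:       %.0f\n", (ev_tstamp)EV_TSTAMP_HUGE);        /* 10000000000000 on a 64-bit time_t */
  return 0;
}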
517 602
518/* the following is ecb.h embedded into libev - use update_ev_c to update from an external copy */ 603/* the following is ecb.h embedded into libev - use update_ev_c to update from an external copy */
519/* ECB.H BEGIN */ 604/* ECB.H BEGIN */
520/* 605/*
521 * libecb - http://software.schmorp.de/pkg/libecb 606 * libecb - http://software.schmorp.de/pkg/libecb
559 644
560#ifndef ECB_H 645#ifndef ECB_H
561#define ECB_H 646#define ECB_H
562 647
563/* 16 bits major, 16 bits minor */ 648/* 16 bits major, 16 bits minor */
564#define ECB_VERSION 0x00010005 649#define ECB_VERSION 0x00010006
565 650
566#ifdef _WIN32 651#ifdef _WIN32
567 typedef signed char int8_t; 652 typedef signed char int8_t;
568 typedef unsigned char uint8_t; 653 typedef unsigned char uint8_t;
569 typedef signed short int16_t; 654 typedef signed short int16_t;
683 #include <intrin.h> /* fence functions _ReadBarrier, also bit search functions _BitScanReverse */ 768 #include <intrin.h> /* fence functions _ReadBarrier, also bit search functions _BitScanReverse */
684#endif 769#endif
685 770
686#ifndef ECB_MEMORY_FENCE 771#ifndef ECB_MEMORY_FENCE
687 #if ECB_GCC_VERSION(2,5) || defined __INTEL_COMPILER || (__llvm__ && __GNUC__) || __SUNPRO_C >= 0x5110 || __SUNPRO_CC >= 0x5110 772 #if ECB_GCC_VERSION(2,5) || defined __INTEL_COMPILER || (__llvm__ && __GNUC__) || __SUNPRO_C >= 0x5110 || __SUNPRO_CC >= 0x5110
773 #define ECB_MEMORY_FENCE_RELAXED __asm__ __volatile__ ("" : : : "memory")
688 #if __i386 || __i386__ 774 #if __i386 || __i386__
689 #define ECB_MEMORY_FENCE __asm__ __volatile__ ("lock; orb $0, -1(%%esp)" : : : "memory") 775 #define ECB_MEMORY_FENCE __asm__ __volatile__ ("lock; orb $0, -1(%%esp)" : : : "memory")
690 #define ECB_MEMORY_FENCE_ACQUIRE __asm__ __volatile__ ("" : : : "memory") 776 #define ECB_MEMORY_FENCE_ACQUIRE __asm__ __volatile__ ("" : : : "memory")
691 #define ECB_MEMORY_FENCE_RELEASE __asm__ __volatile__ ("" : : : "memory") 777 #define ECB_MEMORY_FENCE_RELEASE __asm__ __volatile__ ("" : : : "memory")
692 #elif ECB_GCC_AMD64 778 #elif ECB_GCC_AMD64
742 #if ECB_GCC_VERSION(4,7) 828 #if ECB_GCC_VERSION(4,7)
743 /* see comment below (stdatomic.h) about the C11 memory model. */ 829 /* see comment below (stdatomic.h) about the C11 memory model. */
744 #define ECB_MEMORY_FENCE __atomic_thread_fence (__ATOMIC_SEQ_CST) 830 #define ECB_MEMORY_FENCE __atomic_thread_fence (__ATOMIC_SEQ_CST)
745 #define ECB_MEMORY_FENCE_ACQUIRE __atomic_thread_fence (__ATOMIC_ACQUIRE) 831 #define ECB_MEMORY_FENCE_ACQUIRE __atomic_thread_fence (__ATOMIC_ACQUIRE)
746 #define ECB_MEMORY_FENCE_RELEASE __atomic_thread_fence (__ATOMIC_RELEASE) 832 #define ECB_MEMORY_FENCE_RELEASE __atomic_thread_fence (__ATOMIC_RELEASE)
833 #define ECB_MEMORY_FENCE_RELAXED __atomic_thread_fence (__ATOMIC_RELAXED)
747 834
748 #elif ECB_CLANG_EXTENSION(c_atomic) 835 #elif ECB_CLANG_EXTENSION(c_atomic)
749 /* see comment below (stdatomic.h) about the C11 memory model. */ 836 /* see comment below (stdatomic.h) about the C11 memory model. */
750 #define ECB_MEMORY_FENCE __c11_atomic_thread_fence (__ATOMIC_SEQ_CST) 837 #define ECB_MEMORY_FENCE __c11_atomic_thread_fence (__ATOMIC_SEQ_CST)
751 #define ECB_MEMORY_FENCE_ACQUIRE __c11_atomic_thread_fence (__ATOMIC_ACQUIRE) 838 #define ECB_MEMORY_FENCE_ACQUIRE __c11_atomic_thread_fence (__ATOMIC_ACQUIRE)
752 #define ECB_MEMORY_FENCE_RELEASE __c11_atomic_thread_fence (__ATOMIC_RELEASE) 839 #define ECB_MEMORY_FENCE_RELEASE __c11_atomic_thread_fence (__ATOMIC_RELEASE)
840 #define ECB_MEMORY_FENCE_RELAXED __c11_atomic_thread_fence (__ATOMIC_RELAXED)
753 841
754 #elif ECB_GCC_VERSION(4,4) || defined __INTEL_COMPILER || defined __clang__ 842 #elif ECB_GCC_VERSION(4,4) || defined __INTEL_COMPILER || defined __clang__
755 #define ECB_MEMORY_FENCE __sync_synchronize () 843 #define ECB_MEMORY_FENCE __sync_synchronize ()
756 #elif _MSC_VER >= 1500 /* VC++ 2008 */ 844 #elif _MSC_VER >= 1500 /* VC++ 2008 */
757 /* apparently, microsoft broke all the memory barrier stuff in Visual Studio 2008... */ 845 /* apparently, microsoft broke all the memory barrier stuff in Visual Studio 2008... */
767 #elif defined _WIN32 855 #elif defined _WIN32
768 #include <WinNT.h> 856 #include <WinNT.h>
769 #define ECB_MEMORY_FENCE MemoryBarrier () /* actually just xchg on x86... scary */ 857 #define ECB_MEMORY_FENCE MemoryBarrier () /* actually just xchg on x86... scary */
770 #elif __SUNPRO_C >= 0x5110 || __SUNPRO_CC >= 0x5110 858 #elif __SUNPRO_C >= 0x5110 || __SUNPRO_CC >= 0x5110
771 #include <mbarrier.h> 859 #include <mbarrier.h>
772 #define ECB_MEMORY_FENCE __machine_rw_barrier () 860 #define ECB_MEMORY_FENCE __machine_rw_barrier ()
773 #define ECB_MEMORY_FENCE_ACQUIRE __machine_r_barrier () 861 #define ECB_MEMORY_FENCE_ACQUIRE __machine_acq_barrier ()
774 #define ECB_MEMORY_FENCE_RELEASE __machine_w_barrier () 862 #define ECB_MEMORY_FENCE_RELEASE __machine_rel_barrier ()
863 #define ECB_MEMORY_FENCE_RELAXED __compiler_barrier ()
775 #elif __xlC__ 864 #elif __xlC__
776 #define ECB_MEMORY_FENCE __sync () 865 #define ECB_MEMORY_FENCE __sync ()
777 #endif 866 #endif
778#endif 867#endif
779 868
780#ifndef ECB_MEMORY_FENCE 869#ifndef ECB_MEMORY_FENCE
781 #if ECB_C11 && !defined __STDC_NO_ATOMICS__ 870 #if ECB_C11 && !defined __STDC_NO_ATOMICS__
782 /* we assume that these memory fences work on all variables/all memory accesses, */ 871 /* we assume that these memory fences work on all variables/all memory accesses, */
783 /* not just C11 atomics and atomic accesses */ 872 /* not just C11 atomics and atomic accesses */
784 #include <stdatomic.h> 873 #include <stdatomic.h>
785 /* Unfortunately, neither gcc 4.7 nor clang 3.1 generate any instructions for */
786 /* any fence other than seq_cst, which isn't very efficient for us. */
787 /* Why that is, we don't know - either the C11 memory model is quite useless */
788 /* for most usages, or gcc and clang have a bug */
789 /* I *currently* lean towards the latter, and inefficiently implement */
790 /* all three of ecb's fences as a seq_cst fence */
791 /* Update, gcc-4.8 generates mfence for all c++ fences, but nothing */
792 /* for all __atomic_thread_fence's except seq_cst */
793 #define ECB_MEMORY_FENCE atomic_thread_fence (memory_order_seq_cst) 874 #define ECB_MEMORY_FENCE atomic_thread_fence (memory_order_seq_cst)
875 #define ECB_MEMORY_FENCE_ACQUIRE atomic_thread_fence (memory_order_acquire)
876 #define ECB_MEMORY_FENCE_RELEASE atomic_thread_fence (memory_order_release)
794 #endif 877 #endif
795#endif 878#endif
796 879
797#ifndef ECB_MEMORY_FENCE 880#ifndef ECB_MEMORY_FENCE
798 #if !ECB_AVOID_PTHREADS 881 #if !ECB_AVOID_PTHREADS
816 #define ECB_MEMORY_FENCE_ACQUIRE ECB_MEMORY_FENCE 899 #define ECB_MEMORY_FENCE_ACQUIRE ECB_MEMORY_FENCE
817#endif 900#endif
818 901
819#if !defined ECB_MEMORY_FENCE_RELEASE && defined ECB_MEMORY_FENCE 902#if !defined ECB_MEMORY_FENCE_RELEASE && defined ECB_MEMORY_FENCE
820 #define ECB_MEMORY_FENCE_RELEASE ECB_MEMORY_FENCE 903 #define ECB_MEMORY_FENCE_RELEASE ECB_MEMORY_FENCE
904#endif
905
906#if !defined ECB_MEMORY_FENCE_RELAXED && defined ECB_MEMORY_FENCE
907 #define ECB_MEMORY_FENCE_RELAXED ECB_MEMORY_FENCE /* very heavy-handed */
821#endif 908#endif
822 909
823/*****************************************************************************/ 910/*****************************************************************************/
824 911
825#if ECB_CPP 912#if ECB_CPP
1534/* ECB.H END */ 1621/* ECB.H END */
1535 1622
1536#if ECB_MEMORY_FENCE_NEEDS_PTHREADS 1623#if ECB_MEMORY_FENCE_NEEDS_PTHREADS
1537/* if your architecture doesn't need memory fences, e.g. because it is 1624/* if your architecture doesn't need memory fences, e.g. because it is
1538 * single-cpu/core, or if you use libev in a project that doesn't use libev 1625 * single-cpu/core, or if you use libev in a project that doesn't use libev
1539 * from multiple threads, then you can define ECB_AVOID_PTHREADS when compiling 1626 * from multiple threads, then you can define ECB_NO_THREADS when compiling
1540 * libev, in which cases the memory fences become nops. 1627 * libev, in which cases the memory fences become nops.
1541 * alternatively, you can remove this #error and link against libpthread, 1628 * alternatively, you can remove this #error and link against libpthread,
1542 * which will then provide the memory fences. 1629 * which will then provide the memory fences.
1543 */ 1630 */
1544# error "memory fences not defined for your architecture, please report" 1631# error "memory fences not defined for your architecture, please report"
1548# define ECB_MEMORY_FENCE do { } while (0) 1635# define ECB_MEMORY_FENCE do { } while (0)
1549# define ECB_MEMORY_FENCE_ACQUIRE ECB_MEMORY_FENCE 1636# define ECB_MEMORY_FENCE_ACQUIRE ECB_MEMORY_FENCE
1550# define ECB_MEMORY_FENCE_RELEASE ECB_MEMORY_FENCE 1637# define ECB_MEMORY_FENCE_RELEASE ECB_MEMORY_FENCE
1551#endif 1638#endif
1552 1639
1553#define expect_false(cond) ecb_expect_false (cond)
1554#define expect_true(cond) ecb_expect_true (cond)
1555#define noinline ecb_noinline
1556
1557#define inline_size ecb_inline 1640#define inline_size ecb_inline
1558 1641
1559#if EV_FEATURE_CODE 1642#if EV_FEATURE_CODE
1560# define inline_speed ecb_inline 1643# define inline_speed ecb_inline
1561#else 1644#else
1562# define inline_speed noinline static 1645# define inline_speed ecb_noinline static
1563#endif 1646#endif
1647
1648/*****************************************************************************/
1649/* raw syscall wrappers */
1650
1651#if EV_NEED_SYSCALL
1652
1653#include <sys/syscall.h>
1654
1655/*
1656 * define some syscall wrappers for common architectures
1657 * this is mostly for nice looks during debugging, not performance.
1659 * our syscalls return < 0, not == -1, on error, which is good
1660 * enough for linux aio.
1661 * TODO: arm is also common nowadays, maybe even mips and x86
1662 * TODO: after implementing this, it suddenly looks like overkill, but it's hard to remove...
1662 */
1663#if __GNUC__ && __linux && ECB_AMD64 && !defined __OPTIMIZE_SIZE__
1664 /* the costly errno access probably kills this for size optimisation */
1665
1666 #define ev_syscall(nr,narg,arg1,arg2,arg3,arg4,arg5,arg6) \
1667 ({ \
1668 long res; \
1669 register unsigned long r6 __asm__ ("r9" ); \
1670 register unsigned long r5 __asm__ ("r8" ); \
1671 register unsigned long r4 __asm__ ("r10"); \
1672 register unsigned long r3 __asm__ ("rdx"); \
1673 register unsigned long r2 __asm__ ("rsi"); \
1674 register unsigned long r1 __asm__ ("rdi"); \
1675 if (narg >= 6) r6 = (unsigned long)(arg6); \
1676 if (narg >= 5) r5 = (unsigned long)(arg5); \
1677 if (narg >= 4) r4 = (unsigned long)(arg4); \
1678 if (narg >= 3) r3 = (unsigned long)(arg3); \
1679 if (narg >= 2) r2 = (unsigned long)(arg2); \
1680 if (narg >= 1) r1 = (unsigned long)(arg1); \
1681 __asm__ __volatile__ ( \
1682 "syscall\n\t" \
1683 : "=a" (res) \
1684 : "0" (nr), "r" (r1), "r" (r2), "r" (r3), "r" (r4), "r" (r5) \
1685 : "cc", "r11", "cx", "memory"); \
1686 errno = -res; \
1687 res; \
1688 })
1689
1690#endif
1691
1692#ifdef ev_syscall
1693 #define ev_syscall0(nr) ev_syscall (nr, 0, 0, 0, 0, 0, 0, 0)
1694 #define ev_syscall1(nr,arg1) ev_syscall (nr, 1, arg1, 0, 0, 0, 0, 0)
1695 #define ev_syscall2(nr,arg1,arg2) ev_syscall (nr, 2, arg1, arg2, 0, 0, 0, 0)
1696 #define ev_syscall3(nr,arg1,arg2,arg3) ev_syscall (nr, 3, arg1, arg2, arg3, 0, 0, 0)
1698 #define ev_syscall4(nr,arg1,arg2,arg3,arg4) ev_syscall (nr, 4, arg1, arg2, arg3, arg4, 0, 0)
1698 #define ev_syscall5(nr,arg1,arg2,arg3,arg4,arg5) ev_syscall (nr, 5, arg1, arg2, arg3, arg4, arg5, 0)
1699 #define ev_syscall6(nr,arg1,arg2,arg3,arg4,arg5,arg6) ev_syscall (nr, 6, arg1, arg2, arg3, arg4, arg5,arg6)
1700#else
1701 #define ev_syscall0(nr) syscall (nr)
1702 #define ev_syscall1(nr,arg1) syscall (nr, arg1)
1703 #define ev_syscall2(nr,arg1,arg2) syscall (nr, arg1, arg2)
1704 #define ev_syscall3(nr,arg1,arg2,arg3) syscall (nr, arg1, arg2, arg3)
1705 #define ev_syscall4(nr,arg1,arg2,arg3,arg4) syscall (nr, arg1, arg2, arg3, arg4)
1706 #define ev_syscall5(nr,arg1,arg2,arg3,arg4,arg5) syscall (nr, arg1, arg2, arg3, arg4, arg5)
1707 #define ev_syscall6(nr,arg1,arg2,arg3,arg4,arg5,arg6) syscall (nr, arg1, arg2, arg3, arg4, arg5,arg6)
1708#endif
1709
1710#endif
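/* Illustrative sketch, not part of ev.c: the ev_syscallN wrappers above resolve
 * either to the amd64 inline-assembly version or to plain syscall (2). The fallback
 * path can be exercised on any Linux system like this; SYS_getpid is merely a
 * convenient, argument-free example syscall. */
#define _GNU_SOURCE
#include <stdio.h>
#include <unistd.h>
#include <sys/syscall.h>

#define ev_syscall0(nr)      syscall (nr)
#define ev_syscall1(nr,arg1) syscall (nr, arg1)

int
main (void)
{
  long pid = ev_syscall0 (SYS_getpid);   /* direct syscall, bypassing the libc wrapper */

  printf ("pid via raw syscall: %ld\n", pid);
  return 0;
}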
1711
1712/*****************************************************************************/
1564 1713
1565#define NUMPRI (EV_MAXPRI - EV_MINPRI + 1) 1714#define NUMPRI (EV_MAXPRI - EV_MINPRI + 1)
1566 1715
1567#if EV_MINPRI == EV_MAXPRI 1716#if EV_MINPRI == EV_MAXPRI
1568# define ABSPRI(w) (((W)w), 0) 1717# define ABSPRI(w) (((W)w), 0)
1617#else 1766#else
1618 1767
1619#include <float.h> 1768#include <float.h>
1620 1769
1621/* a floor() replacement function, should be independent of ev_tstamp type */ 1770/* a floor() replacement function, should be independent of ev_tstamp type */
1622noinline 1771ecb_noinline
1623static ev_tstamp 1772static ev_tstamp
1624ev_floor (ev_tstamp v) 1773ev_floor (ev_tstamp v)
1625{ 1774{
1626 /* the choice of shift factor is not terribly important */ 1775 /* the choice of shift factor is not terribly important */
1627#if FLT_RADIX != 2 /* assume FLT_RADIX == 10 */ 1776#if FLT_RADIX != 2 /* assume FLT_RADIX == 10 */
1628 const ev_tstamp shift = sizeof (unsigned long) >= 8 ? 10000000000000000000. : 1000000000.; 1777 const ev_tstamp shift = sizeof (unsigned long) >= 8 ? 10000000000000000000. : 1000000000.;
1629#else 1778#else
1630 const ev_tstamp shift = sizeof (unsigned long) >= 8 ? 18446744073709551616. : 4294967296.; 1779 const ev_tstamp shift = sizeof (unsigned long) >= 8 ? 18446744073709551616. : 4294967296.;
1631#endif 1780#endif
1632 1781
1782 /* special treatment for negative arguments */
1783 if (ecb_expect_false (v < 0.))
1784 {
1785 ev_tstamp f = -ev_floor (-v);
1786
1787 return f - (f == v ? 0 : 1);
1788 }
1789
1633 /* argument too large for an unsigned long? */ 1790 /* argument too large for an unsigned long? then reduce it */
1634 if (expect_false (v >= shift)) 1791 if (ecb_expect_false (v >= shift))
1635 { 1792 {
1636 ev_tstamp f; 1793 ev_tstamp f;
1637 1794
1638 if (v == v - 1.) 1795 if (v == v - 1.)
1639 return v; /* very large number */ 1796 return v; /* very large numbers are assumed to be integer */
1640 1797
1641 f = shift * ev_floor (v * (1. / shift)); 1798 f = shift * ev_floor (v * (1. / shift));
1642 return f + ev_floor (v - f); 1799 return f + ev_floor (v - f);
1643 } 1800 }
1644 1801
1645 /* special treatment for negative args? */
1646 if (expect_false (v < 0.))
1647 {
1648 ev_tstamp f = -ev_floor (-v);
1649
1650 return f - (f == v ? 0 : 1);
1651 }
1652
1653 /* fits into an unsigned long */ 1802 /* fits into an unsigned long */
1654 return (unsigned long)v; 1803 return (unsigned long)v;
1655} 1804}
1656 1805
1657#endif 1806#endif
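/* Illustrative sketch, not part of ev.c: a standalone double-only stand-in for the
 * ev_floor () replacement above, with worked values for the negative and ordinary
 * cases. Assumes a 64-bit unsigned long; the names used here are hypothetical. */
#include <stdio.h>

static double
my_floor (double v)
{
  const double shift = 18446744073709551616.; /* 2**64, as in the FLT_RADIX == 2 case */

  if (v < 0.)                                 /* negative: floor (-1.25) == -2, floor (-2.) == -2 */
    {
      double f = -my_floor (-v);
      return f - (f == v ? 0 : 1);
    }

  if (v >= shift)                             /* too large for an unsigned long: reduce recursively */
    {
      double f;

      if (v == v - 1.)
        return v;                             /* so large it must already be an integer */

      f = shift * my_floor (v * (1. / shift));
      return f + my_floor (v - f);
    }

  return (double)(unsigned long)v;            /* fits: truncate via unsigned long, as ev_floor does */
}

int
main (void)
{
  printf ("%.1f %.1f %.1f\n", my_floor (3.7), my_floor (-1.25), my_floor (-2.0)); /* 3.0 -2.0 -2.0 */
  return 0;
}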
1660 1809
1661#ifdef __linux 1810#ifdef __linux
1662# include <sys/utsname.h> 1811# include <sys/utsname.h>
1663#endif 1812#endif
1664 1813
1665noinline ecb_cold 1814ecb_noinline ecb_cold
1666static unsigned int 1815static unsigned int
1667ev_linux_version (void) 1816ev_linux_version (void)
1668{ 1817{
1669#ifdef __linux 1818#ifdef __linux
1670 unsigned int v = 0; 1819 unsigned int v = 0;
1700} 1849}
1701 1850
1702/*****************************************************************************/ 1851/*****************************************************************************/
1703 1852
1704#if EV_AVOID_STDIO 1853#if EV_AVOID_STDIO
1705noinline ecb_cold 1854ecb_noinline ecb_cold
1706static void 1855static void
1707ev_printerr (const char *msg) 1856ev_printerr (const char *msg)
1708{ 1857{
1709 write (STDERR_FILENO, msg, strlen (msg)); 1858 write (STDERR_FILENO, msg, strlen (msg));
1710} 1859}
1717ev_set_syserr_cb (void (*cb)(const char *msg) EV_NOEXCEPT) EV_NOEXCEPT 1866ev_set_syserr_cb (void (*cb)(const char *msg) EV_NOEXCEPT) EV_NOEXCEPT
1718{ 1867{
1719 syserr_cb = cb; 1868 syserr_cb = cb;
1720} 1869}
1721 1870
1722noinline ecb_cold 1871ecb_noinline ecb_cold
1723static void 1872static void
1724ev_syserr (const char *msg) 1873ev_syserr (const char *msg)
1725{ 1874{
1726 if (!msg) 1875 if (!msg)
1727 msg = "(libev) system error"; 1876 msg = "(libev) system error";
1799{ 1948{
1800 WL head; 1949 WL head;
1801 unsigned char events; /* the events watched for */ 1950 unsigned char events; /* the events watched for */
1802 unsigned char reify; /* flag set when this ANFD needs reification (EV_ANFD_REIFY, EV__IOFDSET) */ 1951 unsigned char reify; /* flag set when this ANFD needs reification (EV_ANFD_REIFY, EV__IOFDSET) */
1803 unsigned char emask; /* some backends store the actual kernel mask in here */ 1952 unsigned char emask; /* some backends store the actual kernel mask in here */
1804 unsigned char unused; 1953 unsigned char eflags; /* flags field for use by backends */
1805#if EV_USE_EPOLL 1954#if EV_USE_EPOLL
1806 unsigned int egen; /* generation counter to counter epoll bugs */ 1955 unsigned int egen; /* generation counter to counter epoll bugs */
1807#endif 1956#endif
1808#if EV_SELECT_IS_WINSOCKET || EV_USE_IOCP 1957#if EV_SELECT_IS_WINSOCKET || EV_USE_IOCP
1809 SOCKET handle; 1958 SOCKET handle;
1863 static struct ev_loop default_loop_struct; 2012 static struct ev_loop default_loop_struct;
1864 EV_API_DECL struct ev_loop *ev_default_loop_ptr = 0; /* needs to be initialised to make it a definition despite extern */ 2013 EV_API_DECL struct ev_loop *ev_default_loop_ptr = 0; /* needs to be initialised to make it a definition despite extern */
1865 2014
1866#else 2015#else
1867 2016
1868 EV_API_DECL ev_tstamp ev_rt_now = 0; /* needs to be initialised to make it a definition despite extern */ 2017 EV_API_DECL ev_tstamp ev_rt_now = EV_TS_CONST (0.); /* needs to be initialised to make it a definition despite extern */
1869 #define VAR(name,decl) static decl; 2018 #define VAR(name,decl) static decl;
1870 #include "ev_vars.h" 2019 #include "ev_vars.h"
1871 #undef VAR 2020 #undef VAR
1872 2021
1873 static int ev_default_loop_ptr; 2022 static int ev_default_loop_ptr;
1874 2023
1875#endif 2024#endif
1876 2025
1877#if EV_FEATURE_API 2026#if EV_FEATURE_API
1878# define EV_RELEASE_CB if (expect_false (release_cb)) release_cb (EV_A) 2027# define EV_RELEASE_CB if (ecb_expect_false (release_cb)) release_cb (EV_A)
1879# define EV_ACQUIRE_CB if (expect_false (acquire_cb)) acquire_cb (EV_A) 2028# define EV_ACQUIRE_CB if (ecb_expect_false (acquire_cb)) acquire_cb (EV_A)
1880# define EV_INVOKE_PENDING invoke_cb (EV_A) 2029# define EV_INVOKE_PENDING invoke_cb (EV_A)
1881#else 2030#else
1882# define EV_RELEASE_CB (void)0 2031# define EV_RELEASE_CB (void)0
1883# define EV_ACQUIRE_CB (void)0 2032# define EV_ACQUIRE_CB (void)0
1884# define EV_INVOKE_PENDING ev_invoke_pending (EV_A) 2033# define EV_INVOKE_PENDING ev_invoke_pending (EV_A)
1891#ifndef EV_HAVE_EV_TIME 2040#ifndef EV_HAVE_EV_TIME
1892ev_tstamp 2041ev_tstamp
1893ev_time (void) EV_NOEXCEPT 2042ev_time (void) EV_NOEXCEPT
1894{ 2043{
1895#if EV_USE_REALTIME 2044#if EV_USE_REALTIME
1896 if (expect_true (have_realtime)) 2045 if (ecb_expect_true (have_realtime))
1897 { 2046 {
1898 struct timespec ts; 2047 struct timespec ts;
1899 clock_gettime (CLOCK_REALTIME, &ts); 2048 clock_gettime (CLOCK_REALTIME, &ts);
1900 return ts.tv_sec + ts.tv_nsec * 1e-9; 2049 return EV_TS_GET (ts);
1901 } 2050 }
1902#endif 2051#endif
1903 2052
2053 {
1904 struct timeval tv; 2054 struct timeval tv;
1905 gettimeofday (&tv, 0); 2055 gettimeofday (&tv, 0);
1906 return tv.tv_sec + tv.tv_usec * 1e-6; 2056 return EV_TV_GET (tv);
2057 }
1907} 2058}
1908#endif 2059#endif
1909 2060
1910inline_size ev_tstamp 2061inline_size ev_tstamp
1911get_clock (void) 2062get_clock (void)
1912{ 2063{
1913#if EV_USE_MONOTONIC 2064#if EV_USE_MONOTONIC
1914 if (expect_true (have_monotonic)) 2065 if (ecb_expect_true (have_monotonic))
1915 { 2066 {
1916 struct timespec ts; 2067 struct timespec ts;
1917 clock_gettime (CLOCK_MONOTONIC, &ts); 2068 clock_gettime (CLOCK_MONOTONIC, &ts);
1918 return ts.tv_sec + ts.tv_nsec * 1e-9; 2069 return EV_TS_GET (ts);
1919 } 2070 }
1920#endif 2071#endif
1921 2072
1922 return ev_time (); 2073 return ev_time ();
1923} 2074}
1931#endif 2082#endif
1932 2083
1933void 2084void
1934ev_sleep (ev_tstamp delay) EV_NOEXCEPT 2085ev_sleep (ev_tstamp delay) EV_NOEXCEPT
1935{ 2086{
1936 if (delay > 0.) 2087 if (delay > EV_TS_CONST (0.))
1937 { 2088 {
1938#if EV_USE_NANOSLEEP 2089#if EV_USE_NANOSLEEP
1939 struct timespec ts; 2090 struct timespec ts;
1940 2091
1941 EV_TS_SET (ts, delay); 2092 EV_TS_SET (ts, delay);
1942 nanosleep (&ts, 0); 2093 nanosleep (&ts, 0);
1943#elif defined _WIN32 2094#elif defined _WIN32
1944 /* maybe this should round up, as ms is very low resolution */ 2095 /* maybe this should round up, as ms is very low resolution */
1945 /* compared to select (µs) or nanosleep (ns) */ 2096 /* compared to select (µs) or nanosleep (ns) */
1946 Sleep ((unsigned long)(delay * 1e3)); 2097 Sleep ((unsigned long)(EV_TS_TO_MSEC (delay)));
1947#else 2098#else
1948 struct timeval tv; 2099 struct timeval tv;
1949 2100
1950 /* here we rely on sys/time.h + sys/types.h + unistd.h providing select */ 2101 /* here we rely on sys/time.h + sys/types.h + unistd.h providing select */
1951 /* something not guaranteed by newer posix versions, but guaranteed */ 2102 /* something not guaranteed by newer posix versions, but guaranteed */
1981 } 2132 }
1982 2133
1983 return ncur; 2134 return ncur;
1984} 2135}
1985 2136
1986noinline ecb_cold 2137ecb_noinline ecb_cold
1987static void * 2138static void *
1988array_realloc (int elem, void *base, int *cur, int cnt) 2139array_realloc (int elem, void *base, int *cur, int cnt)
1989{ 2140{
1990 *cur = array_nextsize (elem, *cur, cnt); 2141 *cur = array_nextsize (elem, *cur, cnt);
1991 return ev_realloc (base, elem * *cur); 2142 return ev_realloc (base, elem * *cur);
1992} 2143}
1993 2144
1994#define array_needsize_noinit(base,count) 2145#define array_needsize_noinit(base,offset,count)
1995 2146
1996#define array_needsize_zerofill(base,count) \ 2147#define array_needsize_zerofill(base,offset,count) \
1997 memset ((void *)(base), 0, sizeof (*(base)) * (count)) 2148 memset ((void *)(base + offset), 0, sizeof (*(base)) * (count))
1998 2149
1999#define array_needsize(type,base,cur,cnt,init) \ 2150#define array_needsize(type,base,cur,cnt,init) \
2000 if (expect_false ((cnt) > (cur))) \ 2151 if (ecb_expect_false ((cnt) > (cur))) \
2001 { \ 2152 { \
2002 ecb_unused int ocur_ = (cur); \ 2153 ecb_unused int ocur_ = (cur); \
2003 (base) = (type *)array_realloc \ 2154 (base) = (type *)array_realloc \
2004 (sizeof (type), (base), &(cur), (cnt)); \ 2155 (sizeof (type), (base), &(cur), (cnt)); \
2005 init ((base) + (ocur_), (cur) - ocur_); \ 2156 init ((base), ocur_, ((cur) - ocur_)); \
2006 } 2157 }
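/* Illustrative sketch, not part of ev.c: what the reworked array_needsize ()
 * initialisers do. The init hook is now called as init (base, old_count, grown_by)
 * instead of init (base + old_count, grown_by), so array_needsize_zerofill clears
 * exactly the newly added tail elements. my_grow () below is a simplified,
 * hypothetical stand-in for libev's array_realloc (). */
#include <stdlib.h>
#include <string.h>

#define array_needsize_noinit(base,offset,count)
#define array_needsize_zerofill(base,offset,count) \
  memset ((void *)(base + offset), 0, sizeof (*(base)) * (count))

static int *
my_grow (int *base, int *cur, int cnt)
{
  int ocur = *cur;

  while (*cur < cnt)
    *cur = *cur ? *cur * 2 : 4;              /* naive doubling, unlike libev's array_nextsize () */

  base = realloc (base, sizeof (int) * *cur);
  if (!base)
    abort ();

  array_needsize_zerofill (base, ocur, *cur - ocur); /* zero only the new elements */
  return base;
}

int
main (void)
{
  int *a = 0, amax = 0;

  a = my_grow (a, &amax, 10);                /* a [0..amax-1] is now all zero */
  free (a);
  return 0;
}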
2007 2158
2008#if 0 2159#if 0
2009#define array_slim(type,stem) \ 2160#define array_slim(type,stem) \
2010 if (stem ## max < array_roundsize (stem ## cnt >> 2)) \ 2161 if (stem ## max < array_roundsize (stem ## cnt >> 2)) \
2019 ev_free (stem ## s idx); stem ## cnt idx = stem ## max idx = 0; stem ## s idx = 0 2170 ev_free (stem ## s idx); stem ## cnt idx = stem ## max idx = 0; stem ## s idx = 0
2020 2171
2021/*****************************************************************************/ 2172/*****************************************************************************/
2022 2173
2023/* dummy callback for pending events */ 2174/* dummy callback for pending events */
2024noinline 2175ecb_noinline
2025static void 2176static void
2026pendingcb (EV_P_ ev_prepare *w, int revents) 2177pendingcb (EV_P_ ev_prepare *w, int revents)
2027{ 2178{
2028} 2179}
2029 2180
2030noinline 2181ecb_noinline
2031void 2182void
2032ev_feed_event (EV_P_ void *w, int revents) EV_NOEXCEPT 2183ev_feed_event (EV_P_ void *w, int revents) EV_NOEXCEPT
2033{ 2184{
2034 W w_ = (W)w; 2185 W w_ = (W)w;
2035 int pri = ABSPRI (w_); 2186 int pri = ABSPRI (w_);
2036 2187
2037 if (expect_false (w_->pending)) 2188 if (ecb_expect_false (w_->pending))
2038 pendings [pri][w_->pending - 1].events |= revents; 2189 pendings [pri][w_->pending - 1].events |= revents;
2039 else 2190 else
2040 { 2191 {
2041 w_->pending = ++pendingcnt [pri]; 2192 w_->pending = ++pendingcnt [pri];
2042 array_needsize (ANPENDING, pendings [pri], pendingmax [pri], w_->pending, array_needsize_noinit); 2193 array_needsize (ANPENDING, pendings [pri], pendingmax [pri], w_->pending, array_needsize_noinit);
2093inline_speed void 2244inline_speed void
2094fd_event (EV_P_ int fd, int revents) 2245fd_event (EV_P_ int fd, int revents)
2095{ 2246{
2096 ANFD *anfd = anfds + fd; 2247 ANFD *anfd = anfds + fd;
2097 2248
2098 if (expect_true (!anfd->reify)) 2249 if (ecb_expect_true (!anfd->reify))
2099 fd_event_nocheck (EV_A_ fd, revents); 2250 fd_event_nocheck (EV_A_ fd, revents);
2100} 2251}
2101 2252
2102void 2253void
2103ev_feed_fd_event (EV_P_ int fd, int revents) EV_NOEXCEPT 2254ev_feed_fd_event (EV_P_ int fd, int revents) EV_NOEXCEPT
2145 ev_io *w; 2296 ev_io *w;
2146 2297
2147 unsigned char o_events = anfd->events; 2298 unsigned char o_events = anfd->events;
2148 unsigned char o_reify = anfd->reify; 2299 unsigned char o_reify = anfd->reify;
2149 2300
2150 anfd->reify = 0; 2301 anfd->reify = 0;
2151 2302
2152 /*if (expect_true (o_reify & EV_ANFD_REIFY)) probably a deoptimisation */ 2303 /*if (ecb_expect_true (o_reify & EV_ANFD_REIFY)) probably a deoptimisation */
2153 { 2304 {
2154 anfd->events = 0; 2305 anfd->events = 0;
2155 2306
2156 for (w = (ev_io *)anfd->head; w; w = (ev_io *)((WL)w)->next) 2307 for (w = (ev_io *)anfd->head; w; w = (ev_io *)((WL)w)->next)
2157 anfd->events |= (unsigned char)w->events; 2308 anfd->events |= (unsigned char)w->events;
2173fd_change (EV_P_ int fd, int flags) 2324fd_change (EV_P_ int fd, int flags)
2174{ 2325{
2175 unsigned char reify = anfds [fd].reify; 2326 unsigned char reify = anfds [fd].reify;
2176 anfds [fd].reify |= flags; 2327 anfds [fd].reify |= flags;
2177 2328
2178 if (expect_true (!reify)) 2329 if (ecb_expect_true (!reify))
2179 { 2330 {
2180 ++fdchangecnt; 2331 ++fdchangecnt;
2181 array_needsize (int, fdchanges, fdchangemax, fdchangecnt, array_needsize_noinit); 2332 array_needsize (int, fdchanges, fdchangemax, fdchangecnt, array_needsize_noinit);
2182 fdchanges [fdchangecnt - 1] = fd; 2333 fdchanges [fdchangecnt - 1] = fd;
2183 } 2334 }
2206 return fcntl (fd, F_GETFD) != -1; 2357 return fcntl (fd, F_GETFD) != -1;
2207#endif 2358#endif
2208} 2359}
2209 2360
2210/* called on EBADF to verify fds */ 2361/* called on EBADF to verify fds */
2211noinline ecb_cold 2362ecb_noinline ecb_cold
2212static void 2363static void
2213fd_ebadf (EV_P) 2364fd_ebadf (EV_P)
2214{ 2365{
2215 int fd; 2366 int fd;
2216 2367
2219 if (!fd_valid (fd) && errno == EBADF) 2370 if (!fd_valid (fd) && errno == EBADF)
2220 fd_kill (EV_A_ fd); 2371 fd_kill (EV_A_ fd);
2221} 2372}
2222 2373
2223/* called on ENOMEM in select/poll to kill some fds and retry */ 2374/* called on ENOMEM in select/poll to kill some fds and retry */
2224noinline ecb_cold 2375ecb_noinline ecb_cold
2225static void 2376static void
2226fd_enomem (EV_P) 2377fd_enomem (EV_P)
2227{ 2378{
2228 int fd; 2379 int fd;
2229 2380
2234 break; 2385 break;
2235 } 2386 }
2236} 2387}
2237 2388
2238/* usually called after fork if backend needs to re-arm all fds from scratch */ 2389/* usually called after fork if backend needs to re-arm all fds from scratch */
2239noinline 2390ecb_noinline
2240static void 2391static void
2241fd_rearm_all (EV_P) 2392fd_rearm_all (EV_P)
2242{ 2393{
2243 int fd; 2394 int fd;
2244 2395
2298 ev_tstamp minat; 2449 ev_tstamp minat;
2299 ANHE *minpos; 2450 ANHE *minpos;
2300 ANHE *pos = heap + DHEAP * (k - HEAP0) + HEAP0 + 1; 2451 ANHE *pos = heap + DHEAP * (k - HEAP0) + HEAP0 + 1;
2301 2452
2302 /* find minimum child */ 2453 /* find minimum child */
2303 if (expect_true (pos + DHEAP - 1 < E)) 2454 if (ecb_expect_true (pos + DHEAP - 1 < E))
2304 { 2455 {
2305 /* fast path */ (minpos = pos + 0), (minat = ANHE_at (*minpos)); 2456 /* fast path */ (minpos = pos + 0), (minat = ANHE_at (*minpos));
2306 if ( ANHE_at (pos [1]) < minat) (minpos = pos + 1), (minat = ANHE_at (*minpos)); 2457 if ( minat > ANHE_at (pos [1])) (minpos = pos + 1), (minat = ANHE_at (*minpos));
2307 if ( ANHE_at (pos [2]) < minat) (minpos = pos + 2), (minat = ANHE_at (*minpos)); 2458 if ( minat > ANHE_at (pos [2])) (minpos = pos + 2), (minat = ANHE_at (*minpos));
2308 if ( ANHE_at (pos [3]) < minat) (minpos = pos + 3), (minat = ANHE_at (*minpos)); 2459 if ( minat > ANHE_at (pos [3])) (minpos = pos + 3), (minat = ANHE_at (*minpos));
2309 } 2460 }
2310 else if (pos < E) 2461 else if (pos < E)
2311 { 2462 {
2312 /* slow path */ (minpos = pos + 0), (minat = ANHE_at (*minpos)); 2463 /* slow path */ (minpos = pos + 0), (minat = ANHE_at (*minpos));
2313 if (pos + 1 < E && ANHE_at (pos [1]) < minat) (minpos = pos + 1), (minat = ANHE_at (*minpos)); 2464 if (pos + 1 < E && minat > ANHE_at (pos [1])) (minpos = pos + 1), (minat = ANHE_at (*minpos));
2314 if (pos + 2 < E && ANHE_at (pos [2]) < minat) (minpos = pos + 2), (minat = ANHE_at (*minpos)); 2465 if (pos + 2 < E && minat > ANHE_at (pos [2])) (minpos = pos + 2), (minat = ANHE_at (*minpos));
2315 if (pos + 3 < E && ANHE_at (pos [3]) < minat) (minpos = pos + 3), (minat = ANHE_at (*minpos)); 2466 if (pos + 3 < E && minat > ANHE_at (pos [3])) (minpos = pos + 3), (minat = ANHE_at (*minpos));
2316 } 2467 }
2317 else 2468 else
2318 break; 2469 break;
2319 2470
2320 if (ANHE_at (he) <= minat) 2471 if (ANHE_at (he) <= minat)
2328 2479
2329 heap [k] = he; 2480 heap [k] = he;
2330 ev_active (ANHE_w (he)) = k; 2481 ev_active (ANHE_w (he)) = k;
2331} 2482}
2332 2483
2333#else /* 4HEAP */ 2484#else /* not 4HEAP */
2334 2485
2335#define HEAP0 1 2486#define HEAP0 1
2336#define HPARENT(k) ((k) >> 1) 2487#define HPARENT(k) ((k) >> 1)
2337#define UPHEAP_DONE(p,k) (!(p)) 2488#define UPHEAP_DONE(p,k) (!(p))
2338 2489
2426 2577
2427/*****************************************************************************/ 2578/*****************************************************************************/
2428 2579
2429#if EV_SIGNAL_ENABLE || EV_ASYNC_ENABLE 2580#if EV_SIGNAL_ENABLE || EV_ASYNC_ENABLE
2430 2581
2431noinline ecb_cold 2582ecb_noinline ecb_cold
2432static void 2583static void
2433evpipe_init (EV_P) 2584evpipe_init (EV_P)
2434{ 2585{
2435 if (!ev_is_active (&pipe_w)) 2586 if (!ev_is_active (&pipe_w))
2436 { 2587 {
2477inline_speed void 2628inline_speed void
2478evpipe_write (EV_P_ EV_ATOMIC_T *flag) 2629evpipe_write (EV_P_ EV_ATOMIC_T *flag)
2479{ 2630{
2480 ECB_MEMORY_FENCE; /* push out the write before this function was called, acquire flag */ 2631 ECB_MEMORY_FENCE; /* push out the write before this function was called, acquire flag */
2481 2632
2482 if (expect_true (*flag)) 2633 if (ecb_expect_true (*flag))
2483 return; 2634 return;
2484 2635
2485 *flag = 1; 2636 *flag = 1;
2486 ECB_MEMORY_FENCE_RELEASE; /* make sure flag is visible before the wakeup */ 2637 ECB_MEMORY_FENCE_RELEASE; /* make sure flag is visible before the wakeup */
2487 2638
2564 sig_pending = 0; 2715 sig_pending = 0;
2565 2716
2566 ECB_MEMORY_FENCE; 2717 ECB_MEMORY_FENCE;
2567 2718
2568 for (i = EV_NSIG - 1; i--; ) 2719 for (i = EV_NSIG - 1; i--; )
2569 if (expect_false (signals [i].pending)) 2720 if (ecb_expect_false (signals [i].pending))
2570 ev_feed_signal_event (EV_A_ i + 1); 2721 ev_feed_signal_event (EV_A_ i + 1);
2571 } 2722 }
2572#endif 2723#endif
2573 2724
2574#if EV_ASYNC_ENABLE 2725#if EV_ASYNC_ENABLE
2615#endif 2766#endif
2616 2767
2617 ev_feed_signal (signum); 2768 ev_feed_signal (signum);
2618} 2769}
2619 2770
2620noinline 2771ecb_noinline
2621void 2772void
2622ev_feed_signal_event (EV_P_ int signum) EV_NOEXCEPT 2773ev_feed_signal_event (EV_P_ int signum) EV_NOEXCEPT
2623{ 2774{
2624 WL w; 2775 WL w;
2625 2776
2626 if (expect_false (signum <= 0 || signum >= EV_NSIG)) 2777 if (ecb_expect_false (signum <= 0 || signum >= EV_NSIG))
2627 return; 2778 return;
2628 2779
2629 --signum; 2780 --signum;
2630 2781
2631#if EV_MULTIPLICITY 2782#if EV_MULTIPLICITY
2632 /* it is permissible to try to feed a signal to the wrong loop */ 2783 /* it is permissible to try to feed a signal to the wrong loop */
2633 /* or, likely more useful, feeding a signal nobody is waiting for */ 2784 /* or, likely more useful, feeding a signal nobody is waiting for */
2634 2785
2635 if (expect_false (signals [signum].loop != EV_A)) 2786 if (ecb_expect_false (signals [signum].loop != EV_A))
2636 return; 2787 return;
2637#endif 2788#endif
2638 2789
2639 signals [signum].pending = 0; 2790 signals [signum].pending = 0;
2640 ECB_MEMORY_FENCE_RELEASE; 2791 ECB_MEMORY_FENCE_RELEASE;
2724 2875
2725#endif 2876#endif
2726 2877
2727/*****************************************************************************/ 2878/*****************************************************************************/
2728 2879
2880#if EV_USE_TIMERFD
2881
2882static void periodics_reschedule (EV_P);
2883
2884static void
2885timerfdcb (EV_P_ ev_io *iow, int revents)
2886{
2887 struct itimerspec its = { 0 };
2888
2889 /* since we can't easily come up with a (portable) maximum value of time_t,
2890 * we wake up once per month, which hopefully is rare enough to not
2891 * be a problem. */
2892 its.it_value.tv_sec = ev_rt_now + 86400 * 30;
2893 timerfd_settime (timerfd, TFD_TIMER_ABSTIME | TFD_TIMER_CANCEL_ON_SET, &its, 0);
2894
2895 ev_rt_now = ev_time ();
2896 /* periodics_reschedule only needs ev_rt_now */
2897 /* but maybe in the future we want the full treatment. */
2898 /*
2899 now_floor = EV_TS_CONST (0.);
2900 time_update (EV_A_ EV_TSTAMP_HUGE);
2901 */
2902 periodics_reschedule (EV_A);
2903}
2904
2905ecb_noinline ecb_cold
2906static void
2907evtimerfd_init (EV_P)
2908{
2909 if (!ev_is_active (&timerfd_w))
2910 {
2911 timerfd = timerfd_create (CLOCK_REALTIME, TFD_NONBLOCK | TFD_CLOEXEC);
2912
2913 if (timerfd >= 0)
2914 {
2915 fd_intern (timerfd); /* just to be sure */
2916
2917 ev_io_init (&timerfd_w, timerfdcb, timerfd, EV_READ);
2918 ev_set_priority (&timerfd_w, EV_MINPRI);
2919 ev_io_start (EV_A_ &timerfd_w);
2920 ev_unref (EV_A); /* watcher should not keep loop alive */
2921
2922 /* (re-) arm timer */
2923 timerfdcb (EV_A_ 0, 0);
2924 }
2925 }
2926}
2927
2928#endif
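/* Illustrative sketch, not part of ev.c: the Linux kernel mechanism that
 * timerfdcb () above relies on. A CLOCK_REALTIME timerfd armed with
 * TFD_TIMER_ABSTIME | TFD_TIMER_CANCEL_ON_SET makes read () fail with ECANCELED
 * whenever somebody sets the wall clock, which is the wakeup the loop uses to
 * reschedule periodics. Linux-specific, error handling mostly omitted. */
#include <sys/timerfd.h>
#include <unistd.h>
#include <stdint.h>
#include <stdio.h>
#include <errno.h>
#include <time.h>

int
main (void)
{
  struct itimerspec its = { 0 };
  uint64_t ticks;
  int fd = timerfd_create (CLOCK_REALTIME, TFD_CLOEXEC);

  its.it_value.tv_sec = time (0) + 86400 * 30;   /* far in the future, as in timerfdcb () */
  timerfd_settime (fd, TFD_TIMER_ABSTIME | TFD_TIMER_CANCEL_ON_SET, &its, 0);

  /* blocks until the timer expires - or until someone changes the system clock,
   * in which case read () returns -1 with errno == ECANCELED */
  if (read (fd, &ticks, sizeof (ticks)) < 0 && errno == ECANCELED)
    printf ("realtime clock was changed\n");

  close (fd);
  return 0;
}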
2929
2930/*****************************************************************************/
2931
2729#if EV_USE_IOCP 2932#if EV_USE_IOCP
2730# include "ev_iocp.c" 2933# include "ev_iocp.c"
2731#endif 2934#endif
2732#if EV_USE_PORT 2935#if EV_USE_PORT
2733# include "ev_port.c" 2936# include "ev_port.c"
2738#if EV_USE_EPOLL 2941#if EV_USE_EPOLL
2739# include "ev_epoll.c" 2942# include "ev_epoll.c"
2740#endif 2943#endif
2741#if EV_USE_LINUXAIO 2944#if EV_USE_LINUXAIO
2742# include "ev_linuxaio.c" 2945# include "ev_linuxaio.c"
2946#endif
2947#if EV_USE_IOURING
2948# include "ev_iouring.c"
2743#endif 2949#endif
2744#if EV_USE_POLL 2950#if EV_USE_POLL
2745# include "ev_poll.c" 2951# include "ev_poll.c"
2746#endif 2952#endif
2747#if EV_USE_SELECT 2953#if EV_USE_SELECT
2780 2986
2781 if (EV_USE_PORT ) flags |= EVBACKEND_PORT; 2987 if (EV_USE_PORT ) flags |= EVBACKEND_PORT;
2782 if (EV_USE_KQUEUE ) flags |= EVBACKEND_KQUEUE; 2988 if (EV_USE_KQUEUE ) flags |= EVBACKEND_KQUEUE;
2783 if (EV_USE_EPOLL ) flags |= EVBACKEND_EPOLL; 2989 if (EV_USE_EPOLL ) flags |= EVBACKEND_EPOLL;
2784 if (EV_USE_LINUXAIO) flags |= EVBACKEND_LINUXAIO; 2990 if (EV_USE_LINUXAIO) flags |= EVBACKEND_LINUXAIO;
2991 if (EV_USE_IOURING ) flags |= EVBACKEND_IOURING;
2785 if (EV_USE_POLL ) flags |= EVBACKEND_POLL; 2992 if (EV_USE_POLL ) flags |= EVBACKEND_POLL;
2786 if (EV_USE_SELECT ) flags |= EVBACKEND_SELECT; 2993 if (EV_USE_SELECT ) flags |= EVBACKEND_SELECT;
2787 2994
2788 return flags; 2995 return flags;
2789} 2996}
2810 3017
2811 /* TODO: linuxaio is very experimental */ 3018 /* TODO: linuxaio is very experimental */
2812#if !EV_RECOMMEND_LINUXAIO 3019#if !EV_RECOMMEND_LINUXAIO
2813 flags &= ~EVBACKEND_LINUXAIO; 3020 flags &= ~EVBACKEND_LINUXAIO;
2814#endif 3021#endif
 3022 /* TODO: iouring is super experimental */
3023#if !EV_RECOMMEND_IOURING
3024 flags &= ~EVBACKEND_IOURING;
3025#endif
2815 3026
2816 return flags; 3027 return flags;
2817} 3028}
2818 3029
2819ecb_cold 3030ecb_cold
2824 3035
2825 /* epoll embeddability broken on all linux versions up to at least 2.6.23 */ 3036 /* epoll embeddability broken on all linux versions up to at least 2.6.23 */
2826 if (ev_linux_version () < 0x020620) /* disable it on linux < 2.6.32 */ 3037 if (ev_linux_version () < 0x020620) /* disable it on linux < 2.6.32 */
2827 flags &= ~EVBACKEND_EPOLL; 3038 flags &= ~EVBACKEND_EPOLL;
2828 3039
3040 /* EVBACKEND_LINUXAIO is theoretically embeddable, but suffers from a performance overhead */
3041
3042 /* EVBACKEND_IOURING is practically embeddable, but the current implementation is not
3043 * because our backend_fd is the epoll fd we need as fallback.
3044 * if the kernel ever is fixed, this might change...
3045 */
3046
2829 return flags; 3047 return flags;
2830} 3048}
2831 3049
2832unsigned int 3050unsigned int
2833ev_backend (EV_P) EV_NOEXCEPT 3051ev_backend (EV_P) EV_NOEXCEPT
2885 acquire_cb = acquire; 3103 acquire_cb = acquire;
2886} 3104}
2887#endif 3105#endif
2888 3106
2889/* initialise a loop structure, must be zero-initialised */ 3107/* initialise a loop structure, must be zero-initialised */
2890noinline ecb_cold 3108ecb_noinline ecb_cold
2891static void 3109static void
2892loop_init (EV_P_ unsigned int flags) EV_NOEXCEPT 3110loop_init (EV_P_ unsigned int flags) EV_NOEXCEPT
2893{ 3111{
2894 if (!backend) 3112 if (!backend)
2895 { 3113 {
2950 fs_fd = flags & EVFLAG_NOINOTIFY ? -1 : -2; 3168 fs_fd = flags & EVFLAG_NOINOTIFY ? -1 : -2;
2951#endif 3169#endif
2952#if EV_USE_SIGNALFD 3170#if EV_USE_SIGNALFD
2953 sigfd = flags & EVFLAG_SIGNALFD ? -2 : -1; 3171 sigfd = flags & EVFLAG_SIGNALFD ? -2 : -1;
2954#endif 3172#endif
3173#if EV_USE_TIMERFD
3174 timerfd = flags & EVFLAG_NOTIMERFD ? -1 : -2;
3175#endif
2955 3176
2956 if (!(flags & EVBACKEND_MASK)) 3177 if (!(flags & EVBACKEND_MASK))
2957 flags |= ev_recommended_backends (); 3178 flags |= ev_recommended_backends ();
2958 3179
2959#if EV_USE_IOCP 3180#if EV_USE_IOCP
2962#if EV_USE_PORT 3183#if EV_USE_PORT
2963 if (!backend && (flags & EVBACKEND_PORT )) backend = port_init (EV_A_ flags); 3184 if (!backend && (flags & EVBACKEND_PORT )) backend = port_init (EV_A_ flags);
2964#endif 3185#endif
2965#if EV_USE_KQUEUE 3186#if EV_USE_KQUEUE
2966 if (!backend && (flags & EVBACKEND_KQUEUE )) backend = kqueue_init (EV_A_ flags); 3187 if (!backend && (flags & EVBACKEND_KQUEUE )) backend = kqueue_init (EV_A_ flags);
3188#endif
3189#if EV_USE_IOURING
3190 if (!backend && (flags & EVBACKEND_IOURING )) backend = iouring_init (EV_A_ flags);
2967#endif 3191#endif
2968#if EV_USE_LINUXAIO 3192#if EV_USE_LINUXAIO
2969 if (!backend && (flags & EVBACKEND_LINUXAIO)) backend = linuxaio_init (EV_A_ flags); 3193 if (!backend && (flags & EVBACKEND_LINUXAIO)) backend = linuxaio_init (EV_A_ flags);
2970#endif 3194#endif
2971#if EV_USE_EPOLL 3195#if EV_USE_EPOLL
3000 return; 3224 return;
3001#endif 3225#endif
3002 3226
3003#if EV_CLEANUP_ENABLE 3227#if EV_CLEANUP_ENABLE
3004 /* queue cleanup watchers (and execute them) */ 3228 /* queue cleanup watchers (and execute them) */
3005 if (expect_false (cleanupcnt)) 3229 if (ecb_expect_false (cleanupcnt))
3006 { 3230 {
3007 queue_events (EV_A_ (W *)cleanups, cleanupcnt, EV_CLEANUP); 3231 queue_events (EV_A_ (W *)cleanups, cleanupcnt, EV_CLEANUP);
3008 EV_INVOKE_PENDING; 3232 EV_INVOKE_PENDING;
3009 } 3233 }
3010#endif 3234#endif
3029#if EV_USE_SIGNALFD 3253#if EV_USE_SIGNALFD
3030 if (ev_is_active (&sigfd_w)) 3254 if (ev_is_active (&sigfd_w))
3031 close (sigfd); 3255 close (sigfd);
3032#endif 3256#endif
3033 3257
3258#if EV_USE_TIMERFD
3259 if (ev_is_active (&timerfd_w))
3260 close (timerfd);
3261#endif
3262
3034#if EV_USE_INOTIFY 3263#if EV_USE_INOTIFY
3035 if (fs_fd >= 0) 3264 if (fs_fd >= 0)
3036 close (fs_fd); 3265 close (fs_fd);
3037#endif 3266#endif
3038 3267
3045#if EV_USE_PORT 3274#if EV_USE_PORT
3046 if (backend == EVBACKEND_PORT ) port_destroy (EV_A); 3275 if (backend == EVBACKEND_PORT ) port_destroy (EV_A);
3047#endif 3276#endif
3048#if EV_USE_KQUEUE 3277#if EV_USE_KQUEUE
3049 if (backend == EVBACKEND_KQUEUE ) kqueue_destroy (EV_A); 3278 if (backend == EVBACKEND_KQUEUE ) kqueue_destroy (EV_A);
3279#endif
3280#if EV_USE_IOURING
3281 if (backend == EVBACKEND_IOURING ) iouring_destroy (EV_A);
3050#endif 3282#endif
3051#if EV_USE_LINUXAIO 3283#if EV_USE_LINUXAIO
3052 if (backend == EVBACKEND_LINUXAIO) linuxaio_destroy (EV_A); 3284 if (backend == EVBACKEND_LINUXAIO) linuxaio_destroy (EV_A);
3053#endif 3285#endif
3054#if EV_USE_EPOLL 3286#if EV_USE_EPOLL
3113 if (backend == EVBACKEND_PORT ) port_fork (EV_A); 3345 if (backend == EVBACKEND_PORT ) port_fork (EV_A);
3114#endif 3346#endif
3115#if EV_USE_KQUEUE 3347#if EV_USE_KQUEUE
3116 if (backend == EVBACKEND_KQUEUE ) kqueue_fork (EV_A); 3348 if (backend == EVBACKEND_KQUEUE ) kqueue_fork (EV_A);
3117#endif 3349#endif
3350#if EV_USE_IOURING
3351 if (backend == EVBACKEND_IOURING ) iouring_fork (EV_A);
3352#endif
3118#if EV_USE_LINUXAIO 3353#if EV_USE_LINUXAIO
3119 if (backend == EVBACKEND_LINUXAIO) linuxaio_fork (EV_A); 3354 if (backend == EVBACKEND_LINUXAIO) linuxaio_fork (EV_A);
3120#endif 3355#endif
3121#if EV_USE_EPOLL 3356#if EV_USE_EPOLL
3122 if (backend == EVBACKEND_EPOLL ) epoll_fork (EV_A); 3357 if (backend == EVBACKEND_EPOLL ) epoll_fork (EV_A);
3123#endif 3358#endif
3124#if EV_USE_INOTIFY 3359#if EV_USE_INOTIFY
3125 infy_fork (EV_A); 3360 infy_fork (EV_A);
3126#endif 3361#endif
3127 3362
3363 if (postfork != 2)
3364 {
3365 #if EV_USE_SIGNALFD
 3366 /* surprisingly, nothing needs to be done for signalfd, according to the docs, it does the right thing on fork */
3367 #endif
3368
3369 #if EV_USE_TIMERFD
3370 if (ev_is_active (&timerfd_w))
3371 {
3372 ev_ref (EV_A);
3373 ev_io_stop (EV_A_ &timerfd_w);
3374
3375 close (timerfd);
3376 timerfd = -2;
3377
3378 evtimerfd_init (EV_A);
3379 /* reschedule periodics, in case we missed something */
3380 ev_feed_event (EV_A_ &timerfd_w, EV_CUSTOM);
3381 }
3382 #endif
3383
3128#if EV_SIGNAL_ENABLE || EV_ASYNC_ENABLE 3384 #if EV_SIGNAL_ENABLE || EV_ASYNC_ENABLE
3129 if (ev_is_active (&pipe_w) && postfork != 2) 3385 if (ev_is_active (&pipe_w))
3130 { 3386 {
3131 /* pipe_write_wanted must be false now, so modifying fd vars should be safe */ 3387 /* pipe_write_wanted must be false now, so modifying fd vars should be safe */
3132 3388
3133 ev_ref (EV_A); 3389 ev_ref (EV_A);
3134 ev_io_stop (EV_A_ &pipe_w); 3390 ev_io_stop (EV_A_ &pipe_w);
3135 3391
3136 if (evpipe [0] >= 0) 3392 if (evpipe [0] >= 0)
3137 EV_WIN32_CLOSE_FD (evpipe [0]); 3393 EV_WIN32_CLOSE_FD (evpipe [0]);
3138 3394
3139 evpipe_init (EV_A); 3395 evpipe_init (EV_A);
3140 /* iterate over everything, in case we missed something before */ 3396 /* iterate over everything, in case we missed something before */
3141 ev_feed_event (EV_A_ &pipe_w, EV_CUSTOM); 3397 ev_feed_event (EV_A_ &pipe_w, EV_CUSTOM);
3398 }
3399 #endif
3142 } 3400 }
3143#endif
3144 3401
3145 postfork = 0; 3402 postfork = 0;
3146} 3403}
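/* Illustrative sketch, not part of ev.c: how the postfork path above gets triggered
 * from application code. After fork (), the child must call ev_loop_fork () on every
 * loop it wants to keep using; the re-initialisation of the pipe/timerfd watchers
 * then happens on the next ev_run () iteration. */
#include <ev.h>
#include <sys/types.h>
#include <unistd.h>
#include <stdlib.h>

int
main (void)
{
  struct ev_loop *loop = EV_DEFAULT;         /* same as ev_default_loop (0) */
  pid_t pid = fork ();

  if (pid < 0)
    exit (1);

  if (pid == 0)
    ev_loop_fork (loop);                     /* child: mark the loop as forked */

  ev_run (loop, EVRUN_NOWAIT);               /* postfork handling runs here in the child */
  return 0;
}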
3147 3404
3148#if EV_MULTIPLICITY 3405#if EV_MULTIPLICITY
3164} 3421}
3165 3422
3166#endif /* multiplicity */ 3423#endif /* multiplicity */
3167 3424
3168#if EV_VERIFY 3425#if EV_VERIFY
3169noinline ecb_cold 3426ecb_noinline ecb_cold
3170static void 3427static void
3171verify_watcher (EV_P_ W w) 3428verify_watcher (EV_P_ W w)
3172{ 3429{
3173 assert (("libev: watcher has invalid priority", ABSPRI (w) >= 0 && ABSPRI (w) < NUMPRI)); 3430 assert (("libev: watcher has invalid priority", ABSPRI (w) >= 0 && ABSPRI (w) < NUMPRI));
3174 3431
3175 if (w->pending) 3432 if (w->pending)
3176 assert (("libev: pending watcher not on pending queue", pendings [ABSPRI (w)][w->pending - 1].w == w)); 3433 assert (("libev: pending watcher not on pending queue", pendings [ABSPRI (w)][w->pending - 1].w == w));
3177} 3434}
3178 3435
3179noinline ecb_cold 3436ecb_noinline ecb_cold
3180static void 3437static void
3181verify_heap (EV_P_ ANHE *heap, int N) 3438verify_heap (EV_P_ ANHE *heap, int N)
3182{ 3439{
3183 int i; 3440 int i;
3184 3441
3190 3447
3191 verify_watcher (EV_A_ (W)ANHE_w (heap [i])); 3448 verify_watcher (EV_A_ (W)ANHE_w (heap [i]));
3192 } 3449 }
3193} 3450}
3194 3451
3195noinline ecb_cold 3452ecb_noinline ecb_cold
3196static void 3453static void
3197array_verify (EV_P_ W *ws, int cnt) 3454array_verify (EV_P_ W *ws, int cnt)
3198{ 3455{
3199 while (cnt--) 3456 while (cnt--)
3200 { 3457 {
3349 count += pendingcnt [pri]; 3606 count += pendingcnt [pri];
3350 3607
3351 return count; 3608 return count;
3352} 3609}
3353 3610
3354noinline 3611ecb_noinline
3355void 3612void
3356ev_invoke_pending (EV_P) 3613ev_invoke_pending (EV_P)
3357{ 3614{
3358 pendingpri = NUMPRI; 3615 pendingpri = NUMPRI;
3359 3616
3378/* make idle watchers pending. this handles the "call-idle */ 3635/* make idle watchers pending. this handles the "call-idle */
3379/* only when higher priorities are idle" logic */ 3636/* only when higher priorities are idle" logic */
3380inline_size void 3637inline_size void
3381idle_reify (EV_P) 3638idle_reify (EV_P)
3382{ 3639{
3383 if (expect_false (idleall)) 3640 if (ecb_expect_false (idleall))
3384 { 3641 {
3385 int pri; 3642 int pri;
3386 3643
3387 for (pri = NUMPRI; pri--; ) 3644 for (pri = NUMPRI; pri--; )
3388 { 3645 {
3418 { 3675 {
3419 ev_at (w) += w->repeat; 3676 ev_at (w) += w->repeat;
3420 if (ev_at (w) < mn_now) 3677 if (ev_at (w) < mn_now)
3421 ev_at (w) = mn_now; 3678 ev_at (w) = mn_now;
3422 3679
3423 assert (("libev: negative ev_timer repeat value found while processing timers", w->repeat > 0.)); 3680 assert (("libev: negative ev_timer repeat value found while processing timers", w->repeat > EV_TS_CONST (0.)));
3424 3681
3425 ANHE_at_cache (timers [HEAP0]); 3682 ANHE_at_cache (timers [HEAP0]);
3426 downheap (timers, timercnt, HEAP0); 3683 downheap (timers, timercnt, HEAP0);
3427 } 3684 }
3428 else 3685 else
3437 } 3694 }
3438} 3695}
3439 3696
3440#if EV_PERIODIC_ENABLE 3697#if EV_PERIODIC_ENABLE
3441 3698
3442noinline 3699ecb_noinline
3443static void 3700static void
3444periodic_recalc (EV_P_ ev_periodic *w) 3701periodic_recalc (EV_P_ ev_periodic *w)
3445{ 3702{
3446 ev_tstamp interval = w->interval > MIN_INTERVAL ? w->interval : MIN_INTERVAL; 3703 ev_tstamp interval = w->interval > MIN_INTERVAL ? w->interval : MIN_INTERVAL;
3447 ev_tstamp at = w->offset + interval * ev_floor ((ev_rt_now - w->offset) / interval); 3704 ev_tstamp at = w->offset + interval * ev_floor ((ev_rt_now - w->offset) / interval);
3450 while (at <= ev_rt_now) 3707 while (at <= ev_rt_now)
3451 { 3708 {
3452 ev_tstamp nat = at + w->interval; 3709 ev_tstamp nat = at + w->interval;
3453 3710
3454 /* when resolution fails us, we use ev_rt_now */ 3711 /* when resolution fails us, we use ev_rt_now */
3455 if (expect_false (nat == at)) 3712 if (ecb_expect_false (nat == at))
3456 { 3713 {
3457 at = ev_rt_now; 3714 at = ev_rt_now;
3458 break; 3715 break;
3459 } 3716 }
3460 3717
3506 } 3763 }
3507} 3764}
3508 3765
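periodic_recalc rounds the next trigger time to offset + interval * floor ((ev_rt_now - offset) / interval), i.e. onto an absolute wall-clock grid. A hedged sketch of the corresponding watcher setup (hypothetical callback, values illustrative only):

  #include <ev.h>

  static ev_periodic hourly;

  /* hypothetical callback */
  static void
  hourly_cb (struct ev_loop *loop, ev_periodic *w, int revents)
  {
  }

  static void
  start_hourly (struct ev_loop *loop)
  {
    /* offset 0., interval 3600.: fire at every full wall-clock hour,
       re-aligned by periodic_recalc after time jumps */
    ev_periodic_init (&hourly, hourly_cb, 0., 3600., 0);
    ev_periodic_start (loop, &hourly);
  }
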
3509/* simply recalculate all periodics */ 3766/* simply recalculate all periodics */
3510/* TODO: maybe ensure that at least one event happens when jumping forward? */ 3767/* TODO: maybe ensure that at least one event happens when jumping forward? */
3511noinline ecb_cold 3768ecb_noinline ecb_cold
3512static void 3769static void
3513periodics_reschedule (EV_P) 3770periodics_reschedule (EV_P)
3514{ 3771{
3515 int i; 3772 int i;
3516 3773
3530 reheap (periodics, periodiccnt); 3787 reheap (periodics, periodiccnt);
3531} 3788}
3532#endif 3789#endif
3533 3790
3534/* adjust all timers by a given offset */ 3791/* adjust all timers by a given offset */
3535noinline ecb_cold 3792ecb_noinline ecb_cold
3536static void 3793static void
3537timers_reschedule (EV_P_ ev_tstamp adjust) 3794timers_reschedule (EV_P_ ev_tstamp adjust)
3538{ 3795{
3539 int i; 3796 int i;
3540 3797
3550/* also detect if there was a timejump, and act accordingly */ 3807/* also detect if there was a timejump, and act accordingly */
3551inline_speed void 3808inline_speed void
3552time_update (EV_P_ ev_tstamp max_block) 3809time_update (EV_P_ ev_tstamp max_block)
3553{ 3810{
3554#if EV_USE_MONOTONIC 3811#if EV_USE_MONOTONIC
3555 if (expect_true (have_monotonic)) 3812 if (ecb_expect_true (have_monotonic))
3556 { 3813 {
3557 int i; 3814 int i;
3558 ev_tstamp odiff = rtmn_diff; 3815 ev_tstamp odiff = rtmn_diff;
3559 3816
3560 mn_now = get_clock (); 3817 mn_now = get_clock ();
3561 3818
3562 /* only fetch the realtime clock every 0.5*MIN_TIMEJUMP seconds */ 3819 /* only fetch the realtime clock every 0.5*MIN_TIMEJUMP seconds */
3563 /* interpolate in the meantime */ 3820 /* interpolate in the meantime */
3564 if (expect_true (mn_now - now_floor < MIN_TIMEJUMP * .5)) 3821 if (ecb_expect_true (mn_now - now_floor < EV_TS_CONST (MIN_TIMEJUMP * .5)))
3565 { 3822 {
3566 ev_rt_now = rtmn_diff + mn_now; 3823 ev_rt_now = rtmn_diff + mn_now;
3567 return; 3824 return;
3568 } 3825 }
3569 3826
3583 ev_tstamp diff; 3840 ev_tstamp diff;
3584 rtmn_diff = ev_rt_now - mn_now; 3841 rtmn_diff = ev_rt_now - mn_now;
3585 3842
3586 diff = odiff - rtmn_diff; 3843 diff = odiff - rtmn_diff;
3587 3844
3588 if (expect_true ((diff < 0. ? -diff : diff) < MIN_TIMEJUMP)) 3845 if (ecb_expect_true ((diff < EV_TS_CONST (0.) ? -diff : diff) < EV_TS_CONST (MIN_TIMEJUMP)))
3589 return; /* all is well */ 3846 return; /* all is well */
3590 3847
3591 ev_rt_now = ev_time (); 3848 ev_rt_now = ev_time ();
3592 mn_now = get_clock (); 3849 mn_now = get_clock ();
3593 now_floor = mn_now; 3850 now_floor = mn_now;
3602 else 3859 else
3603#endif 3860#endif
3604 { 3861 {
3605 ev_rt_now = ev_time (); 3862 ev_rt_now = ev_time ();
3606 3863
3607 if (expect_false (mn_now > ev_rt_now || ev_rt_now > mn_now + max_block + MIN_TIMEJUMP)) 3864 if (ecb_expect_false (mn_now > ev_rt_now || ev_rt_now > mn_now + max_block + EV_TS_CONST (MIN_TIMEJUMP)))
3608 { 3865 {
3609 /* adjust timers. this is easy, as the offset is the same for all of them */ 3866 /* adjust timers. this is easy, as the offset is the same for all of them */
3610 timers_reschedule (EV_A_ ev_rt_now - mn_now); 3867 timers_reschedule (EV_A_ ev_rt_now - mn_now);
3611#if EV_PERIODIC_ENABLE 3868#if EV_PERIODIC_ENABLE
3612 periodics_reschedule (EV_A); 3869 periodics_reschedule (EV_A);
3635#if EV_VERIFY >= 2 3892#if EV_VERIFY >= 2
3636 ev_verify (EV_A); 3893 ev_verify (EV_A);
3637#endif 3894#endif
3638 3895
3639#ifndef _WIN32 3896#ifndef _WIN32
3640 if (expect_false (curpid)) /* penalise the forking check even more */ 3897 if (ecb_expect_false (curpid)) /* penalise the forking check even more */
3641 if (expect_false (getpid () != curpid)) 3898 if (ecb_expect_false (getpid () != curpid))
3642 { 3899 {
3643 curpid = getpid (); 3900 curpid = getpid ();
3644 postfork = 1; 3901 postfork = 1;
3645 } 3902 }
3646#endif 3903#endif
3647 3904
3648#if EV_FORK_ENABLE 3905#if EV_FORK_ENABLE
3649 /* we might have forked, so queue fork handlers */ 3906 /* we might have forked, so queue fork handlers */
3650 if (expect_false (postfork)) 3907 if (ecb_expect_false (postfork))
3651 if (forkcnt) 3908 if (forkcnt)
3652 { 3909 {
3653 queue_events (EV_A_ (W *)forks, forkcnt, EV_FORK); 3910 queue_events (EV_A_ (W *)forks, forkcnt, EV_FORK);
3654 EV_INVOKE_PENDING; 3911 EV_INVOKE_PENDING;
3655 } 3912 }
3656#endif 3913#endif
3657 3914
3658#if EV_PREPARE_ENABLE 3915#if EV_PREPARE_ENABLE
3659 /* queue prepare watchers (and execute them) */ 3916 /* queue prepare watchers (and execute them) */
3660 if (expect_false (preparecnt)) 3917 if (ecb_expect_false (preparecnt))
3661 { 3918 {
3662 queue_events (EV_A_ (W *)prepares, preparecnt, EV_PREPARE); 3919 queue_events (EV_A_ (W *)prepares, preparecnt, EV_PREPARE);
3663 EV_INVOKE_PENDING; 3920 EV_INVOKE_PENDING;
3664 } 3921 }
3665#endif 3922#endif
3666 3923
3667 if (expect_false (loop_done)) 3924 if (ecb_expect_false (loop_done))
3668 break; 3925 break;
3669 3926
3670 /* we might have forked, so reify kernel state if necessary */ 3927 /* we might have forked, so reify kernel state if necessary */
3671 if (expect_false (postfork)) 3928 if (ecb_expect_false (postfork))
3672 loop_fork (EV_A); 3929 loop_fork (EV_A);
3673 3930
3674 /* update fd-related kernel structures */ 3931 /* update fd-related kernel structures */
3675 fd_reify (EV_A); 3932 fd_reify (EV_A);
3676 3933
3681 3938
3682 /* remember old timestamp for io_blocktime calculation */ 3939 /* remember old timestamp for io_blocktime calculation */
3683 ev_tstamp prev_mn_now = mn_now; 3940 ev_tstamp prev_mn_now = mn_now;
3684 3941
3685 /* update time to cancel out callback processing overhead */ 3942 /* update time to cancel out callback processing overhead */
3686 time_update (EV_A_ 1e100); 3943 time_update (EV_A_ EV_TS_CONST (EV_TSTAMP_HUGE));
3687 3944
3688 /* from now on, we want a pipe-wake-up */ 3945 /* from now on, we want a pipe-wake-up */
3689 pipe_write_wanted = 1; 3946 pipe_write_wanted = 1;
3690 3947
3691 ECB_MEMORY_FENCE; /* make sure pipe_write_wanted is visible before we check for potential skips */ 3948 ECB_MEMORY_FENCE; /* make sure pipe_write_wanted is visible before we check for potential skips */
3692 3949
3693 if (expect_true (!(flags & EVRUN_NOWAIT || idleall || !activecnt || pipe_write_skipped))) 3950 if (ecb_expect_true (!(flags & EVRUN_NOWAIT || idleall || !activecnt || pipe_write_skipped)))
3694 { 3951 {
3695 waittime = MAX_BLOCKTIME; 3952 waittime = EV_TS_CONST (MAX_BLOCKTIME);
3696 3953
3697 if (timercnt) 3954 if (timercnt)
3698 { 3955 {
3699 ev_tstamp to = ANHE_at (timers [HEAP0]) - mn_now; 3956 ev_tstamp to = ANHE_at (timers [HEAP0]) - mn_now;
3700 if (waittime > to) waittime = to; 3957 if (waittime > to) waittime = to;
3707 if (waittime > to) waittime = to; 3964 if (waittime > to) waittime = to;
3708 } 3965 }
3709#endif 3966#endif
3710 3967
3711 /* don't let timeouts decrease the waittime below timeout_blocktime */ 3968 /* don't let timeouts decrease the waittime below timeout_blocktime */
3712 if (expect_false (waittime < timeout_blocktime)) 3969 if (ecb_expect_false (waittime < timeout_blocktime))
3713 waittime = timeout_blocktime; 3970 waittime = timeout_blocktime;
3714 3971
3715 /* at this point, we NEED to wait, so we have to ensure */ 3972 /* now there are two more special cases left, either we have
3716 /* to pass a minimum nonzero value to the backend */ 3973 * already-expired timers, so we should not sleep, or we have timers
3974 * that expire very soon, in which case we need to wait for a minimum
3975 * amount of time for some event loop backends.
3976 */
3717 if (expect_false (waittime < backend_mintime)) 3977 if (ecb_expect_false (waittime < backend_mintime))
3978 waittime = waittime <= EV_TS_CONST (0.)
3979 ? EV_TS_CONST (0.)
3718 waittime = backend_mintime; 3980 : backend_mintime;
3719 3981
3720 /* extra check because io_blocktime is commonly 0 */ 3982 /* extra check because io_blocktime is commonly 0 */
3721 if (expect_false (io_blocktime)) 3983 if (ecb_expect_false (io_blocktime))
3722 { 3984 {
3723 sleeptime = io_blocktime - (mn_now - prev_mn_now); 3985 sleeptime = io_blocktime - (mn_now - prev_mn_now);
3724 3986
3725 if (sleeptime > waittime - backend_mintime) 3987 if (sleeptime > waittime - backend_mintime)
3726 sleeptime = waittime - backend_mintime; 3988 sleeptime = waittime - backend_mintime;
3727 3989
3728 if (expect_true (sleeptime > 0.)) 3990 if (ecb_expect_true (sleeptime > EV_TS_CONST (0.)))
3729 { 3991 {
3730 ev_sleep (sleeptime); 3992 ev_sleep (sleeptime);
3731 waittime -= sleeptime; 3993 waittime -= sleeptime;
3732 } 3994 }
3733 } 3995 }
3747 { 4009 {
3748 assert (("libev: pipe_w not active, but pipe not written", ev_is_active (&pipe_w))); 4010 assert (("libev: pipe_w not active, but pipe not written", ev_is_active (&pipe_w)));
3749 ev_feed_event (EV_A_ &pipe_w, EV_CUSTOM); 4011 ev_feed_event (EV_A_ &pipe_w, EV_CUSTOM);
3750 } 4012 }
3751 4013
3752
3753 /* update ev_rt_now, do magic */ 4014 /* update ev_rt_now, do magic */
3754 time_update (EV_A_ waittime + sleeptime); 4015 time_update (EV_A_ waittime + sleeptime);
3755 } 4016 }
3756 4017
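The waittime/sleeptime clamping above is fed by the loop's io_blocktime and timeout_blocktime settings. A hedged sketch of tuning them through the public setters (the 0.01 values are arbitrary examples):

  #include <ev.h>

  /* sketch: let the loop sleep a little before polling so several events
     are collected per backend_poll call, trading latency for throughput */
  static void
  tune_collect_intervals (struct ev_loop *loop)
  {
    ev_set_io_collect_interval (loop, 0.01);      /* sets io_blocktime */
    ev_set_timeout_collect_interval (loop, 0.01); /* sets timeout_blocktime */
  }
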
3757 /* queue pending timers and reschedule them */ 4018 /* queue pending timers and reschedule them */
3765 idle_reify (EV_A); 4026 idle_reify (EV_A);
3766#endif 4027#endif
3767 4028
3768#if EV_CHECK_ENABLE 4029#if EV_CHECK_ENABLE
3769 /* queue check watchers, to be executed first */ 4030 /* queue check watchers, to be executed first */
3770 if (expect_false (checkcnt)) 4031 if (ecb_expect_false (checkcnt))
3771 queue_events (EV_A_ (W *)checks, checkcnt, EV_CHECK); 4032 queue_events (EV_A_ (W *)checks, checkcnt, EV_CHECK);
3772#endif 4033#endif
3773 4034
3774 EV_INVOKE_PENDING; 4035 EV_INVOKE_PENDING;
3775 } 4036 }
3776 while (expect_true ( 4037 while (ecb_expect_true (
3777 activecnt 4038 activecnt
3778 && !loop_done 4039 && !loop_done
3779 && !(flags & (EVRUN_ONCE | EVRUN_NOWAIT)) 4040 && !(flags & (EVRUN_ONCE | EVRUN_NOWAIT))
3780 )); 4041 ));
3781 4042
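How often the do/while above iterates is controlled by the flags argument of ev_run; a short sketch of the two non-default modes:

  #include <ev.h>

  static void
  drain_once (struct ev_loop *loop)
  {
    ev_run (loop, EVRUN_ONCE);   /* block until at least one event, invoke it, return */
  }

  static void
  poll_nonblocking (struct ev_loop *loop)
  {
    ev_run (loop, EVRUN_NOWAIT); /* invoke only events that are already pending */
  }
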
3808} 4069}
3809 4070
3810void 4071void
3811ev_now_update (EV_P) EV_NOEXCEPT 4072ev_now_update (EV_P) EV_NOEXCEPT
3812{ 4073{
3813 time_update (EV_A_ 1e100); 4074 time_update (EV_A_ EV_TSTAMP_HUGE);
3814} 4075}
3815 4076
3816void 4077void
3817ev_suspend (EV_P) EV_NOEXCEPT 4078ev_suspend (EV_P) EV_NOEXCEPT
3818{ 4079{
3845inline_size void 4106inline_size void
3846wlist_del (WL *head, WL elem) 4107wlist_del (WL *head, WL elem)
3847{ 4108{
3848 while (*head) 4109 while (*head)
3849 { 4110 {
3850 if (expect_true (*head == elem)) 4111 if (ecb_expect_true (*head == elem))
3851 { 4112 {
3852 *head = elem->next; 4113 *head = elem->next;
3853 break; 4114 break;
3854 } 4115 }
3855 4116
3872ev_clear_pending (EV_P_ void *w) EV_NOEXCEPT 4133ev_clear_pending (EV_P_ void *w) EV_NOEXCEPT
3873{ 4134{
3874 W w_ = (W)w; 4135 W w_ = (W)w;
3875 int pending = w_->pending; 4136 int pending = w_->pending;
3876 4137
3877 if (expect_true (pending)) 4138 if (ecb_expect_true (pending))
3878 { 4139 {
3879 ANPENDING *p = pendings [ABSPRI (w_)] + pending - 1; 4140 ANPENDING *p = pendings [ABSPRI (w_)] + pending - 1;
3880 p->w = (W)&pending_w; 4141 p->w = (W)&pending_w;
3881 w_->pending = 0; 4142 w_->pending = 0;
3882 return p->events; 4143 return p->events;
3909 w->active = 0; 4170 w->active = 0;
3910} 4171}
3911 4172
3912/*****************************************************************************/ 4173/*****************************************************************************/
3913 4174
3914noinline 4175ecb_noinline
3915void 4176void
3916ev_io_start (EV_P_ ev_io *w) EV_NOEXCEPT 4177ev_io_start (EV_P_ ev_io *w) EV_NOEXCEPT
3917{ 4178{
3918 int fd = w->fd; 4179 int fd = w->fd;
3919 4180
3920 if (expect_false (ev_is_active (w))) 4181 if (ecb_expect_false (ev_is_active (w)))
3921 return; 4182 return;
3922 4183
3923 assert (("libev: ev_io_start called with negative fd", fd >= 0)); 4184 assert (("libev: ev_io_start called with negative fd", fd >= 0));
3924 assert (("libev: ev_io_start called with illegal event mask", !(w->events & ~(EV__IOFDSET | EV_READ | EV_WRITE)))); 4185 assert (("libev: ev_io_start called with illegal event mask", !(w->events & ~(EV__IOFDSET | EV_READ | EV_WRITE))));
3925 4186
4187#if EV_VERIFY >= 2
4188 assert (("libev: ev_io_start called on watcher with invalid fd", fd_valid (fd)));
4189#endif
3926 EV_FREQUENT_CHECK; 4190 EV_FREQUENT_CHECK;
3927 4191
3928 ev_start (EV_A_ (W)w, 1); 4192 ev_start (EV_A_ (W)w, 1);
3929 array_needsize (ANFD, anfds, anfdmax, fd + 1, array_needsize_zerofill); 4193 array_needsize (ANFD, anfds, anfdmax, fd + 1, array_needsize_zerofill);
3930 wlist_add (&anfds[fd].head, (WL)w); 4194 wlist_add (&anfds[fd].head, (WL)w);
3936 w->events &= ~EV__IOFDSET; 4200 w->events &= ~EV__IOFDSET;
3937 4201
3938 EV_FREQUENT_CHECK; 4202 EV_FREQUENT_CHECK;
3939} 4203}
3940 4204
3941noinline 4205ecb_noinline
3942void 4206void
3943ev_io_stop (EV_P_ ev_io *w) EV_NOEXCEPT 4207ev_io_stop (EV_P_ ev_io *w) EV_NOEXCEPT
3944{ 4208{
3945 clear_pending (EV_A_ (W)w); 4209 clear_pending (EV_A_ (W)w);
3946 if (expect_false (!ev_is_active (w))) 4210 if (ecb_expect_false (!ev_is_active (w)))
3947 return; 4211 return;
3948 4212
3949 assert (("libev: ev_io_stop called with illegal fd (must stay constant after start!)", w->fd >= 0 && w->fd < anfdmax)); 4213 assert (("libev: ev_io_stop called with illegal fd (must stay constant after start!)", w->fd >= 0 && w->fd < anfdmax));
3950 4214
4215#if EV_VERIFY >= 2
4216 assert (("libev: ev_io_stop called on watcher with invalid fd", fd_valid (w->fd)));
4217#endif
3951 EV_FREQUENT_CHECK; 4218 EV_FREQUENT_CHECK;
3952 4219
3953 wlist_del (&anfds[w->fd].head, (WL)w); 4220 wlist_del (&anfds[w->fd].head, (WL)w);
3954 ev_stop (EV_A_ (W)w); 4221 ev_stop (EV_A_ (W)w);
3955 4222
3956 fd_change (EV_A_ w->fd, EV_ANFD_REIFY); 4223 fd_change (EV_A_ w->fd, EV_ANFD_REIFY);
3957 4224
3958 EV_FREQUENT_CHECK; 4225 EV_FREQUENT_CHECK;
3959} 4226}
3960 4227
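A minimal ev_io sketch matching the start/stop pair above, watching standard input for readability (the callback name is hypothetical):

  #include <ev.h>
  #include <unistd.h>

  static ev_io stdin_watcher;

  /* hypothetical callback: stop the watcher after the first readable event */
  static void
  stdin_cb (struct ev_loop *loop, ev_io *w, int revents)
  {
    if (revents & EV_READ)
      ev_io_stop (loop, w);
  }

  static void
  watch_stdin (struct ev_loop *loop)
  {
    ev_io_init (&stdin_watcher, stdin_cb, STDIN_FILENO, EV_READ);
    ev_io_start (loop, &stdin_watcher);
  }
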
3961noinline 4228ecb_noinline
3962void 4229void
3963ev_timer_start (EV_P_ ev_timer *w) EV_NOEXCEPT 4230ev_timer_start (EV_P_ ev_timer *w) EV_NOEXCEPT
3964{ 4231{
3965 if (expect_false (ev_is_active (w))) 4232 if (ecb_expect_false (ev_is_active (w)))
3966 return; 4233 return;
3967 4234
3968 ev_at (w) += mn_now; 4235 ev_at (w) += mn_now;
3969 4236
3970 assert (("libev: ev_timer_start called with negative timer repeat value", w->repeat >= 0.)); 4237 assert (("libev: ev_timer_start called with negative timer repeat value", w->repeat >= 0.));
3981 EV_FREQUENT_CHECK; 4248 EV_FREQUENT_CHECK;
3982 4249
3983 /*assert (("libev: internal timer heap corruption", timers [ev_active (w)] == (WT)w));*/ 4250 /*assert (("libev: internal timer heap corruption", timers [ev_active (w)] == (WT)w));*/
3984} 4251}
3985 4252
3986noinline 4253ecb_noinline
3987void 4254void
3988ev_timer_stop (EV_P_ ev_timer *w) EV_NOEXCEPT 4255ev_timer_stop (EV_P_ ev_timer *w) EV_NOEXCEPT
3989{ 4256{
3990 clear_pending (EV_A_ (W)w); 4257 clear_pending (EV_A_ (W)w);
3991 if (expect_false (!ev_is_active (w))) 4258 if (ecb_expect_false (!ev_is_active (w)))
3992 return; 4259 return;
3993 4260
3994 EV_FREQUENT_CHECK; 4261 EV_FREQUENT_CHECK;
3995 4262
3996 { 4263 {
3998 4265
3999 assert (("libev: internal timer heap corruption", ANHE_w (timers [active]) == (WT)w)); 4266 assert (("libev: internal timer heap corruption", ANHE_w (timers [active]) == (WT)w));
4000 4267
4001 --timercnt; 4268 --timercnt;
4002 4269
4003 if (expect_true (active < timercnt + HEAP0)) 4270 if (ecb_expect_true (active < timercnt + HEAP0))
4004 { 4271 {
4005 timers [active] = timers [timercnt + HEAP0]; 4272 timers [active] = timers [timercnt + HEAP0];
4006 adjustheap (timers, timercnt, active); 4273 adjustheap (timers, timercnt, active);
4007 } 4274 }
4008 } 4275 }
4012 ev_stop (EV_A_ (W)w); 4279 ev_stop (EV_A_ (W)w);
4013 4280
4014 EV_FREQUENT_CHECK; 4281 EV_FREQUENT_CHECK;
4015} 4282}
4016 4283
4017noinline 4284ecb_noinline
4018void 4285void
4019ev_timer_again (EV_P_ ev_timer *w) EV_NOEXCEPT 4286ev_timer_again (EV_P_ ev_timer *w) EV_NOEXCEPT
4020{ 4287{
4021 EV_FREQUENT_CHECK; 4288 EV_FREQUENT_CHECK;
4022 4289
4043} 4310}
4044 4311
4045ev_tstamp 4312ev_tstamp
4046ev_timer_remaining (EV_P_ ev_timer *w) EV_NOEXCEPT 4313ev_timer_remaining (EV_P_ ev_timer *w) EV_NOEXCEPT
4047{ 4314{
4048 return ev_at (w) - (ev_is_active (w) ? mn_now : 0.); 4315 return ev_at (w) - (ev_is_active (w) ? mn_now : EV_TS_CONST (0.));
4049} 4316}
4050 4317
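ev_timer_again and ev_timer_remaining are typically combined for inactivity timeouts: re-arm the repeat interval on every activity instead of stopping and restarting the watcher. A hedged sketch of that pattern (hypothetical names, 60s timeout chosen arbitrarily):

  #include <ev.h>

  static ev_timer idle_timeout;

  /* hypothetical callback: fires after 60s without note_activity calls */
  static void
  timeout_cb (struct ev_loop *loop, ev_timer *w, int revents)
  {
  }

  static void
  setup_timeout (struct ev_loop *loop)
  {
    ev_timer_init (&idle_timeout, timeout_cb, 0., 60.);
    ev_timer_again (loop, &idle_timeout); /* start using the repeat value */
  }

  static void
  note_activity (struct ev_loop *loop)
  {
    ev_timer_again (loop, &idle_timeout); /* push the deadline out again */
    /* ev_timer_remaining (loop, &idle_timeout) would report the time left */
  }
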
4051#if EV_PERIODIC_ENABLE 4318#if EV_PERIODIC_ENABLE
4052noinline 4319ecb_noinline
4053void 4320void
4054ev_periodic_start (EV_P_ ev_periodic *w) EV_NOEXCEPT 4321ev_periodic_start (EV_P_ ev_periodic *w) EV_NOEXCEPT
4055{ 4322{
4056 if (expect_false (ev_is_active (w))) 4323 if (ecb_expect_false (ev_is_active (w)))
4057 return; 4324 return;
4325
4326#if EV_USE_TIMERFD
4327 if (timerfd == -2)
4328 evtimerfd_init (EV_A);
4329#endif
4058 4330
4059 if (w->reschedule_cb) 4331 if (w->reschedule_cb)
4060 ev_at (w) = w->reschedule_cb (w, ev_rt_now); 4332 ev_at (w) = w->reschedule_cb (w, ev_rt_now);
4061 else if (w->interval) 4333 else if (w->interval)
4062 { 4334 {
4078 EV_FREQUENT_CHECK; 4350 EV_FREQUENT_CHECK;
4079 4351
4080 /*assert (("libev: internal periodic heap corruption", ANHE_w (periodics [ev_active (w)]) == (WT)w));*/ 4352 /*assert (("libev: internal periodic heap corruption", ANHE_w (periodics [ev_active (w)]) == (WT)w));*/
4081} 4353}
4082 4354
4083noinline 4355ecb_noinline
4084void 4356void
4085ev_periodic_stop (EV_P_ ev_periodic *w) EV_NOEXCEPT 4357ev_periodic_stop (EV_P_ ev_periodic *w) EV_NOEXCEPT
4086{ 4358{
4087 clear_pending (EV_A_ (W)w); 4359 clear_pending (EV_A_ (W)w);
4088 if (expect_false (!ev_is_active (w))) 4360 if (ecb_expect_false (!ev_is_active (w)))
4089 return; 4361 return;
4090 4362
4091 EV_FREQUENT_CHECK; 4363 EV_FREQUENT_CHECK;
4092 4364
4093 { 4365 {
4095 4367
4096 assert (("libev: internal periodic heap corruption", ANHE_w (periodics [active]) == (WT)w)); 4368 assert (("libev: internal periodic heap corruption", ANHE_w (periodics [active]) == (WT)w));
4097 4369
4098 --periodiccnt; 4370 --periodiccnt;
4099 4371
4100 if (expect_true (active < periodiccnt + HEAP0)) 4372 if (ecb_expect_true (active < periodiccnt + HEAP0))
4101 { 4373 {
4102 periodics [active] = periodics [periodiccnt + HEAP0]; 4374 periodics [active] = periodics [periodiccnt + HEAP0];
4103 adjustheap (periodics, periodiccnt, active); 4375 adjustheap (periodics, periodiccnt, active);
4104 } 4376 }
4105 } 4377 }
4107 ev_stop (EV_A_ (W)w); 4379 ev_stop (EV_A_ (W)w);
4108 4380
4109 EV_FREQUENT_CHECK; 4381 EV_FREQUENT_CHECK;
4110} 4382}
4111 4383
4112noinline 4384ecb_noinline
4113void 4385void
4114ev_periodic_again (EV_P_ ev_periodic *w) EV_NOEXCEPT 4386ev_periodic_again (EV_P_ ev_periodic *w) EV_NOEXCEPT
4115{ 4387{
4116 /* TODO: use adjustheap and recalculation */ 4388 /* TODO: use adjustheap and recalculation */
4117 ev_periodic_stop (EV_A_ w); 4389 ev_periodic_stop (EV_A_ w);
4123# define SA_RESTART 0 4395# define SA_RESTART 0
4124#endif 4396#endif
4125 4397
4126#if EV_SIGNAL_ENABLE 4398#if EV_SIGNAL_ENABLE
4127 4399
4128noinline 4400ecb_noinline
4129void 4401void
4130ev_signal_start (EV_P_ ev_signal *w) EV_NOEXCEPT 4402ev_signal_start (EV_P_ ev_signal *w) EV_NOEXCEPT
4131{ 4403{
4132 if (expect_false (ev_is_active (w))) 4404 if (ecb_expect_false (ev_is_active (w)))
4133 return; 4405 return;
4134 4406
4135 assert (("libev: ev_signal_start called with illegal signal number", w->signum > 0 && w->signum < EV_NSIG)); 4407 assert (("libev: ev_signal_start called with illegal signal number", w->signum > 0 && w->signum < EV_NSIG));
4136 4408
4137#if EV_MULTIPLICITY 4409#if EV_MULTIPLICITY
4206 } 4478 }
4207 4479
4208 EV_FREQUENT_CHECK; 4480 EV_FREQUENT_CHECK;
4209} 4481}
4210 4482
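A minimal ev_signal sketch for the start path above, with a hypothetical handler that stops the loop on SIGINT:

  #include <ev.h>
  #include <signal.h>

  static ev_signal sigint_watcher;

  /* hypothetical callback: leave all nested ev_run calls */
  static void
  sigint_cb (struct ev_loop *loop, ev_signal *w, int revents)
  {
    ev_break (loop, EVBREAK_ALL);
  }

  static void
  watch_sigint (struct ev_loop *loop)
  {
    ev_signal_init (&sigint_watcher, sigint_cb, SIGINT);
    ev_signal_start (loop, &sigint_watcher);
  }
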
4211noinline 4483ecb_noinline
4212void 4484void
4213ev_signal_stop (EV_P_ ev_signal *w) EV_NOEXCEPT 4485ev_signal_stop (EV_P_ ev_signal *w) EV_NOEXCEPT
4214{ 4486{
4215 clear_pending (EV_A_ (W)w); 4487 clear_pending (EV_A_ (W)w);
4216 if (expect_false (!ev_is_active (w))) 4488 if (ecb_expect_false (!ev_is_active (w)))
4217 return; 4489 return;
4218 4490
4219 EV_FREQUENT_CHECK; 4491 EV_FREQUENT_CHECK;
4220 4492
4221 wlist_del (&signals [w->signum - 1].head, (WL)w); 4493 wlist_del (&signals [w->signum - 1].head, (WL)w);
4254ev_child_start (EV_P_ ev_child *w) EV_NOEXCEPT 4526ev_child_start (EV_P_ ev_child *w) EV_NOEXCEPT
4255{ 4527{
4256#if EV_MULTIPLICITY 4528#if EV_MULTIPLICITY
4257 assert (("libev: child watchers are only supported in the default loop", loop == ev_default_loop_ptr)); 4529 assert (("libev: child watchers are only supported in the default loop", loop == ev_default_loop_ptr));
4258#endif 4530#endif
4259 if (expect_false (ev_is_active (w))) 4531 if (ecb_expect_false (ev_is_active (w)))
4260 return; 4532 return;
4261 4533
4262 EV_FREQUENT_CHECK; 4534 EV_FREQUENT_CHECK;
4263 4535
4264 ev_start (EV_A_ (W)w, 1); 4536 ev_start (EV_A_ (W)w, 1);
4269 4541
4270void 4542void
4271ev_child_stop (EV_P_ ev_child *w) EV_NOEXCEPT 4543ev_child_stop (EV_P_ ev_child *w) EV_NOEXCEPT
4272{ 4544{
4273 clear_pending (EV_A_ (W)w); 4545 clear_pending (EV_A_ (W)w);
4274 if (expect_false (!ev_is_active (w))) 4546 if (ecb_expect_false (!ev_is_active (w)))
4275 return; 4547 return;
4276 4548
4277 EV_FREQUENT_CHECK; 4549 EV_FREQUENT_CHECK;
4278 4550
4279 wlist_del (&childs [w->pid & ((EV_PID_HASHSIZE) - 1)], (WL)w); 4551 wlist_del (&childs [w->pid & ((EV_PID_HASHSIZE) - 1)], (WL)w);
4293 4565
4294#define DEF_STAT_INTERVAL 5.0074891 4566#define DEF_STAT_INTERVAL 5.0074891
4295#define NFS_STAT_INTERVAL 30.1074891 /* for filesystems potentially failing inotify */ 4567#define NFS_STAT_INTERVAL 30.1074891 /* for filesystems potentially failing inotify */
4296#define MIN_STAT_INTERVAL 0.1074891 4568#define MIN_STAT_INTERVAL 0.1074891
4297 4569
4298noinline static void stat_timer_cb (EV_P_ ev_timer *w_, int revents); 4570ecb_noinline static void stat_timer_cb (EV_P_ ev_timer *w_, int revents);
4299 4571
4300#if EV_USE_INOTIFY 4572#if EV_USE_INOTIFY
4301 4573
4302/* the * 2 is to allow for alignment padding, which for some reason is >> 8 */ 4574/* the * 2 is to allow for alignment padding, which for some reason is >> 8 */
4303# define EV_INOTIFY_BUFSIZE (sizeof (struct inotify_event) * 2 + NAME_MAX) 4575# define EV_INOTIFY_BUFSIZE (sizeof (struct inotify_event) * 2 + NAME_MAX)
4304 4576
4305noinline 4577ecb_noinline
4306static void 4578static void
4307infy_add (EV_P_ ev_stat *w) 4579infy_add (EV_P_ ev_stat *w)
4308{ 4580{
4309 w->wd = inotify_add_watch (fs_fd, w->path, 4581 w->wd = inotify_add_watch (fs_fd, w->path,
4310 IN_ATTRIB | IN_DELETE_SELF | IN_MOVE_SELF | IN_MODIFY 4582 IN_ATTRIB | IN_DELETE_SELF | IN_MOVE_SELF | IN_MODIFY
4375 if (ev_is_active (&w->timer)) ev_ref (EV_A); 4647 if (ev_is_active (&w->timer)) ev_ref (EV_A);
4376 ev_timer_again (EV_A_ &w->timer); 4648 ev_timer_again (EV_A_ &w->timer);
4377 if (ev_is_active (&w->timer)) ev_unref (EV_A); 4649 if (ev_is_active (&w->timer)) ev_unref (EV_A);
4378} 4650}
4379 4651
4380noinline 4652ecb_noinline
4381static void 4653static void
4382infy_del (EV_P_ ev_stat *w) 4654infy_del (EV_P_ ev_stat *w)
4383{ 4655{
4384 int slot; 4656 int slot;
4385 int wd = w->wd; 4657 int wd = w->wd;
4393 4665
4394 /* remove this watcher, if others are watching it, they will rearm */ 4666 /* remove this watcher, if others are watching it, they will rearm */
4395 inotify_rm_watch (fs_fd, wd); 4667 inotify_rm_watch (fs_fd, wd);
4396} 4668}
4397 4669
4398noinline 4670ecb_noinline
4399static void 4671static void
4400infy_wd (EV_P_ int slot, int wd, struct inotify_event *ev) 4672infy_wd (EV_P_ int slot, int wd, struct inotify_event *ev)
4401{ 4673{
4402 if (slot < 0) 4674 if (slot < 0)
4403 /* overflow, need to check for all hash slots */ 4675 /* overflow, need to check for all hash slots */
4549 w->attr.st_nlink = 0; 4821 w->attr.st_nlink = 0;
4550 else if (!w->attr.st_nlink) 4822 else if (!w->attr.st_nlink)
4551 w->attr.st_nlink = 1; 4823 w->attr.st_nlink = 1;
4552} 4824}
4553 4825
4554noinline 4826ecb_noinline
4555static void 4827static void
4556stat_timer_cb (EV_P_ ev_timer *w_, int revents) 4828stat_timer_cb (EV_P_ ev_timer *w_, int revents)
4557{ 4829{
4558 ev_stat *w = (ev_stat *)(((char *)w_) - offsetof (ev_stat, timer)); 4830 ev_stat *w = (ev_stat *)(((char *)w_) - offsetof (ev_stat, timer));
4559 4831
4593} 4865}
4594 4866
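The inotify/timer machinery above backs the public ev_stat watcher; a hedged usage sketch (the path and callback name are hypothetical):

  #include <ev.h>

  static ev_stat passwd_watcher;

  /* hypothetical callback: w->attr holds the new stat data, w->prev the
     previous one; st_nlink == 0 means the path no longer exists */
  static void
  passwd_cb (struct ev_loop *loop, ev_stat *w, int revents)
  {
  }

  static void
  watch_passwd (struct ev_loop *loop)
  {
    /* interval 0. selects DEF_STAT_INTERVAL (or the NFS variant), as above */
    ev_stat_init (&passwd_watcher, passwd_cb, "/etc/passwd", 0.);
    ev_stat_start (loop, &passwd_watcher);
  }
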
4595void 4867void
4596ev_stat_start (EV_P_ ev_stat *w) EV_NOEXCEPT 4868ev_stat_start (EV_P_ ev_stat *w) EV_NOEXCEPT
4597{ 4869{
4598 if (expect_false (ev_is_active (w))) 4870 if (ecb_expect_false (ev_is_active (w)))
4599 return; 4871 return;
4600 4872
4601 ev_stat_stat (EV_A_ w); 4873 ev_stat_stat (EV_A_ w);
4602 4874
4603 if (w->interval < MIN_STAT_INTERVAL && w->interval) 4875 if (w->interval < MIN_STAT_INTERVAL && w->interval)
4625 4897
4626void 4898void
4627ev_stat_stop (EV_P_ ev_stat *w) EV_NOEXCEPT 4899ev_stat_stop (EV_P_ ev_stat *w) EV_NOEXCEPT
4628{ 4900{
4629 clear_pending (EV_A_ (W)w); 4901 clear_pending (EV_A_ (W)w);
4630 if (expect_false (!ev_is_active (w))) 4902 if (ecb_expect_false (!ev_is_active (w)))
4631 return; 4903 return;
4632 4904
4633 EV_FREQUENT_CHECK; 4905 EV_FREQUENT_CHECK;
4634 4906
4635#if EV_USE_INOTIFY 4907#if EV_USE_INOTIFY
4650 4922
4651#if EV_IDLE_ENABLE 4923#if EV_IDLE_ENABLE
4652void 4924void
4653ev_idle_start (EV_P_ ev_idle *w) EV_NOEXCEPT 4925ev_idle_start (EV_P_ ev_idle *w) EV_NOEXCEPT
4654{ 4926{
4655 if (expect_false (ev_is_active (w))) 4927 if (ecb_expect_false (ev_is_active (w)))
4656 return; 4928 return;
4657 4929
4658 pri_adjust (EV_A_ (W)w); 4930 pri_adjust (EV_A_ (W)w);
4659 4931
4660 EV_FREQUENT_CHECK; 4932 EV_FREQUENT_CHECK;
4674 4946
4675void 4947void
4676ev_idle_stop (EV_P_ ev_idle *w) EV_NOEXCEPT 4948ev_idle_stop (EV_P_ ev_idle *w) EV_NOEXCEPT
4677{ 4949{
4678 clear_pending (EV_A_ (W)w); 4950 clear_pending (EV_A_ (W)w);
4679 if (expect_false (!ev_is_active (w))) 4951 if (ecb_expect_false (!ev_is_active (w)))
4680 return; 4952 return;
4681 4953
4682 EV_FREQUENT_CHECK; 4954 EV_FREQUENT_CHECK;
4683 4955
4684 { 4956 {
4697 4969
4698#if EV_PREPARE_ENABLE 4970#if EV_PREPARE_ENABLE
4699void 4971void
4700ev_prepare_start (EV_P_ ev_prepare *w) EV_NOEXCEPT 4972ev_prepare_start (EV_P_ ev_prepare *w) EV_NOEXCEPT
4701{ 4973{
4702 if (expect_false (ev_is_active (w))) 4974 if (ecb_expect_false (ev_is_active (w)))
4703 return; 4975 return;
4704 4976
4705 EV_FREQUENT_CHECK; 4977 EV_FREQUENT_CHECK;
4706 4978
4707 ev_start (EV_A_ (W)w, ++preparecnt); 4979 ev_start (EV_A_ (W)w, ++preparecnt);
4713 4985
4714void 4986void
4715ev_prepare_stop (EV_P_ ev_prepare *w) EV_NOEXCEPT 4987ev_prepare_stop (EV_P_ ev_prepare *w) EV_NOEXCEPT
4716{ 4988{
4717 clear_pending (EV_A_ (W)w); 4989 clear_pending (EV_A_ (W)w);
4718 if (expect_false (!ev_is_active (w))) 4990 if (ecb_expect_false (!ev_is_active (w)))
4719 return; 4991 return;
4720 4992
4721 EV_FREQUENT_CHECK; 4993 EV_FREQUENT_CHECK;
4722 4994
4723 { 4995 {
4735 5007
4736#if EV_CHECK_ENABLE 5008#if EV_CHECK_ENABLE
4737void 5009void
4738ev_check_start (EV_P_ ev_check *w) EV_NOEXCEPT 5010ev_check_start (EV_P_ ev_check *w) EV_NOEXCEPT
4739{ 5011{
4740 if (expect_false (ev_is_active (w))) 5012 if (ecb_expect_false (ev_is_active (w)))
4741 return; 5013 return;
4742 5014
4743 EV_FREQUENT_CHECK; 5015 EV_FREQUENT_CHECK;
4744 5016
4745 ev_start (EV_A_ (W)w, ++checkcnt); 5017 ev_start (EV_A_ (W)w, ++checkcnt);
4751 5023
4752void 5024void
4753ev_check_stop (EV_P_ ev_check *w) EV_NOEXCEPT 5025ev_check_stop (EV_P_ ev_check *w) EV_NOEXCEPT
4754{ 5026{
4755 clear_pending (EV_A_ (W)w); 5027 clear_pending (EV_A_ (W)w);
4756 if (expect_false (!ev_is_active (w))) 5028 if (ecb_expect_false (!ev_is_active (w)))
4757 return; 5029 return;
4758 5030
4759 EV_FREQUENT_CHECK; 5031 EV_FREQUENT_CHECK;
4760 5032
4761 { 5033 {
4770 EV_FREQUENT_CHECK; 5042 EV_FREQUENT_CHECK;
4771} 5043}
4772#endif 5044#endif
4773 5045
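Prepare and check watchers like the ones above bracket each blocking poll; a minimal sketch (hypothetical callbacks) of using the pair to run code right before the loop blocks and right after it wakes up:

  #include <ev.h>

  static ev_prepare prep;
  static ev_check   chk;

  /* hypothetical: runs just before the loop blocks in the backend */
  static void
  prepare_cb (struct ev_loop *loop, ev_prepare *w, int revents)
  {
  }

  /* hypothetical: runs right after the backend returns, before other watchers */
  static void
  check_cb (struct ev_loop *loop, ev_check *w, int revents)
  {
  }

  static void
  bracket_poll (struct ev_loop *loop)
  {
    ev_prepare_init (&prep, prepare_cb);
    ev_prepare_start (loop, &prep);
    ev_check_init (&chk, check_cb);
    ev_check_start (loop, &chk);
  }
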
4774#if EV_EMBED_ENABLE 5046#if EV_EMBED_ENABLE
4775noinline 5047ecb_noinline
4776void 5048void
4777ev_embed_sweep (EV_P_ ev_embed *w) EV_NOEXCEPT 5049ev_embed_sweep (EV_P_ ev_embed *w) EV_NOEXCEPT
4778{ 5050{
4779 ev_run (w->other, EVRUN_NOWAIT); 5051 ev_run (w->other, EVRUN_NOWAIT);
4780} 5052}
4832#endif 5104#endif
4833 5105
4834void 5106void
4835ev_embed_start (EV_P_ ev_embed *w) EV_NOEXCEPT 5107ev_embed_start (EV_P_ ev_embed *w) EV_NOEXCEPT
4836{ 5108{
4837 if (expect_false (ev_is_active (w))) 5109 if (ecb_expect_false (ev_is_active (w)))
4838 return; 5110 return;
4839 5111
4840 { 5112 {
4841 EV_P = w->other; 5113 EV_P = w->other;
4842 assert (("libev: loop to be embedded is not embeddable", backend & ev_embeddable_backends ())); 5114 assert (("libev: loop to be embedded is not embeddable", backend & ev_embeddable_backends ()));
4864 5136
4865void 5137void
4866ev_embed_stop (EV_P_ ev_embed *w) EV_NOEXCEPT 5138ev_embed_stop (EV_P_ ev_embed *w) EV_NOEXCEPT
4867{ 5139{
4868 clear_pending (EV_A_ (W)w); 5140 clear_pending (EV_A_ (W)w);
4869 if (expect_false (!ev_is_active (w))) 5141 if (ecb_expect_false (!ev_is_active (w)))
4870 return; 5142 return;
4871 5143
4872 EV_FREQUENT_CHECK; 5144 EV_FREQUENT_CHECK;
4873 5145
4874 ev_io_stop (EV_A_ &w->io); 5146 ev_io_stop (EV_A_ &w->io);
4883 5155
4884#if EV_FORK_ENABLE 5156#if EV_FORK_ENABLE
4885void 5157void
4886ev_fork_start (EV_P_ ev_fork *w) EV_NOEXCEPT 5158ev_fork_start (EV_P_ ev_fork *w) EV_NOEXCEPT
4887{ 5159{
4888 if (expect_false (ev_is_active (w))) 5160 if (ecb_expect_false (ev_is_active (w)))
4889 return; 5161 return;
4890 5162
4891 EV_FREQUENT_CHECK; 5163 EV_FREQUENT_CHECK;
4892 5164
4893 ev_start (EV_A_ (W)w, ++forkcnt); 5165 ev_start (EV_A_ (W)w, ++forkcnt);
4899 5171
4900void 5172void
4901ev_fork_stop (EV_P_ ev_fork *w) EV_NOEXCEPT 5173ev_fork_stop (EV_P_ ev_fork *w) EV_NOEXCEPT
4902{ 5174{
4903 clear_pending (EV_A_ (W)w); 5175 clear_pending (EV_A_ (W)w);
4904 if (expect_false (!ev_is_active (w))) 5176 if (ecb_expect_false (!ev_is_active (w)))
4905 return; 5177 return;
4906 5178
4907 EV_FREQUENT_CHECK; 5179 EV_FREQUENT_CHECK;
4908 5180
4909 { 5181 {
4921 5193
4922#if EV_CLEANUP_ENABLE 5194#if EV_CLEANUP_ENABLE
4923void 5195void
4924ev_cleanup_start (EV_P_ ev_cleanup *w) EV_NOEXCEPT 5196ev_cleanup_start (EV_P_ ev_cleanup *w) EV_NOEXCEPT
4925{ 5197{
4926 if (expect_false (ev_is_active (w))) 5198 if (ecb_expect_false (ev_is_active (w)))
4927 return; 5199 return;
4928 5200
4929 EV_FREQUENT_CHECK; 5201 EV_FREQUENT_CHECK;
4930 5202
4931 ev_start (EV_A_ (W)w, ++cleanupcnt); 5203 ev_start (EV_A_ (W)w, ++cleanupcnt);
4939 5211
4940void 5212void
4941ev_cleanup_stop (EV_P_ ev_cleanup *w) EV_NOEXCEPT 5213ev_cleanup_stop (EV_P_ ev_cleanup *w) EV_NOEXCEPT
4942{ 5214{
4943 clear_pending (EV_A_ (W)w); 5215 clear_pending (EV_A_ (W)w);
4944 if (expect_false (!ev_is_active (w))) 5216 if (ecb_expect_false (!ev_is_active (w)))
4945 return; 5217 return;
4946 5218
4947 EV_FREQUENT_CHECK; 5219 EV_FREQUENT_CHECK;
4948 ev_ref (EV_A); 5220 ev_ref (EV_A);
4949 5221
4962 5234
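The ev_async watchers started and stopped in the section below are the loop's thread-safe wakeup mechanism; a hedged sketch of signalling a loop from another thread (hypothetical names):

  #include <ev.h>

  static ev_async wakeup;

  /* hypothetical callback: runs in the loop thread after ev_async_send */
  static void
  wakeup_cb (struct ev_loop *loop, ev_async *w, int revents)
  {
  }

  static void
  setup_wakeup (struct ev_loop *loop)
  {
    ev_async_init (&wakeup, wakeup_cb);
    ev_async_start (loop, &wakeup);
  }

  /* intended to be safe to call from other threads to wake the loop */
  static void
  notify_loop (struct ev_loop *loop)
  {
    ev_async_send (loop, &wakeup);
  }
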
4963#if EV_ASYNC_ENABLE 5235#if EV_ASYNC_ENABLE
4964void 5236void
4965ev_async_start (EV_P_ ev_async *w) EV_NOEXCEPT 5237ev_async_start (EV_P_ ev_async *w) EV_NOEXCEPT
4966{ 5238{
4967 if (expect_false (ev_is_active (w))) 5239 if (ecb_expect_false (ev_is_active (w)))
4968 return; 5240 return;
4969 5241
4970 w->sent = 0; 5242 w->sent = 0;
4971 5243
4972 evpipe_init (EV_A); 5244 evpipe_init (EV_A);
4982 5254
4983void 5255void
4984ev_async_stop (EV_P_ ev_async *w) EV_NOEXCEPT 5256ev_async_stop (EV_P_ ev_async *w) EV_NOEXCEPT
4985{ 5257{
4986 clear_pending (EV_A_ (W)w); 5258 clear_pending (EV_A_ (W)w);
4987 if (expect_false (!ev_is_active (w))) 5259 if (ecb_expect_false (!ev_is_active (w)))
4988 return; 5260 return;
4989 5261
4990 EV_FREQUENT_CHECK; 5262 EV_FREQUENT_CHECK;
4991 5263
4992 { 5264 {
