/cvs/libev/ev.c

Comparing libev/ev.c (file contents):
Revision 1.435 by root, Sat May 26 08:52:09 2012 UTC vs.
Revision 1.449 by root, Sun Sep 23 21:21:58 2012 UTC

504 */ 504 */
505 505
506#ifndef ECB_H 506#ifndef ECB_H
507#define ECB_H 507#define ECB_H
508 508
509/* 16 bits major, 16 bits minor */
510#define ECB_VERSION 0x00010001
511
509#ifdef _WIN32 512#ifdef _WIN32
510 typedef signed char int8_t; 513 typedef signed char int8_t;
511 typedef unsigned char uint8_t; 514 typedef unsigned char uint8_t;
512 typedef signed short int16_t; 515 typedef signed short int16_t;
513 typedef unsigned short uint16_t; 516 typedef unsigned short uint16_t;
518 typedef unsigned long long uint64_t; 521 typedef unsigned long long uint64_t;
519 #else /* _MSC_VER || __BORLANDC__ */ 522 #else /* _MSC_VER || __BORLANDC__ */
520 typedef signed __int64 int64_t; 523 typedef signed __int64 int64_t;
521 typedef unsigned __int64 uint64_t; 524 typedef unsigned __int64 uint64_t;
522 #endif 525 #endif
526 #ifdef _WIN64
527 #define ECB_PTRSIZE 8
528 typedef uint64_t uintptr_t;
529 typedef int64_t intptr_t;
530 #else
531 #define ECB_PTRSIZE 4
532 typedef uint32_t uintptr_t;
533 typedef int32_t intptr_t;
534 #endif
535 typedef intptr_t ptrdiff_t;
523#else 536#else
524 #include <inttypes.h> 537 #include <inttypes.h>
538 #if UINTMAX_MAX > 0xffffffffU
539 #define ECB_PTRSIZE 8
540 #else
541 #define ECB_PTRSIZE 4
542 #endif
525#endif 543#endif
526 544
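The new ECB_PTRSIZE logic hard-codes 8-byte pointers on _WIN64 (where the header also supplies its own uintptr_t/intptr_t, since it avoids <stdint.h> there) and otherwise infers the size from UINTMAX_MAX: a uintmax_t wider than 32 bits is taken to imply 64-bit pointers. The heuristic is deliberately crude; a quick way to sanity-check it on a given toolchain (hypothetical test program, not part of ev.c):

/* sketch: sanity-check the UINTMAX_MAX pointer-size heuristic (hypothetical, not part of ev.c) */
#include <stdio.h>
#include <inttypes.h>

#if UINTMAX_MAX > 0xffffffffU
  #define GUESSED_PTRSIZE 8   /* same test the header uses outside of _WIN32 */
#else
  #define GUESSED_PTRSIZE 4
#endif

int main (void)
{
  printf ("heuristic: %d, actual sizeof (void *): %u\n",
          GUESSED_PTRSIZE, (unsigned)sizeof (void *));
  return 0;
}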
527/* many compilers define _GNUC_ to some versions but then only implement 545/* many compilers define _GNUC_ to some versions but then only implement
528 * what their idiot authors think are the "more important" extensions, 546 * what their idiot authors think are the "more important" extensions,
529 * causing enormous grief in return for some better fake benchmark numbers. 547 * causing enormous grief in return for some better fake benchmark numbers.
537 #else 555 #else
538 #define ECB_GCC_VERSION(major,minor) (__GNUC__ > (major) || (__GNUC__ == (major) && __GNUC_MINOR__ >= (minor))) 556 #define ECB_GCC_VERSION(major,minor) (__GNUC__ > (major) || (__GNUC__ == (major) && __GNUC_MINOR__ >= (minor)))
539 #endif 557 #endif
540#endif 558#endif
541 559
560#define ECB_C (__STDC__+0) /* this assumes that __STDC__ is either empty or a number */
561#define ECB_C99 (__STDC_VERSION__ >= 199901L)
562#define ECB_C11 (__STDC_VERSION__ >= 201112L)
563#define ECB_CPP (__cplusplus+0)
564#define ECB_CPP11 (__cplusplus >= 201103L)
565
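The ECB_C/ECB_C99/ECB_C11/ECB_CPP/ECB_CPP11 macros added here centralise language-version detection: __STDC_VERSION__ and __cplusplus are simply absent on older compilers and therefore read as 0 inside #if, which is what makes the comparisons safe (and the "+0" covers __STDC__/__cplusplus being defined but empty). A minimal illustration of the same idiom (hypothetical, outside ev.c):

/* sketch: gating code on the language version the same way (hypothetical) */
#include <stdio.h>

#define MY_C99 (__STDC_VERSION__ >= 199901L)   /* an undefined macro reads as 0 in #if */
#define MY_C11 (__STDC_VERSION__ >= 201112L)

int main (void)
{
#if MY_C11
  printf ("compiled as C11 or later\n");
#elif MY_C99
  printf ("compiled as C99\n");
#else
  printf ("compiled as C89/C90 (or as C++)\n");
#endif
  return 0;
}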
542/*****************************************************************************/ 566/*****************************************************************************/
543 567
544/* ECB_NO_THREADS - ecb is not used by multiple threads, ever */ 568/* ECB_NO_THREADS - ecb is not used by multiple threads, ever */
545/* ECB_NO_SMP - ecb might be used in multiple threads, but only on a single cpu */ 569/* ECB_NO_SMP - ecb might be used in multiple threads, but only on a single cpu */
546 570
547#if ECB_NO_THREADS 571#if ECB_NO_THREADS
548# define ECB_NO_SMP 1 572 #define ECB_NO_SMP 1
549#endif 573#endif
550 574
551#if ECB_NO_THREADS || ECB_NO_SMP 575#if ECB_NO_SMP
552 #define ECB_MEMORY_FENCE do { } while (0) 576 #define ECB_MEMORY_FENCE do { } while (0)
553#endif 577#endif
554 578
555#ifndef ECB_MEMORY_FENCE 579#ifndef ECB_MEMORY_FENCE
556 #if ECB_GCC_VERSION(2,5) || defined __INTEL_COMPILER || (__llvm__ && __GNUC__) || __SUNPRO_C >= 0x5110 || __SUNPRO_CC >= 0x5110 580 #if ECB_GCC_VERSION(2,5) || defined __INTEL_COMPILER || (__llvm__ && __GNUC__) || __SUNPRO_C >= 0x5110 || __SUNPRO_CC >= 0x5110
557 #if __i386 || __i386__ 581 #if __i386 || __i386__
558 #define ECB_MEMORY_FENCE __asm__ __volatile__ ("lock; orb $0, -1(%%esp)" : : : "memory") 582 #define ECB_MEMORY_FENCE __asm__ __volatile__ ("lock; orb $0, -1(%%esp)" : : : "memory")
559 #define ECB_MEMORY_FENCE_ACQUIRE ECB_MEMORY_FENCE /* non-lock xchg might be enough */ 583 #define ECB_MEMORY_FENCE_ACQUIRE __asm__ __volatile__ ("" : : : "memory")
560 #define ECB_MEMORY_FENCE_RELEASE do { } while (0) /* unlikely to change in future cpus */ 584 #define ECB_MEMORY_FENCE_RELEASE __asm__ __volatile__ ("")
561 #elif __amd64 || __amd64__ || __x86_64 || __x86_64__ 585 #elif __amd64 || __amd64__ || __x86_64 || __x86_64__
562 #define ECB_MEMORY_FENCE __asm__ __volatile__ ("mfence" : : : "memory") 586 #define ECB_MEMORY_FENCE __asm__ __volatile__ ("mfence" : : : "memory")
563 #define ECB_MEMORY_FENCE_ACQUIRE __asm__ __volatile__ ("lfence" : : : "memory") 587 #define ECB_MEMORY_FENCE_ACQUIRE __asm__ __volatile__ ("" : : : "memory")
564 #define ECB_MEMORY_FENCE_RELEASE __asm__ __volatile__ ("sfence") /* play safe - not needed in any current cpu */ 588 #define ECB_MEMORY_FENCE_RELEASE __asm__ __volatile__ ("")
565 #elif __powerpc__ || __ppc__ || __powerpc64__ || __ppc64__ 589 #elif __powerpc__ || __ppc__ || __powerpc64__ || __ppc64__
566 #define ECB_MEMORY_FENCE __asm__ __volatile__ ("sync" : : : "memory") 590 #define ECB_MEMORY_FENCE __asm__ __volatile__ ("sync" : : : "memory")
567 #elif defined __ARM_ARCH_6__ || defined __ARM_ARCH_6J__ \ 591 #elif defined __ARM_ARCH_6__ || defined __ARM_ARCH_6J__ \
568 || defined __ARM_ARCH_6K__ || defined __ARM_ARCH_6ZK__ 592 || defined __ARM_ARCH_6K__ || defined __ARM_ARCH_6ZK__
569 #define ECB_MEMORY_FENCE __asm__ __volatile__ ("mcr p15,0,%0,c7,c10,5" : : "r" (0) : "memory") 593 #define ECB_MEMORY_FENCE __asm__ __volatile__ ("mcr p15,0,%0,c7,c10,5" : : "r" (0) : "memory")
570 #elif defined __ARM_ARCH_7__ || defined __ARM_ARCH_7A__ \ 594 #elif defined __ARM_ARCH_7__ || defined __ARM_ARCH_7A__ \
571 || defined __ARM_ARCH_7M__ || defined __ARM_ARCH_7R__ 595 || defined __ARM_ARCH_7M__ || defined __ARM_ARCH_7R__
572 #define ECB_MEMORY_FENCE __asm__ __volatile__ ("dmb" : : : "memory") 596 #define ECB_MEMORY_FENCE __asm__ __volatile__ ("dmb" : : : "memory")
573 #elif __sparc || __sparc__ 597 #elif __sparc || __sparc__
574 #define ECB_MEMORY_FENCE __asm__ __volatile__ ("membar #LoadStore | #LoadLoad | #StoreStore | #StoreLoad | " : : : "memory") 598 #define ECB_MEMORY_FENCE __asm__ __volatile__ ("membar #LoadStore | #LoadLoad | #StoreStore | #StoreLoad" : : : "memory")
575 #define ECB_MEMORY_FENCE_ACQUIRE __asm__ __volatile__ ("membar #LoadStore | #LoadLoad" : : : "memory") 599 #define ECB_MEMORY_FENCE_ACQUIRE __asm__ __volatile__ ("membar #LoadStore | #LoadLoad" : : : "memory")
576 #define ECB_MEMORY_FENCE_RELEASE __asm__ __volatile__ ("membar #LoadStore | #StoreStore") 600 #define ECB_MEMORY_FENCE_RELEASE __asm__ __volatile__ ("membar #LoadStore | #StoreStore")
577 #elif defined __s390__ || defined __s390x__ 601 #elif defined __s390__ || defined __s390x__
578 #define ECB_MEMORY_FENCE __asm__ __volatile__ ("bcr 15,0" : : : "memory") 602 #define ECB_MEMORY_FENCE __asm__ __volatile__ ("bcr 15,0" : : : "memory")
579 #elif defined __mips__ 603 #elif defined __mips__
580 #define ECB_MEMORY_FENCE __asm__ __volatile__ ("sync" : : : "memory") 604 #define ECB_MEMORY_FENCE __asm__ __volatile__ ("sync" : : : "memory")
581 #elif defined __alpha__ 605 #elif defined __alpha__
582 #define ECB_MEMORY_FENCE __asm__ __volatile__ ("mb" : : : "memory") 606 #define ECB_MEMORY_FENCE __asm__ __volatile__ ("mb" : : : "memory")
607 #elif defined __hppa__
608 #define ECB_MEMORY_FENCE __asm__ __volatile__ ("" : : : "memory")
609 #define ECB_MEMORY_FENCE_RELEASE __asm__ __volatile__ ("")
610 #elif defined __ia64__
611 #define ECB_MEMORY_FENCE __asm__ __volatile__ ("mf" : : : "memory")
583 #endif 612 #endif
584 #endif 613 #endif
585#endif 614#endif
586 615
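On i386 and amd64 the acquire and release fences are reduced here to (at most) compiler barriers instead of locked instructions or lfence/sfence: under x86's strongly ordered (TSO) model, plain loads already have acquire semantics and plain stores release semantics, so only the full StoreLoad fence still needs a real instruction. A hedged sketch of the message-passing pattern such acquire/release fences support (hypothetical demo, not code from ev.c; a single-threaded main keeps it runnable):

/* sketch: release/acquire message passing with compiler-barrier fences (sufficient on x86 TSO) */
#define MY_FENCE_RELEASE __asm__ __volatile__ ("" : : : "memory")
#define MY_FENCE_ACQUIRE __asm__ __volatile__ ("" : : : "memory")

static int payload;
static volatile int ready;

static void producer (void)
{
  payload = 42;       /* write the data ...                       */
  MY_FENCE_RELEASE;   /* ... and keep it ordered before ...       */
  ready = 1;          /* ... publishing the flag                  */
}

static int consumer (void)
{
  while (!ready)
    ;                 /* wait for the flag                        */
  MY_FENCE_ACQUIRE;   /* order the flag read before the data read */
  return payload;
}

int main (void)
{
  producer ();
  return consumer () == 42 ? 0 : 1;
}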
587#ifndef ECB_MEMORY_FENCE 616#ifndef ECB_MEMORY_FENCE
617 #if ECB_GCC_VERSION(4,7)
618 /* see comment below (stdatomic.h) about the C11 memory model. */
619 #define ECB_MEMORY_FENCE __atomic_thread_fence (__ATOMIC_SEQ_CST)
620 #elif defined __clang && __has_feature (cxx_atomic)
621 /* see comment below (stdatomic.h) about the C11 memory model. */
622 #define ECB_MEMORY_FENCE __c11_atomic_thread_fence (__ATOMIC_SEQ_CST)
588 #if ECB_GCC_VERSION(4,4) || defined __INTEL_COMPILER || defined __clang__ 623 #elif ECB_GCC_VERSION(4,4) || defined __INTEL_COMPILER || defined __clang__
589 #define ECB_MEMORY_FENCE __sync_synchronize () 624 #define ECB_MEMORY_FENCE __sync_synchronize ()
590 /*#define ECB_MEMORY_FENCE_ACQUIRE ({ char dummy = 0; __sync_lock_test_and_set (&dummy, 1); }) */
591 /*#define ECB_MEMORY_FENCE_RELEASE ({ char dummy = 1; __sync_lock_release (&dummy ); }) */
592 #elif _MSC_VER >= 1400 /* VC++ 2005 */ 625 #elif _MSC_VER >= 1400 /* VC++ 2005 */
593 #pragma intrinsic(_ReadBarrier,_WriteBarrier,_ReadWriteBarrier) 626 #pragma intrinsic(_ReadBarrier,_WriteBarrier,_ReadWriteBarrier)
594 #define ECB_MEMORY_FENCE _ReadWriteBarrier () 627 #define ECB_MEMORY_FENCE _ReadWriteBarrier ()
595 #define ECB_MEMORY_FENCE_ACQUIRE _ReadWriteBarrier () /* according to msdn, _ReadBarrier is not a load fence */ 628 #define ECB_MEMORY_FENCE_ACQUIRE _ReadWriteBarrier () /* according to msdn, _ReadBarrier is not a load fence */
596 #define ECB_MEMORY_FENCE_RELEASE _WriteBarrier () 629 #define ECB_MEMORY_FENCE_RELEASE _WriteBarrier ()
606 #define ECB_MEMORY_FENCE __sync () 639 #define ECB_MEMORY_FENCE __sync ()
607 #endif 640 #endif
608#endif 641#endif
609 642
610#ifndef ECB_MEMORY_FENCE 643#ifndef ECB_MEMORY_FENCE
644 #if ECB_C11 && !defined __STDC_NO_ATOMICS__
645 /* we assume that these memory fences work on all variables/all memory accesses, */
646 /* not just C11 atomics and atomic accesses */
647 #include <stdatomic.h>
648 /* Unfortunately, neither gcc 4.7 nor clang 3.1 generate any instructions for */
649 /* any fence other than seq_cst, which isn't very efficient for us. */
650 /* Why that is, we don't know - either the C11 memory model is quite useless */
651 /* for most usages, or gcc and clang have a bug */
652 /* I *currently* lean towards the latter, and inefficiently implement */
653 /* all three of ecb's fences as a seq_cst fence */
654 #define ECB_MEMORY_FENCE atomic_thread_fence (memory_order_seq_cst)
655 #endif
656#endif
657
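The new C11 fallback pulls in <stdatomic.h> when __STDC_NO_ATOMICS__ is not defined and, as the comment explains, maps all three ecb fences to a seq_cst fence because gcc 4.7 and clang 3.1 emitted no instructions at all for anything weaker. The primitive itself, in isolation (hypothetical standalone demo):

/* sketch: the C11 fence primitive the fallback builds on (hypothetical demo) */
#include <stdatomic.h>
#include <stdio.h>

static int data;
static atomic_int flag;

int main (void)
{
  data = 1;
  atomic_thread_fence (memory_order_seq_cst);  /* full barrier, as used for ECB_MEMORY_FENCE */
  atomic_store (&flag, 1);

  printf ("flag=%d data=%d\n", atomic_load (&flag), data);
  return 0;
}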
658#ifndef ECB_MEMORY_FENCE
611 #if !ECB_AVOID_PTHREADS 659 #if !ECB_AVOID_PTHREADS
612 /* 660 /*
613 * if you get undefined symbol references to pthread_mutex_lock, 661 * if you get undefined symbol references to pthread_mutex_lock,
614 * or failure to find pthread.h, then you should implement 662 * or failure to find pthread.h, then you should implement
615 * the ECB_MEMORY_FENCE operations for your cpu/compiler 663 * the ECB_MEMORY_FENCE operations for your cpu/compiler
633 #define ECB_MEMORY_FENCE_RELEASE ECB_MEMORY_FENCE 681 #define ECB_MEMORY_FENCE_RELEASE ECB_MEMORY_FENCE
634#endif 682#endif
635 683
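The last-resort branch (largely elided in this hunk) builds the fence out of pthreads, relying on the POSIX rule that locking and unlocking a mutex synchronises memory; the trailing line shows that the acquire/release variants then simply default to the full fence. A hedged reconstruction of what such a mutex-based fence can look like (the exact elided ecb code may differ; link with -pthread):

/* sketch: a last-resort full fence built from a pthread mutex        */
/* (hedged reconstruction; the actual elided ecb code may differ)     */
#include <pthread.h>

static pthread_mutex_t my_fence_mutex = PTHREAD_MUTEX_INITIALIZER;

#define MY_MEMORY_FENCE                        \
  do {                                         \
    pthread_mutex_lock   (&my_fence_mutex);    \
    pthread_mutex_unlock (&my_fence_mutex);    \
  } while (0)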
636/*****************************************************************************/ 684/*****************************************************************************/
637 685
638#define ECB_C99 (__STDC_VERSION__ >= 199901L)
639
640#if __cplusplus 686#if __cplusplus
641 #define ecb_inline static inline 687 #define ecb_inline static inline
642#elif ECB_GCC_VERSION(2,5) 688#elif ECB_GCC_VERSION(2,5)
643 #define ecb_inline static __inline__ 689 #define ecb_inline static __inline__
644#elif ECB_C99 690#elif ECB_C99
682#elif ECB_GCC_VERSION(3,0) 728#elif ECB_GCC_VERSION(3,0)
683 #define ecb_decltype(x) __typeof(x) 729 #define ecb_decltype(x) __typeof(x)
684#endif 730#endif
685 731
686#define ecb_noinline ecb_attribute ((__noinline__)) 732#define ecb_noinline ecb_attribute ((__noinline__))
687#define ecb_noreturn ecb_attribute ((__noreturn__))
688#define ecb_unused ecb_attribute ((__unused__)) 733#define ecb_unused ecb_attribute ((__unused__))
689#define ecb_const ecb_attribute ((__const__)) 734#define ecb_const ecb_attribute ((__const__))
690#define ecb_pure ecb_attribute ((__pure__)) 735#define ecb_pure ecb_attribute ((__pure__))
736
737#if ECB_C11
738 #define ecb_noreturn _Noreturn
739#else
740 #define ecb_noreturn ecb_attribute ((__noreturn__))
741#endif
691 742
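ecb_noreturn now prefers the standard C11 _Noreturn keyword over the GCC __noreturn__ attribute whenever ECB_C11 is true. Minimal usage of the C11 form (hypothetical example, not from ev.c):

/* sketch: a function annotated with C11 _Noreturn (hypothetical usage) */
#include <stdio.h>
#include <stdlib.h>

static _Noreturn void
die (const char *msg)
{
  fputs (msg, stderr);
  abort ();             /* never returns, matching the annotation */
}

int main (void)
{
  die ("fatal: example\n");
}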
692#if ECB_GCC_VERSION(4,3) 743#if ECB_GCC_VERSION(4,3)
693 #define ecb_artificial ecb_attribute ((__artificial__)) 744 #define ecb_artificial ecb_attribute ((__artificial__))
694 #define ecb_hot ecb_attribute ((__hot__)) 745 #define ecb_hot ecb_attribute ((__hot__))
695 #define ecb_cold ecb_attribute ((__cold__)) 746 #define ecb_cold ecb_attribute ((__cold__))
786 837
787 return r + ecb_ld32 (x); 838 return r + ecb_ld32 (x);
788 } 839 }
789#endif 840#endif
790 841
842ecb_function_ ecb_bool ecb_is_pot32 (uint32_t x) ecb_const;
843ecb_function_ ecb_bool ecb_is_pot32 (uint32_t x) { return !(x & (x - 1)); }
844ecb_function_ ecb_bool ecb_is_pot64 (uint64_t x) ecb_const;
845ecb_function_ ecb_bool ecb_is_pot64 (uint64_t x) { return !(x & (x - 1)); }
846
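The new ecb_is_pot32/64 helpers use the classic x & (x - 1) trick: clearing the lowest set bit leaves zero exactly when at most one bit was set, so the negation is true for powers of two, and, as written, also for x == 0. A small hedged check (hypothetical test harness, not part of ev.c):

/* sketch: the x & (x - 1) power-of-two test over a few sample values */
#include <stdio.h>
#include <stdint.h>

static int is_pot32 (uint32_t x) { return !(x & (x - 1)); }   /* note: also true for 0 */

int main (void)
{
  uint32_t v[] = { 0, 1, 2, 3, 4, 6, 8, 255, 256, 0x80000000u };
  size_t i;

  for (i = 0; i < sizeof (v) / sizeof (v[0]); i++)
    printf ("%10u -> %d\n", (unsigned)v[i], is_pot32 (v[i]));

  return 0;
}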
791ecb_function_ uint8_t ecb_bitrev8 (uint8_t x) ecb_const; 847ecb_function_ uint8_t ecb_bitrev8 (uint8_t x) ecb_const;
792ecb_function_ uint8_t ecb_bitrev8 (uint8_t x) 848ecb_function_ uint8_t ecb_bitrev8 (uint8_t x)
793{ 849{
794 return ( (x * 0x0802U & 0x22110U) 850 return ( (x * 0x0802U & 0x22110U)
795 | (x * 0x8020U & 0x88440U)) * 0x10101U >> 16; 851 | (x * 0x8020U & 0x88440U)) * 0x10101U >> 16;
1135} 1191}
1136 1192
1137static void * 1193static void *
1138ev_realloc_emul (void *ptr, long size) EV_THROW 1194ev_realloc_emul (void *ptr, long size) EV_THROW
1139{ 1195{
1140#if __GLIBC__
1141 return realloc (ptr, size);
1142#else
1143 /* some systems, notably openbsd and darwin, fail to properly 1196 /* some systems, notably openbsd and darwin, fail to properly
1144 * implement realloc (x, 0) (as required by both ansi c-89 and 1197 * implement realloc (x, 0) (as required by both ansi c-89 and
1145 * the single unix specification, so work around them here. 1198 * the single unix specification, so work around them here.
1199 * recently, also (at least) fedora and debian started breaking it,
1200 * despite documenting it otherwise.
1146 */ 1201 */
1147 1202
1148 if (size) 1203 if (size)
1149 return realloc (ptr, size); 1204 return realloc (ptr, size);
1150 1205
1151 free (ptr); 1206 free (ptr);
1152 return 0; 1207 return 0;
1153#endif
1154} 1208}
1155 1209
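The __GLIBC__ special case was dropped: ev_realloc_emul now always implements realloc (ptr, 0) as free plus a NULL return, because (per the updated comment) even glibc-based distributions such as Fedora and Debian started breaking that behaviour. Applications that want different allocation behaviour can install their own allocator; a hedged example in the spirit of the libev documentation (ev_set_allocator is real libev API, the callback body is a sketch):

/* sketch: installing a custom allocator that aborts on out-of-memory */
#include <stdio.h>
#include <stdlib.h>
#include <ev.h>

static void *
my_realloc (void *ptr, long size)
{
  if (size)
    {
      void *p = realloc (ptr, size);

      if (!p)
        {
          fputs ("libev: out of memory, aborting\n", stderr);
          abort ();
        }

      return p;
    }

  free (ptr);      /* portable realloc (ptr, 0) replacement, as in ev_realloc_emul */
  return 0;
}

int main (void)
{
  ev_set_allocator (my_realloc);
  /* ... ev_default_loop (0), watchers, ev_run ... */
  return 0;
}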
1156static void *(*alloc)(void *ptr, long size) EV_THROW = ev_realloc_emul; 1210static void *(*alloc)(void *ptr, long size) EV_THROW = ev_realloc_emul;
1157 1211
1158void ecb_cold 1212void ecb_cold
1813static void noinline ecb_cold 1867static void noinline ecb_cold
1814evpipe_init (EV_P) 1868evpipe_init (EV_P)
1815{ 1869{
1816 if (!ev_is_active (&pipe_w)) 1870 if (!ev_is_active (&pipe_w))
1817 { 1871 {
1872 int fds [2];
1873
1818# if EV_USE_EVENTFD 1874# if EV_USE_EVENTFD
1875 fds [0] = -1;
1819 evfd = eventfd (0, EFD_NONBLOCK | EFD_CLOEXEC); 1876 fds [1] = eventfd (0, EFD_NONBLOCK | EFD_CLOEXEC);
1820 if (evfd < 0 && errno == EINVAL) 1877 if (fds [1] < 0 && errno == EINVAL)
1821 evfd = eventfd (0, 0); 1878 fds [1] = eventfd (0, 0);
1822 1879
1823 if (evfd >= 0) 1880 if (fds [1] < 0)
1824 {
1825 evpipe [0] = -1;
1826 fd_intern (evfd); /* doing it twice doesn't hurt */
1827 ev_io_set (&pipe_w, evfd, EV_READ);
1828 }
1829 else
1830# endif 1881# endif
1831 { 1882 {
1832 while (pipe (evpipe)) 1883 while (pipe (fds))
1833 ev_syserr ("(libev) error creating signal/async pipe"); 1884 ev_syserr ("(libev) error creating signal/async pipe");
1834 1885
1835 fd_intern (evpipe [0]); 1886 fd_intern (fds [0]);
1836 fd_intern (evpipe [1]);
1837 ev_io_set (&pipe_w, evpipe [0], EV_READ);
1838 } 1887 }
1839 1888
1889 fd_intern (fds [1]);
1890
1891 evpipe [0] = fds [0];
1892
1893 if (evpipe [1] < 0)
1894 evpipe [1] = fds [1]; /* first call, set write fd */
1895 else
1896 {
1897 /* on subsequent calls, do not change evpipe [1] */
1898 /* so that evpipe_write can always rely on its value. */
1899 /* this branch does not do anything sensible on windows, */
1900 /* so must not be executed on windows */
1901
1902 dup2 (fds [1], evpipe [1]);
1903 close (fds [1]);
1904 }
1905
1906 ev_io_set (&pipe_w, evpipe [0] < 0 ? evpipe [1] : evpipe [0], EV_READ);
1840 ev_io_start (EV_A_ &pipe_w); 1907 ev_io_start (EV_A_ &pipe_w);
1841 ev_unref (EV_A); /* watcher should not keep loop alive */ 1908 ev_unref (EV_A); /* watcher should not keep loop alive */
1842 } 1909 }
1843} 1910}
1844 1911
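evpipe_init now builds the descriptors in a local fds [2]: with eventfd support, fds [0] stays -1 and fds [1] becomes the eventfd, otherwise a pipe is created. evpipe [0] is taken over directly, while on re-initialisation the fresh write end is dup2'ed onto the existing evpipe [1] so that evpipe_write can keep relying on a stable descriptor (the comment notes this branch must not run on windows), and the watcher is pointed at evpipe [0], or at evpipe [1] in the eventfd case. For context, the underlying wakeup mechanism in isolation (hypothetical standalone demo, Linux-oriented, error handling abridged):

/* sketch: eventfd-or-pipe wakeup channel, the mechanism evpipe_init sets up */
#include <stdio.h>
#include <stdint.h>
#include <unistd.h>

#ifdef __linux__
# include <sys/eventfd.h>
#endif

int main (void)
{
  int fds [2] = { -1, -1 };

#ifdef __linux__
  fds [1] = eventfd (0, EFD_NONBLOCK | EFD_CLOEXEC);
#endif

  if (fds [1] < 0 && pipe (fds))      /* fall back to a pipe, as libev does */
    return 1;

  if (fds [0] < 0)                    /* eventfd case: write/read a 64 bit counter */
    {
      uint64_t counter = 1;
      write (fds [1], &counter, sizeof (counter));
      read  (fds [1], &counter, sizeof (counter));
      printf ("eventfd wakeup, counter=%llu\n", (unsigned long long)counter);
    }
  else                                /* pipe case: write/read a dummy byte */
    {
      char buf = 0;
      write (fds [1], &buf, 1);
      read  (fds [0], &buf, 1);
      printf ("pipe wakeup\n");
    }

  return 0;
}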
1849 1916
1850 if (expect_true (*flag)) 1917 if (expect_true (*flag))
1851 return; 1918 return;
1852 1919
1853 *flag = 1; 1920 *flag = 1;
1854
1855 ECB_MEMORY_FENCE_RELEASE; /* make sure flag is visible before the wakeup */ 1921 ECB_MEMORY_FENCE_RELEASE; /* make sure flag is visible before the wakeup */
1856 1922
1857 pipe_write_skipped = 1; 1923 pipe_write_skipped = 1;
1858 1924
1859 ECB_MEMORY_FENCE; /* make sure pipe_write_skipped is visible before we check pipe_write_wanted */ 1925 ECB_MEMORY_FENCE; /* make sure pipe_write_skipped is visible before we check pipe_write_wanted */
1860 1926
1861 if (pipe_write_wanted) 1927 if (pipe_write_wanted)
1862 { 1928 {
1863 int old_errno; 1929 int old_errno;
1864 1930
1865 pipe_write_skipped = 0; /* just an optimisation, no fence needed */ 1931 pipe_write_skipped = 0;
1932 ECB_MEMORY_FENCE_RELEASE;
1866 1933
1867 old_errno = errno; /* save errno because write will clobber it */ 1934 old_errno = errno; /* save errno because write will clobber it */
1868 1935
1869#if EV_USE_EVENTFD 1936#if EV_USE_EVENTFD
1870 if (evfd >= 0) 1937 if (evpipe [0] < 0)
1871 { 1938 {
1872 uint64_t counter = 1; 1939 uint64_t counter = 1;
1873 write (evfd, &counter, sizeof (uint64_t)); 1940 write (evpipe [1], &counter, sizeof (uint64_t));
1874 } 1941 }
1875 else 1942 else
1876#endif 1943#endif
1877 { 1944 {
1878#ifdef _WIN32 1945#ifdef _WIN32
1898 int i; 1965 int i;
1899 1966
1900 if (revents & EV_READ) 1967 if (revents & EV_READ)
1901 { 1968 {
1902#if EV_USE_EVENTFD 1969#if EV_USE_EVENTFD
1903 if (evfd >= 0) 1970 if (evpipe [0] < 0)
1904 { 1971 {
1905 uint64_t counter; 1972 uint64_t counter;
1906 read (evfd, &counter, sizeof (uint64_t)); 1973 read (evpipe [1], &counter, sizeof (uint64_t));
1907 } 1974 }
1908 else 1975 else
1909#endif 1976#endif
1910 { 1977 {
1911 char dummy[4]; 1978 char dummy[4];
1929#if EV_SIGNAL_ENABLE 1996#if EV_SIGNAL_ENABLE
1930 if (sig_pending) 1997 if (sig_pending)
1931 { 1998 {
1932 sig_pending = 0; 1999 sig_pending = 0;
1933 2000
1934 ECB_MEMORY_FENCE_RELEASE; 2001 ECB_MEMORY_FENCE;
1935 2002
1936 for (i = EV_NSIG - 1; i--; ) 2003 for (i = EV_NSIG - 1; i--; )
1937 if (expect_false (signals [i].pending)) 2004 if (expect_false (signals [i].pending))
1938 ev_feed_signal_event (EV_A_ i + 1); 2005 ev_feed_signal_event (EV_A_ i + 1);
1939 } 2006 }
1942#if EV_ASYNC_ENABLE 2009#if EV_ASYNC_ENABLE
1943 if (async_pending) 2010 if (async_pending)
1944 { 2011 {
1945 async_pending = 0; 2012 async_pending = 0;
1946 2013
1947 ECB_MEMORY_FENCE_RELEASE; 2014 ECB_MEMORY_FENCE;
1948 2015
1949 for (i = asynccnt; i--; ) 2016 for (i = asynccnt; i--; )
1950 if (asyncs [i]->sent) 2017 if (asyncs [i]->sent)
1951 { 2018 {
1952 asyncs [i]->sent = 0; 2019 asyncs [i]->sent = 0;
2020 ECB_MEMORY_FENCE_RELEASE;
1953 ev_feed_event (EV_A_ asyncs [i], EV_ASYNC); 2021 ev_feed_event (EV_A_ asyncs [i], EV_ASYNC);
1954 } 2022 }
1955 } 2023 }
1956#endif 2024#endif
1957} 2025}
1960 2028
1961void 2029void
1962ev_feed_signal (int signum) EV_THROW 2030ev_feed_signal (int signum) EV_THROW
1963{ 2031{
1964#if EV_MULTIPLICITY 2032#if EV_MULTIPLICITY
2033 ECB_MEMORY_FENCE_ACQUIRE;
1965 EV_P = signals [signum - 1].loop; 2034 EV_P = signals [signum - 1].loop;
1966 2035
1967 if (!EV_A) 2036 if (!EV_A)
1968 return; 2037 return;
1969#endif 2038#endif
1970 2039
1971 if (!ev_active (&pipe_w))
1972 return;
1973
1974 signals [signum - 1].pending = 1; 2040 signals [signum - 1].pending = 1;
1975 evpipe_write (EV_A_ &sig_pending); 2041 evpipe_write (EV_A_ &sig_pending);
1976} 2042}
1977 2043
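ev_feed_signal drops the early return for an inactive pipe_w and gains an acquire fence before reading the signal's associated loop (pairing with the release fence added in ev_signal_start further down). It remains the async-signal-safe way to inject a signal into whichever loop the signal is attached to; a hedged usage sketch (ev_feed_signal, ev_signal and friends are real libev API, the program around them is illustrative):

/* sketch: simulating a signal via ev_feed_signal (illustrative program) */
#include <signal.h>
#include <stdio.h>
#include <ev.h>

static void
sigusr1_cb (EV_P_ ev_signal *w, int revents)
{
  printf ("SIGUSR1 delivered through the loop\n");
  ev_break (EV_A_ EVBREAK_ALL);
}

int main (void)
{
  struct ev_loop *loop = EV_DEFAULT;
  ev_signal sigw;

  ev_signal_init (&sigw, sigusr1_cb, SIGUSR1);
  ev_signal_start (loop, &sigw);

  ev_feed_signal (SIGUSR1);   /* simulate a signal; safe even from a real signal handler */

  ev_run (loop, 0);
  return 0;
}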
1978static void 2044static void
1988void noinline 2054void noinline
1989ev_feed_signal_event (EV_P_ int signum) EV_THROW 2055ev_feed_signal_event (EV_P_ int signum) EV_THROW
1990{ 2056{
1991 WL w; 2057 WL w;
1992 2058
1993 if (expect_false (signum <= 0 || signum > EV_NSIG)) 2059 if (expect_false (signum <= 0 || signum >= EV_NSIG))
1994 return; 2060 return;
1995 2061
1996 --signum; 2062 --signum;
1997 2063
1998#if EV_MULTIPLICITY 2064#if EV_MULTIPLICITY
2002 if (expect_false (signals [signum].loop != EV_A)) 2068 if (expect_false (signals [signum].loop != EV_A))
2003 return; 2069 return;
2004#endif 2070#endif
2005 2071
2006 signals [signum].pending = 0; 2072 signals [signum].pending = 0;
2073 ECB_MEMORY_FENCE_RELEASE;
2007 2074
2008 for (w = signals [signum].head; w; w = w->next) 2075 for (w = signals [signum].head; w; w = w->next)
2009 ev_feed_event (EV_A_ (W)w, EV_SIGNAL); 2076 ev_feed_event (EV_A_ (W)w, EV_SIGNAL);
2010} 2077}
2011 2078
2295#if EV_ASYNC_ENABLE 2362#if EV_ASYNC_ENABLE
2296 async_pending = 0; 2363 async_pending = 0;
2297#endif 2364#endif
2298 pipe_write_skipped = 0; 2365 pipe_write_skipped = 0;
2299 pipe_write_wanted = 0; 2366 pipe_write_wanted = 0;
2367 evpipe [0] = -1;
2368 evpipe [1] = -1;
2300#if EV_USE_INOTIFY 2369#if EV_USE_INOTIFY
2301 fs_fd = flags & EVFLAG_NOINOTIFY ? -1 : -2; 2370 fs_fd = flags & EVFLAG_NOINOTIFY ? -1 : -2;
2302#endif 2371#endif
2303#if EV_USE_SIGNALFD 2372#if EV_USE_SIGNALFD
2304 sigfd = flags & EVFLAG_SIGNALFD ? -2 : -1; 2373 sigfd = flags & EVFLAG_SIGNALFD ? -2 : -1;
2367 if (ev_is_active (&pipe_w)) 2436 if (ev_is_active (&pipe_w))
2368 { 2437 {
2369 /*ev_ref (EV_A);*/ 2438 /*ev_ref (EV_A);*/
2370 /*ev_io_stop (EV_A_ &pipe_w);*/ 2439 /*ev_io_stop (EV_A_ &pipe_w);*/
2371 2440
2372#if EV_USE_EVENTFD
2373 if (evfd >= 0)
2374 close (evfd);
2375#endif
2376
2377 if (evpipe [0] >= 0)
2378 {
2379 EV_WIN32_CLOSE_FD (evpipe [0]); 2441 if (evpipe [0] >= 0) EV_WIN32_CLOSE_FD (evpipe [0]);
2380 EV_WIN32_CLOSE_FD (evpipe [1]); 2442 if (evpipe [1] >= 0) EV_WIN32_CLOSE_FD (evpipe [1]);
2381 }
2382 } 2443 }
2383 2444
2384#if EV_USE_SIGNALFD 2445#if EV_USE_SIGNALFD
2385 if (ev_is_active (&sigfd_w)) 2446 if (ev_is_active (&sigfd_w))
2386 close (sigfd); 2447 close (sigfd);
2472#endif 2533#endif
2473#if EV_USE_INOTIFY 2534#if EV_USE_INOTIFY
2474 infy_fork (EV_A); 2535 infy_fork (EV_A);
2475#endif 2536#endif
2476 2537
2538#if EV_SIGNAL_ENABLE || EV_ASYNC_ENABLE
2477 if (ev_is_active (&pipe_w)) 2539 if (ev_is_active (&pipe_w))
2478 { 2540 {
2479 /* pipe_write_wanted must be false now, so modifying fd vars should be safe */ 2541 /* pipe_write_wanted must be false now, so modifying fd vars should be safe */
2480 2542
2481 ev_ref (EV_A); 2543 ev_ref (EV_A);
2482 ev_io_stop (EV_A_ &pipe_w); 2544 ev_io_stop (EV_A_ &pipe_w);
2483 2545
2484#if EV_USE_EVENTFD
2485 if (evfd >= 0)
2486 close (evfd);
2487#endif
2488
2489 if (evpipe [0] >= 0) 2546 if (evpipe [0] >= 0)
2490 {
2491 EV_WIN32_CLOSE_FD (evpipe [0]); 2547 EV_WIN32_CLOSE_FD (evpipe [0]);
2492 EV_WIN32_CLOSE_FD (evpipe [1]);
2493 }
2494 2548
2495#if EV_SIGNAL_ENABLE || EV_ASYNC_ENABLE
2496 evpipe_init (EV_A); 2549 evpipe_init (EV_A);
2497 /* now iterate over everything, in case we missed something */ 2550 /* iterate over everything, in case we missed something before */
2498 pipecb (EV_A_ &pipe_w, EV_READ); 2551 ev_feed_event (EV_A_ &pipe_w, EV_CUSTOM);
2499#endif
2500 } 2552 }
2553#endif
2501 2554
2502 postfork = 0; 2555 postfork = 0;
2503} 2556}
2504 2557
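loop_fork's pipe handling is simplified to match the new evpipe layout: only evpipe [0] is closed, evpipe_init is re-run (keeping evpipe [1] stable via the dup2 path shown earlier), and an EV_CUSTOM event is queued on pipe_w so any signals or asyncs raised around the fork are rescanned; the whole block is now guarded by EV_SIGNAL_ENABLE || EV_ASYNC_ENABLE. None of this changes the application-side contract, which is still just to call ev_loop_fork in the child (sketch; error handling abridged):

/* sketch: the application-side fork protocol (ev_loop_fork is real libev API) */
#include <unistd.h>
#include <stdlib.h>
#include <ev.h>

int main (void)
{
  struct ev_loop *loop = EV_DEFAULT;
  pid_t pid = fork ();

  if (pid < 0)
    exit (1);

  if (pid == 0)
    ev_loop_fork (loop);   /* child: libev re-creates kernel state (backend fd, evpipe) on the next iteration */

  /* ... start watchers and ev_run (loop, 0) in whichever process keeps the loop ... */
  return 0;
}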
2505#if EV_MULTIPLICITY 2558#if EV_MULTIPLICITY
2678} 2731}
2679 2732
2680void 2733void
2681ev_loop_fork (EV_P) EV_THROW 2734ev_loop_fork (EV_P) EV_THROW
2682{ 2735{
2683 postfork = 1; /* must be in line with ev_default_fork */ 2736 postfork = 1;
2684} 2737}
2685 2738
2686/*****************************************************************************/ 2739/*****************************************************************************/
2687 2740
2688void 2741void
2704} 2757}
2705 2758
2706void noinline 2759void noinline
2707ev_invoke_pending (EV_P) 2760ev_invoke_pending (EV_P)
2708{ 2761{
2709 for (pendingpri = NUMPRI; pendingpri--; ) /* pendingpri is modified during the loop */ 2762 pendingpri = NUMPRI;
2763
2764 while (pendingpri) /* pendingpri possibly gets modified in the inner loop */
2765 {
2766 --pendingpri;
2767
2710 while (pendingcnt [pendingpri]) 2768 while (pendingcnt [pendingpri])
2711 { 2769 {
2712 ANPENDING *p = pendings [pendingpri] + --pendingcnt [pendingpri]; 2770 ANPENDING *p = pendings [pendingpri] + --pendingcnt [pendingpri];
2713 2771
2714 p->w->pending = 0; 2772 p->w->pending = 0;
2715 EV_CB_INVOKE (p->w, p->events); 2773 EV_CB_INVOKE (p->w, p->events);
2716 EV_FREQUENT_CHECK; 2774 EV_FREQUENT_CHECK;
2717 } 2775 }
2776 }
2718} 2777}
2719 2778
2720#if EV_IDLE_ENABLE 2779#if EV_IDLE_ENABLE
2721/* make idle watchers pending. this handles the "call-idle */ 2780/* make idle watchers pending. this handles the "call-idle */
2722/* only when higher priorities are idle" logic */ 2781/* only when higher priorities are idle" logic */
3080 backend_poll (EV_A_ waittime); 3139 backend_poll (EV_A_ waittime);
3081 assert ((loop_done = EVBREAK_CANCEL, 1)); /* assert for side effect */ 3140 assert ((loop_done = EVBREAK_CANCEL, 1)); /* assert for side effect */
3082 3141
3083 pipe_write_wanted = 0; /* just an optimisation, no fence needed */ 3142 pipe_write_wanted = 0; /* just an optimisation, no fence needed */
3084 3143
3144 ECB_MEMORY_FENCE_ACQUIRE;
3085 if (pipe_write_skipped) 3145 if (pipe_write_skipped)
3086 { 3146 {
3087 assert (("libev: pipe_w not active, but pipe not written", ev_is_active (&pipe_w))); 3147 assert (("libev: pipe_w not active, but pipe not written", ev_is_active (&pipe_w)));
3088 ev_feed_event (EV_A_ &pipe_w, EV_CUSTOM); 3148 ev_feed_event (EV_A_ &pipe_w, EV_CUSTOM);
3089 } 3149 }
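Taken together with the evpipe_write changes above, this hunk makes the wakeup hand-shake explicit: the loop advertises pipe_write_wanted while it is (about to be) blocked in backend_poll, writers that find it clear merely set pipe_write_skipped, and after the poll returns the loop clears pipe_write_wanted and, behind the newly added acquire fence, picks up any skipped wakeups via an EV_CUSTOM event on pipe_w. A hedged distillation of that two-flag protocol (hypothetical; ev.c spreads it across evpipe_write and ev_run and uses the ECB fences rather than C11 atomics):

/* sketch: the two-flag wakeup protocol, distilled (hypothetical, C11 atomics for brevity) */
#include <stdatomic.h>

static atomic_int wakeup_wanted;   /* loop side: "I am (about to be) blocked in poll"       */
static atomic_int wakeup_skipped;  /* writer side: "I had a wakeup but skipped the syscall" */

/* writer side, cf. evpipe_write () */
static void notify (void)
{
  atomic_store (&wakeup_skipped, 1);
  atomic_thread_fence (memory_order_seq_cst);   /* cf. ECB_MEMORY_FENCE                     */

  if (atomic_load (&wakeup_wanted))
    {
      atomic_store (&wakeup_skipped, 0);        /* the real write below wakes the loop      */
      /* write to the eventfd / pipe here */
    }
}

/* loop side, cf. the poll section of ev_run () */
static void loop_iteration (void)
{
  atomic_store (&wakeup_wanted, 1);
  atomic_thread_fence (memory_order_seq_cst);   /* cf. ECB_MEMORY_FENCE before poll         */

  /* backend_poll (...) would block here */

  atomic_store (&wakeup_wanted, 0);
  atomic_thread_fence (memory_order_acquire);   /* cf. the new ECB_MEMORY_FENCE_ACQUIRE     */

  if (atomic_load (&wakeup_skipped))
    {
      atomic_store (&wakeup_skipped, 0);
      /* handle the skipped wakeup now (ev.c feeds EV_CUSTOM on pipe_w) */
    }
}

int main (void) { notify (); loop_iteration (); return 0; }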
3467#if EV_MULTIPLICITY 3527#if EV_MULTIPLICITY
3468 assert (("libev: a signal must not be attached to two different loops", 3528 assert (("libev: a signal must not be attached to two different loops",
3469 !signals [w->signum - 1].loop || signals [w->signum - 1].loop == loop)); 3529 !signals [w->signum - 1].loop || signals [w->signum - 1].loop == loop));
3470 3530
3471 signals [w->signum - 1].loop = EV_A; 3531 signals [w->signum - 1].loop = EV_A;
3532 ECB_MEMORY_FENCE_RELEASE;
3472#endif 3533#endif
3473 3534
3474 EV_FREQUENT_CHECK; 3535 EV_FREQUENT_CHECK;
3475 3536
3476#if EV_USE_SIGNALFD 3537#if EV_USE_SIGNALFD
