/cvs/libev/ev.c

Comparing libev/ev.c (file contents):
Revision 1.432 by root, Mon May 14 19:09:58 2012 UTC vs.
Revision 1.454 by root, Fri Mar 1 11:13:22 2013 UTC

357 357
358#ifndef EV_HEAP_CACHE_AT 358#ifndef EV_HEAP_CACHE_AT
359# define EV_HEAP_CACHE_AT EV_FEATURE_DATA 359# define EV_HEAP_CACHE_AT EV_FEATURE_DATA
360#endif 360#endif
361 361
362#ifdef ANDROID
363/* supposedly, android doesn't typedef fd_mask */
364# undef EV_USE_SELECT
365# define EV_USE_SELECT 0
366/* supposedly, we need to include syscall.h, not sys/syscall.h, so just disable */
367# undef EV_USE_CLOCK_SYSCALL
368# define EV_USE_CLOCK_SYSCALL 0
369#endif
370
371/* aix's poll.h seems to cause lots of trouble */
372#ifdef _AIX
373/* AIX has a completely broken poll.h header */
374# undef EV_USE_POLL
375# define EV_USE_POLL 0
376#endif
377
362/* on linux, we can use a (slow) syscall to avoid a dependency on pthread, */ 378/* on linux, we can use a (slow) syscall to avoid a dependency on pthread, */
363/* which makes programs even slower. might work on other unices, too. */ 379/* which makes programs even slower. might work on other unices, too. */
364#if EV_USE_CLOCK_SYSCALL 380#if EV_USE_CLOCK_SYSCALL
365# include <sys/syscall.h> 381# include <sys/syscall.h>
366# ifdef SYS_clock_gettime 382# ifdef SYS_clock_gettime
372# define EV_USE_CLOCK_SYSCALL 0 388# define EV_USE_CLOCK_SYSCALL 0
373# endif 389# endif
374#endif 390#endif
375 391
376/* this block fixes any misconfiguration where we know we run into trouble otherwise */ 392/* this block fixes any misconfiguration where we know we run into trouble otherwise */
377
378#ifdef _AIX
379/* AIX has a completely broken poll.h header */
380# undef EV_USE_POLL
381# define EV_USE_POLL 0
382#endif
383 393
384#ifndef CLOCK_MONOTONIC 394#ifndef CLOCK_MONOTONIC
385# undef EV_USE_MONOTONIC 395# undef EV_USE_MONOTONIC
386# define EV_USE_MONOTONIC 0 396# define EV_USE_MONOTONIC 0
387#endif 397#endif
504 */ 514 */
505 515
506#ifndef ECB_H 516#ifndef ECB_H
507#define ECB_H 517#define ECB_H
508 518
519/* 16 bits major, 16 bits minor */
520#define ECB_VERSION 0x00010003
521
509#ifdef _WIN32 522#ifdef _WIN32
510 typedef signed char int8_t; 523 typedef signed char int8_t;
511 typedef unsigned char uint8_t; 524 typedef unsigned char uint8_t;
512 typedef signed short int16_t; 525 typedef signed short int16_t;
513 typedef unsigned short uint16_t; 526 typedef unsigned short uint16_t;
518 typedef unsigned long long uint64_t; 531 typedef unsigned long long uint64_t;
519 #else /* _MSC_VER || __BORLANDC__ */ 532 #else /* _MSC_VER || __BORLANDC__ */
520 typedef signed __int64 int64_t; 533 typedef signed __int64 int64_t;
521 typedef unsigned __int64 uint64_t; 534 typedef unsigned __int64 uint64_t;
522 #endif 535 #endif
536 #ifdef _WIN64
537 #define ECB_PTRSIZE 8
538 typedef uint64_t uintptr_t;
539 typedef int64_t intptr_t;
540 #else
541 #define ECB_PTRSIZE 4
542 typedef uint32_t uintptr_t;
543 typedef int32_t intptr_t;
544 #endif
523#else 545#else
524 #include <inttypes.h> 546 #include <inttypes.h>
547 #if UINTMAX_MAX > 0xffffffffU
548 #define ECB_PTRSIZE 8
549 #else
550 #define ECB_PTRSIZE 4
551 #endif
552#endif
553
554/* work around x32 idiocy by defining proper macros */
555#if __x86_64 || _M_AMD64
556 #if __ILP32
557 #define ECB_AMD64_X32 1
558 #else
559 #define ECB_AMD64 1
560 #endif
525#endif 561#endif
526 562
527/* many compilers define _GNUC_ to some versions but then only implement 563/* many compilers define _GNUC_ to some versions but then only implement
528 * what their idiot authors think are the "more important" extensions, 564 * what their idiot authors think are the "more important" extensions,
529 * causing enormous grief in return for some better fake benchmark numbers. 565 * causing enormous grief in return for some better fake benchmark numbers.
537 #else 573 #else
538 #define ECB_GCC_VERSION(major,minor) (__GNUC__ > (major) || (__GNUC__ == (major) && __GNUC_MINOR__ >= (minor))) 574 #define ECB_GCC_VERSION(major,minor) (__GNUC__ > (major) || (__GNUC__ == (major) && __GNUC_MINOR__ >= (minor)))
539 #endif 575 #endif
540#endif 576#endif
541 577
578#define ECB_C (__STDC__+0) /* this assumes that __STDC__ is either empty or a number */
579#define ECB_C99 (__STDC_VERSION__ >= 199901L)
580#define ECB_C11 (__STDC_VERSION__ >= 201112L)
581#define ECB_CPP (__cplusplus+0)
582#define ECB_CPP11 (__cplusplus >= 201103L)
583
584#if ECB_CPP
585 #define ECB_EXTERN_C extern "C"
586 #define ECB_EXTERN_C_BEG ECB_EXTERN_C {
587 #define ECB_EXTERN_C_END }
588#else
589 #define ECB_EXTERN_C extern
590 #define ECB_EXTERN_C_BEG
591 #define ECB_EXTERN_C_END
592#endif
593
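A hedged aside on the ECB_EXTERN_C_BEG/ECB_EXTERN_C_END pair defined above: they let a single header expose C-linkage declarations to both C and C++ translation units. The function name below is invented purely for illustration and is not part of libev:

/* sketch: a hypothetical header wrapped with the macros defined above */
ECB_EXTERN_C_BEG

int my_count_events (int fd);   /* gets extern "C" linkage when compiled as C++ */

ECB_EXTERN_C_END
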
542/*****************************************************************************/ 594/*****************************************************************************/
543 595
544/* ECB_NO_THREADS - ecb is not used by multiple threads, ever */ 596/* ECB_NO_THREADS - ecb is not used by multiple threads, ever */
545/* ECB_NO_SMP - ecb might be used in multiple threads, but only on a single cpu */ 597/* ECB_NO_SMP - ecb might be used in multiple threads, but only on a single cpu */
546 598
547#if ECB_NO_THREADS 599#if ECB_NO_THREADS
548# define ECB_NO_SMP 1 600 #define ECB_NO_SMP 1
549#endif 601#endif
550 602
551#if ECB_NO_THREADS || ECB_NO_SMP 603#if ECB_NO_SMP
552 #define ECB_MEMORY_FENCE do { } while (0) 604 #define ECB_MEMORY_FENCE do { } while (0)
553#endif 605#endif
554 606
555#ifndef ECB_MEMORY_FENCE 607#ifndef ECB_MEMORY_FENCE
556 #if ECB_GCC_VERSION(2,5) || defined __INTEL_COMPILER || (__llvm__ && __GNUC__) || __SUNPRO_C >= 0x5110 || __SUNPRO_CC >= 0x5110 608 #if ECB_GCC_VERSION(2,5) || defined __INTEL_COMPILER || (__llvm__ && __GNUC__) || __SUNPRO_C >= 0x5110 || __SUNPRO_CC >= 0x5110
557 #if __i386 || __i386__ 609 #if __i386 || __i386__
558 #define ECB_MEMORY_FENCE __asm__ __volatile__ ("lock; orb $0, -1(%%esp)" : : : "memory") 610 #define ECB_MEMORY_FENCE __asm__ __volatile__ ("lock; orb $0, -1(%%esp)" : : : "memory")
559 #define ECB_MEMORY_FENCE_ACQUIRE ECB_MEMORY_FENCE /* non-lock xchg might be enough */ 611 #define ECB_MEMORY_FENCE_ACQUIRE __asm__ __volatile__ ("" : : : "memory")
560 #define ECB_MEMORY_FENCE_RELEASE do { } while (0) /* unlikely to change in future cpus */ 612 #define ECB_MEMORY_FENCE_RELEASE __asm__ __volatile__ ("")
561 #elif __amd64 || __amd64__ || __x86_64 || __x86_64__ 613 #elif __amd64 || __amd64__ || __x86_64 || __x86_64__
562 #define ECB_MEMORY_FENCE __asm__ __volatile__ ("mfence" : : : "memory") 614 #define ECB_MEMORY_FENCE __asm__ __volatile__ ("mfence" : : : "memory")
563 #define ECB_MEMORY_FENCE_ACQUIRE __asm__ __volatile__ ("lfence" : : : "memory") 615 #define ECB_MEMORY_FENCE_ACQUIRE __asm__ __volatile__ ("" : : : "memory")
564 #define ECB_MEMORY_FENCE_RELEASE __asm__ __volatile__ ("sfence") /* play safe - not needed in any current cpu */ 616 #define ECB_MEMORY_FENCE_RELEASE __asm__ __volatile__ ("")
565 #elif __powerpc__ || __ppc__ || __powerpc64__ || __ppc64__ 617 #elif __powerpc__ || __ppc__ || __powerpc64__ || __ppc64__
566 #define ECB_MEMORY_FENCE __asm__ __volatile__ ("sync" : : : "memory") 618 #define ECB_MEMORY_FENCE __asm__ __volatile__ ("sync" : : : "memory")
567 #elif defined __ARM_ARCH_6__ || defined __ARM_ARCH_6J__ \ 619 #elif defined __ARM_ARCH_6__ || defined __ARM_ARCH_6J__ \
568 || defined __ARM_ARCH_6K__ || defined __ARM_ARCH_6ZK__ 620 || defined __ARM_ARCH_6K__ || defined __ARM_ARCH_6ZK__
569 #define ECB_MEMORY_FENCE __asm__ __volatile__ ("mcr p15,0,%0,c7,c10,5" : : "r" (0) : "memory") 621 #define ECB_MEMORY_FENCE __asm__ __volatile__ ("mcr p15,0,%0,c7,c10,5" : : "r" (0) : "memory")
570 #elif defined __ARM_ARCH_7__ || defined __ARM_ARCH_7A__ \ 622 #elif defined __ARM_ARCH_7__ || defined __ARM_ARCH_7A__ \
571 || defined __ARM_ARCH_7M__ || defined __ARM_ARCH_7R__ 623 || defined __ARM_ARCH_7M__ || defined __ARM_ARCH_7R__
572 #define ECB_MEMORY_FENCE __asm__ __volatile__ ("dmb" : : : "memory") 624 #define ECB_MEMORY_FENCE __asm__ __volatile__ ("dmb" : : : "memory")
573 #elif __sparc || __sparc__ 625 #elif __sparc || __sparc__
574 #define ECB_MEMORY_FENCE __asm__ __volatile__ ("membar #LoadStore | #LoadLoad | #StoreStore | #StoreLoad | " : : : "memory") 626 #define ECB_MEMORY_FENCE __asm__ __volatile__ ("membar #LoadStore | #LoadLoad | #StoreStore | #StoreLoad" : : : "memory")
575 #define ECB_MEMORY_FENCE_ACQUIRE __asm__ __volatile__ ("membar #LoadStore | #LoadLoad" : : : "memory") 627 #define ECB_MEMORY_FENCE_ACQUIRE __asm__ __volatile__ ("membar #LoadStore | #LoadLoad" : : : "memory")
576 #define ECB_MEMORY_FENCE_RELEASE __asm__ __volatile__ ("membar #LoadStore | #StoreStore") 628 #define ECB_MEMORY_FENCE_RELEASE __asm__ __volatile__ ("membar #LoadStore | #StoreStore")
577 #elif defined __s390__ || defined __s390x__ 629 #elif defined __s390__ || defined __s390x__
578 #define ECB_MEMORY_FENCE __asm__ __volatile__ ("bcr 15,0" : : : "memory") 630 #define ECB_MEMORY_FENCE __asm__ __volatile__ ("bcr 15,0" : : : "memory")
579 #elif defined __mips__ 631 #elif defined __mips__
580 #define ECB_MEMORY_FENCE __asm__ __volatile__ ("sync" : : : "memory") 632 #define ECB_MEMORY_FENCE __asm__ __volatile__ ("sync" : : : "memory")
581 #elif defined __alpha__ 633 #elif defined __alpha__
582 #define ECB_MEMORY_FENCE __asm__ __volatile__ ("mb" : : : "memory") 634 #define ECB_MEMORY_FENCE __asm__ __volatile__ ("mb" : : : "memory")
635 #elif defined __hppa__
636 #define ECB_MEMORY_FENCE __asm__ __volatile__ ("" : : : "memory")
637 #define ECB_MEMORY_FENCE_RELEASE __asm__ __volatile__ ("")
638 #elif defined __ia64__
639 #define ECB_MEMORY_FENCE __asm__ __volatile__ ("mf" : : : "memory")
583 #endif 640 #endif
584 #endif 641 #endif
585#endif 642#endif
586 643
587#ifndef ECB_MEMORY_FENCE 644#ifndef ECB_MEMORY_FENCE
645 #if ECB_GCC_VERSION(4,7)
646 /* see comment below (stdatomic.h) about the C11 memory model. */
647 #define ECB_MEMORY_FENCE __atomic_thread_fence (__ATOMIC_SEQ_CST)
648
649 /* The __has_feature syntax from clang is so misdesigned that we cannot use it
650 * without risking compile time errors with other compilers. We *could*
651 * define our own ecb_clang_has_feature, but I just can't be bothered to work
652 * around this shit time and again.
653 * #elif defined __clang && __has_feature (cxx_atomic)
654 * // see comment below (stdatomic.h) about the C11 memory model.
655 * #define ECB_MEMORY_FENCE __c11_atomic_thread_fence (__ATOMIC_SEQ_CST)
656 */
657
588 #if ECB_GCC_VERSION(4,4) || defined __INTEL_COMPILER || defined __clang__ 658 #elif ECB_GCC_VERSION(4,4) || defined __INTEL_COMPILER || defined __clang__
589 #define ECB_MEMORY_FENCE __sync_synchronize () 659 #define ECB_MEMORY_FENCE __sync_synchronize ()
590 /*#define ECB_MEMORY_FENCE_ACQUIRE ({ char dummy = 0; __sync_lock_test_and_set (&dummy, 1); }) */
591 /*#define ECB_MEMORY_FENCE_RELEASE ({ char dummy = 1; __sync_lock_release (&dummy ); }) */
592 #elif _MSC_VER >= 1400 /* VC++ 2005 */ 660 #elif _MSC_VER >= 1400 /* VC++ 2005 */
593 #pragma intrinsic(_ReadBarrier,_WriteBarrier,_ReadWriteBarrier) 661 #pragma intrinsic(_ReadBarrier,_WriteBarrier,_ReadWriteBarrier)
594 #define ECB_MEMORY_FENCE _ReadWriteBarrier () 662 #define ECB_MEMORY_FENCE _ReadWriteBarrier ()
595 #define ECB_MEMORY_FENCE_ACQUIRE _ReadWriteBarrier () /* according to msdn, _ReadBarrier is not a load fence */ 663 #define ECB_MEMORY_FENCE_ACQUIRE _ReadWriteBarrier () /* according to msdn, _ReadBarrier is not a load fence */
596 #define ECB_MEMORY_FENCE_RELEASE _WriteBarrier () 664 #define ECB_MEMORY_FENCE_RELEASE _WriteBarrier ()
606 #define ECB_MEMORY_FENCE __sync () 674 #define ECB_MEMORY_FENCE __sync ()
607 #endif 675 #endif
608#endif 676#endif
609 677
610#ifndef ECB_MEMORY_FENCE 678#ifndef ECB_MEMORY_FENCE
679 #if ECB_C11 && !defined __STDC_NO_ATOMICS__
680 /* we assume that these memory fences work on all variables/all memory accesses, */
681 /* not just C11 atomics and atomic accesses */
682 #include <stdatomic.h>
683 /* Unfortunately, neither gcc 4.7 nor clang 3.1 generate any instructions for */
684 /* any fence other than seq_cst, which isn't very efficient for us. */
685 /* Why that is, we don't know - either the C11 memory model is quite useless */
686 /* for most usages, or gcc and clang have a bug */
687 /* I *currently* lean towards the latter, and inefficiently implement */
688 /* all three of ecb's fences as a seq_cst fence */
689 #define ECB_MEMORY_FENCE atomic_thread_fence (memory_order_seq_cst)
690 #endif
691#endif
692
693#ifndef ECB_MEMORY_FENCE
611 #if !ECB_AVOID_PTHREADS 694 #if !ECB_AVOID_PTHREADS
612 /* 695 /*
613 * if you get undefined symbol references to pthread_mutex_lock, 696 * if you get undefined symbol references to pthread_mutex_lock,
614 * or failure to find pthread.h, then you should implement 697 * or failure to find pthread.h, then you should implement
615 * the ECB_MEMORY_FENCE operations for your cpu/compiler 698 * the ECB_MEMORY_FENCE operations for your cpu/compiler
633 #define ECB_MEMORY_FENCE_RELEASE ECB_MEMORY_FENCE 716 #define ECB_MEMORY_FENCE_RELEASE ECB_MEMORY_FENCE
634#endif 717#endif
635 718
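The acquire/release variants exist for the publish/consume handshake used later by evpipe_write and pipecb. A minimal, hedged sketch of that pattern (illustrative names, not code from ev.c), assuming the ECB_MEMORY_FENCE_* macros defined above:

static int shared_data;
static volatile int shared_flag;

static void
publish (int v)
{
  shared_data = v;            /* store the payload first                   */
  ECB_MEMORY_FENCE_RELEASE;   /* make it visible before the flag is raised */
  shared_flag = 1;
}

static int
consume (void)
{
  if (!shared_flag)
    return -1;                /* nothing published yet                     */

  ECB_MEMORY_FENCE_ACQUIRE;   /* order the flag read before the data read  */
  return shared_data;
}
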
636/*****************************************************************************/ 719/*****************************************************************************/
637 720
638#define ECB_C99 (__STDC_VERSION__ >= 199901L)
639
640#if __cplusplus 721#if __cplusplus
641 #define ecb_inline static inline 722 #define ecb_inline static inline
642#elif ECB_GCC_VERSION(2,5) 723#elif ECB_GCC_VERSION(2,5)
643 #define ecb_inline static __inline__ 724 #define ecb_inline static __inline__
644#elif ECB_C99 725#elif ECB_C99
682#elif ECB_GCC_VERSION(3,0) 763#elif ECB_GCC_VERSION(3,0)
683 #define ecb_decltype(x) __typeof(x) 764 #define ecb_decltype(x) __typeof(x)
684#endif 765#endif
685 766
686#define ecb_noinline ecb_attribute ((__noinline__)) 767#define ecb_noinline ecb_attribute ((__noinline__))
687#define ecb_noreturn ecb_attribute ((__noreturn__))
688#define ecb_unused ecb_attribute ((__unused__)) 768#define ecb_unused ecb_attribute ((__unused__))
689#define ecb_const ecb_attribute ((__const__)) 769#define ecb_const ecb_attribute ((__const__))
690#define ecb_pure ecb_attribute ((__pure__)) 770#define ecb_pure ecb_attribute ((__pure__))
771
772#if ECB_C11
773 #define ecb_noreturn _Noreturn
774#else
775 #define ecb_noreturn ecb_attribute ((__noreturn__))
776#endif
691 777
692#if ECB_GCC_VERSION(4,3) 778#if ECB_GCC_VERSION(4,3)
693 #define ecb_artificial ecb_attribute ((__artificial__)) 779 #define ecb_artificial ecb_attribute ((__artificial__))
694 #define ecb_hot ecb_attribute ((__hot__)) 780 #define ecb_hot ecb_attribute ((__hot__))
695 #define ecb_cold ecb_attribute ((__cold__)) 781 #define ecb_cold ecb_attribute ((__cold__))
786 872
787 return r + ecb_ld32 (x); 873 return r + ecb_ld32 (x);
788 } 874 }
789#endif 875#endif
790 876
877ecb_function_ ecb_bool ecb_is_pot32 (uint32_t x) ecb_const;
878ecb_function_ ecb_bool ecb_is_pot32 (uint32_t x) { return !(x & (x - 1)); }
879ecb_function_ ecb_bool ecb_is_pot64 (uint64_t x) ecb_const;
880ecb_function_ ecb_bool ecb_is_pot64 (uint64_t x) { return !(x & (x - 1)); }
881
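A short, hedged sanity check for the power-of-two helpers just added: a power of two has exactly one bit set, so x & (x - 1) clears it to zero; note that 0 also passes the test.

#include <assert.h>

static void
pot_examples (void)
{
  assert ( ecb_is_pot32 (1));      /* 2**0  */
  assert ( ecb_is_pot32 (4096));   /* 2**12 */
  assert (!ecb_is_pot32 (12));     /* 0b1100 has two bits set             */
  assert ( ecb_is_pot32 (0));      /* 0 is (perhaps surprisingly) accepted */
}
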
791ecb_function_ uint8_t ecb_bitrev8 (uint8_t x) ecb_const; 882ecb_function_ uint8_t ecb_bitrev8 (uint8_t x) ecb_const;
792ecb_function_ uint8_t ecb_bitrev8 (uint8_t x) 883ecb_function_ uint8_t ecb_bitrev8 (uint8_t x)
793{ 884{
794 return ( (x * 0x0802U & 0x22110U) 885 return ( (x * 0x0802U & 0x22110U)
795 | (x * 0x8020U & 0x88440U)) * 0x10101U >> 16; 886 | (x * 0x8020U & 0x88440U)) * 0x10101U >> 16;
879 ecb_inline void ecb_unreachable (void) ecb_noreturn; 970 ecb_inline void ecb_unreachable (void) ecb_noreturn;
880 ecb_inline void ecb_unreachable (void) { } 971 ecb_inline void ecb_unreachable (void) { }
881#endif 972#endif
882 973
883/* try to tell the compiler that some condition is definitely true */ 974/* try to tell the compiler that some condition is definitely true */
884#define ecb_assume(cond) do { if (!(cond)) ecb_unreachable (); } while (0) 975#define ecb_assume(cond) if (!(cond)) ecb_unreachable (); else 0
885 976
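As a hedged illustration of how ecb_assume is meant to be used (the function below is made up, not from ev.c): it promises the optimiser a value range so bounds or default-case code can be dropped; breaking the promise is undefined behaviour.

static int
pick (const int tbl [4], int idx)
{
  ecb_assume (idx >= 0 && idx < 4);   /* tell the compiler idx is in range */
  return tbl [idx];
}
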
886ecb_inline unsigned char ecb_byteorder_helper (void) ecb_const; 977ecb_inline unsigned char ecb_byteorder_helper (void) ecb_const;
887ecb_inline unsigned char 978ecb_inline unsigned char
888ecb_byteorder_helper (void) 979ecb_byteorder_helper (void)
889{ 980{
890 const uint32_t u = 0x11223344; 981 /* the union code still generates code under pressure in gcc, */
891 return *(unsigned char *)&u; 982 /* but less than using pointers, and always seems to */
983 /* successfully return a constant. */
984 /* the reason why we have this horrible preprocessor mess */
985 /* is to avoid it in all cases, at least on common architectures */
986 /* or when using a recent enough gcc version (>= 4.6) */
987#if __i386 || __i386__ || _M_X86 || __amd64 || __amd64__ || _M_X64
988 return 0x44;
989#elif __BYTE_ORDER__ && __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
990 return 0x44;
991#elif __BYTE_ORDER__ && __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
992 return 0x11;
993#else
994 union
995 {
996 uint32_t i;
997 uint8_t c;
998 } u = { 0x11223344 };
999 return u.c;
1000#endif
892} 1001}
893 1002
894ecb_inline ecb_bool ecb_big_endian (void) ecb_const; 1003ecb_inline ecb_bool ecb_big_endian (void) ecb_const;
895ecb_inline ecb_bool ecb_big_endian (void) { return ecb_byteorder_helper () == 0x11; } 1004ecb_inline ecb_bool ecb_big_endian (void) { return ecb_byteorder_helper () == 0x11; }
896ecb_inline ecb_bool ecb_little_endian (void) ecb_const; 1005ecb_inline ecb_bool ecb_little_endian (void) ecb_const;
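A hedged usage sketch for the endianness helpers (not from ev.c): converting a host-order value to big-endian wire order only when necessary.

#include <stdint.h>

static uint32_t
host_to_be32 (uint32_t v)
{
  if (ecb_big_endian ())
    return v;                                  /* already in wire order */

  return (v >> 24) | ((v >> 8) & 0x0000ff00U)
       | ((v << 8) & 0x00ff0000U) | (v << 24);
}
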
927 } 1036 }
928#else 1037#else
929 #define ecb_array_length(name) (sizeof (name) / sizeof (name [0])) 1038 #define ecb_array_length(name) (sizeof (name) / sizeof (name [0]))
930#endif 1039#endif
931 1040
1041/*******************************************************************************/
1042/* floating point stuff, can be disabled by defining ECB_NO_LIBM */
1043
1044/* basically, everything uses "ieee pure-endian" floating point numbers */
1045/* the only noteworthy exception is ancient armle, which uses order 43218765 */
1046#if 0 \
1047 || __i386 || __i386__ \
1048 || __amd64 || __amd64__ || __x86_64 || __x86_64__ \
1049 || __powerpc__ || __ppc__ || __powerpc64__ || __ppc64__ \
1050 || defined __arm__ && defined __ARM_EABI__ \
1051 || defined __s390__ || defined __s390x__ \
1052 || defined __mips__ \
1053 || defined __alpha__ \
1054 || defined __hppa__ \
1055 || defined __ia64__ \
1056 || defined _M_IX86 || defined _M_AMD64 || defined _M_IA64
1057 #define ECB_STDFP 1
1058 #include <string.h> /* for memcpy */
1059#else
1060 #define ECB_STDFP 0
1061 #include <math.h> /* for frexp*, ldexp* */
1062#endif
1063
1064#ifndef ECB_NO_LIBM
1065
1066 /* convert a float to ieee single/binary32 */
1067 ecb_function_ uint32_t ecb_float_to_binary32 (float x) ecb_const;
1068 ecb_function_ uint32_t
1069 ecb_float_to_binary32 (float x)
1070 {
1071 uint32_t r;
1072
1073 #if ECB_STDFP
1074 memcpy (&r, &x, 4);
1075 #else
1076 /* slow emulation, works for anything but -0 */
1077 uint32_t m;
1078 int e;
1079
1080 if (x == 0e0f ) return 0x00000000U;
1081 if (x > +3.40282346638528860e+38f) return 0x7f800000U;
1082 if (x < -3.40282346638528860e+38f) return 0xff800000U;
1083 if (x != x ) return 0x7fbfffffU;
1084
1085 m = frexpf (x, &e) * 0x1000000U;
1086
1087 r = m & 0x80000000U;
1088
1089 if (r)
1090 m = -m;
1091
1092 if (e <= -126)
1093 {
1094 m &= 0xffffffU;
1095 m >>= (-125 - e);
1096 e = -126;
1097 }
1098
1099 r |= (e + 126) << 23;
1100 r |= m & 0x7fffffU;
1101 #endif
1102
1103 return r;
1104 }
1105
1106 /* converts an ieee single/binary32 to a float */
1107 ecb_function_ float ecb_binary32_to_float (uint32_t x) ecb_const;
1108 ecb_function_ float
1109 ecb_binary32_to_float (uint32_t x)
1110 {
1111 float r;
1112
1113 #if ECB_STDFP
1114 memcpy (&r, &x, 4);
1115 #else
1116 /* emulation, only works for normals and subnormals and +0 */
1117 int neg = x >> 31;
1118 int e = (x >> 23) & 0xffU;
1119
1120 x &= 0x7fffffU;
1121
1122 if (e)
1123 x |= 0x800000U;
1124 else
1125 e = 1;
1126
1127 /* we distrust ldexpf a bit and do the 2**-24 scaling by an extra multiply */
1128 r = ldexpf (x * (0.5f / 0x800000U), e - 126);
1129
1130 r = neg ? -r : r;
1131 #endif
1132
1133 return r;
1134 }
1135
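A hand-worked, hedged sanity check for the two converters above: 1.0f is sign 0, biased exponent 127 and an all-zero mantissa, i.e. 0x3f800000, while -2.0f is sign 1, biased exponent 128, mantissa 0, i.e. 0xc0000000.

#include <assert.h>
#include <stdint.h>

static void
binary32_examples (void)
{
  assert (ecb_float_to_binary32 ( 1.0f) == 0x3f800000U);
  assert (ecb_float_to_binary32 (-2.0f) == 0xc0000000U);
  assert (ecb_binary32_to_float (0x3f800000U) == 1.0f);
}
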
1136 /* convert a double to ieee double/binary64 */
1137 ecb_function_ uint64_t ecb_double_to_binary64 (double x) ecb_const;
1138 ecb_function_ uint64_t
1139 ecb_double_to_binary64 (double x)
1140 {
1141 uint64_t r;
1142
1143 #if ECB_STDFP
1144 memcpy (&r, &x, 8);
1145 #else
1146 /* slow emulation, works for anything but -0 */
1147 uint64_t m;
1148 int e;
1149
1150 if (x == 0e0 ) return 0x0000000000000000U;
1151 if (x > +1.79769313486231470e+308) return 0x7ff0000000000000U;
1152 if (x < -1.79769313486231470e+308) return 0xfff0000000000000U;
1153 if (x != x ) return 0X7ff7ffffffffffffU;
1154
1155 m = frexp (x, &e) * 0x20000000000000U;
1156
1157 r = m & 0x8000000000000000;;
1158
1159 if (r)
1160 m = -m;
1161
1162 if (e <= -1022)
1163 {
1164 m &= 0x1fffffffffffffU;
1165 m >>= (-1021 - e);
1166 e = -1022;
1167 }
1168
1169 r |= ((uint64_t)(e + 1022)) << 52;
1170 r |= m & 0xfffffffffffffU;
1171 #endif
1172
1173 return r;
1174 }
1175
1176 /* converts an ieee double/binary64 to a double */
1177 ecb_function_ double ecb_binary64_to_double (uint64_t x) ecb_const;
1178 ecb_function_ double
1179 ecb_binary64_to_double (uint64_t x)
1180 {
1181 double r;
1182
1183 #if ECB_STDFP
1184 memcpy (&r, &x, 8);
1185 #else
1186 /* emulation, only works for normals and subnormals and +0 */
1187 int neg = x >> 63;
1188 int e = (x >> 52) & 0x7ffU;
1189
1190 x &= 0xfffffffffffffU;
1191
1192 if (e)
1193 x |= 0x10000000000000U;
1194 else
1195 e = 1;
1196
1197 /* we distrust ldexp a bit and do the 2**-53 scaling by an extra multiply */
1198 r = ldexp (x * (0.5 / 0x10000000000000U), e - 1022);
1199
1200 r = neg ? -r : r;
1201 #endif
1202
1203 return r;
1204 }
1205
1206#endif
1207
932#endif 1208#endif
933 1209
934/* ECB.H END */ 1210/* ECB.H END */
935 1211
936#if ECB_MEMORY_FENCE_NEEDS_PTHREADS 1212#if ECB_MEMORY_FENCE_NEEDS_PTHREADS
1105#endif 1381#endif
1106 1382
1107static void (*syserr_cb)(const char *msg) EV_THROW; 1383static void (*syserr_cb)(const char *msg) EV_THROW;
1108 1384
1109void ecb_cold 1385void ecb_cold
1110ev_set_syserr_cb (void (*cb)(const char *msg)) EV_THROW 1386ev_set_syserr_cb (void (*cb)(const char *msg) EV_THROW) EV_THROW
1111{ 1387{
1112 syserr_cb = cb; 1388 syserr_cb = cb;
1113} 1389}
1114 1390
1115static void noinline ecb_cold 1391static void noinline ecb_cold
1133 abort (); 1409 abort ();
1134 } 1410 }
1135} 1411}
1136 1412
1137static void * 1413static void *
1138ev_realloc_emul (void *ptr, long size) 1414ev_realloc_emul (void *ptr, long size) EV_THROW
1139{ 1415{
1140#if __GLIBC__
1141 return realloc (ptr, size);
1142#else
1143 /* some systems, notably openbsd and darwin, fail to properly 1416 /* some systems, notably openbsd and darwin, fail to properly
1144 * implement realloc (x, 0) (as required by both ansi c-89 and 1417 * implement realloc (x, 0) (as required by both ansi c-89 and
1145 * the single unix specification, so work around them here. 1418 * the single unix specification, so work around them here.
1419 * recently, also (at least) fedora and debian started breaking it,
1420 * despite documenting it otherwise.
1146 */ 1421 */
1147 1422
1148 if (size) 1423 if (size)
1149 return realloc (ptr, size); 1424 return realloc (ptr, size);
1150 1425
1151 free (ptr); 1426 free (ptr);
1152 return 0; 1427 return 0;
1153#endif
1154} 1428}
1155 1429
1156static void *(*alloc)(void *ptr, long size) EV_THROW = ev_realloc_emul; 1430static void *(*alloc)(void *ptr, long size) EV_THROW = ev_realloc_emul;
1157 1431
1158void ecb_cold 1432void ecb_cold
1159ev_set_allocator (void *(*cb)(void *ptr, long size)) EV_THROW 1433ev_set_allocator (void *(*cb)(void *ptr, long size) EV_THROW) EV_THROW
1160{ 1434{
1161 alloc = cb; 1435 alloc = cb;
1162} 1436}
1163 1437
1164inline_speed void * 1438inline_speed void *
1813static void noinline ecb_cold 2087static void noinline ecb_cold
1814evpipe_init (EV_P) 2088evpipe_init (EV_P)
1815{ 2089{
1816 if (!ev_is_active (&pipe_w)) 2090 if (!ev_is_active (&pipe_w))
1817 { 2091 {
2092 int fds [2];
2093
1818# if EV_USE_EVENTFD 2094# if EV_USE_EVENTFD
2095 fds [0] = -1;
1819 evfd = eventfd (0, EFD_NONBLOCK | EFD_CLOEXEC); 2096 fds [1] = eventfd (0, EFD_NONBLOCK | EFD_CLOEXEC);
1820 if (evfd < 0 && errno == EINVAL) 2097 if (fds [1] < 0 && errno == EINVAL)
1821 evfd = eventfd (0, 0); 2098 fds [1] = eventfd (0, 0);
1822 2099
1823 if (evfd >= 0) 2100 if (fds [1] < 0)
1824 {
1825 evpipe [0] = -1;
1826 fd_intern (evfd); /* doing it twice doesn't hurt */
1827 ev_io_set (&pipe_w, evfd, EV_READ);
1828 }
1829 else
1830# endif 2101# endif
1831 { 2102 {
1832 while (pipe (evpipe)) 2103 while (pipe (fds))
1833 ev_syserr ("(libev) error creating signal/async pipe"); 2104 ev_syserr ("(libev) error creating signal/async pipe");
1834 2105
1835 fd_intern (evpipe [0]); 2106 fd_intern (fds [0]);
1836 fd_intern (evpipe [1]);
1837 ev_io_set (&pipe_w, evpipe [0], EV_READ);
1838 } 2107 }
1839 2108
2109 fd_intern (fds [1]);
2110
2111 evpipe [0] = fds [0];
2112
2113 if (evpipe [1] < 0)
2114 evpipe [1] = fds [1]; /* first call, set write fd */
2115 else
2116 {
2117 /* on subsequent calls, do not change evpipe [1] */
2118 /* so that evpipe_write can always rely on its value. */
2119 /* this branch does not do anything sensible on windows, */
2120 /* so must not be executed on windows */
2121
2122 dup2 (fds [1], evpipe [1]);
2123 close (fds [1]);
2124 }
2125
2126 ev_io_set (&pipe_w, evpipe [0] < 0 ? evpipe [1] : evpipe [0], EV_READ);
1840 ev_io_start (EV_A_ &pipe_w); 2127 ev_io_start (EV_A_ &pipe_w);
1841 ev_unref (EV_A); /* watcher should not keep loop alive */ 2128 ev_unref (EV_A); /* watcher should not keep loop alive */
1842 } 2129 }
1843} 2130}
1844 2131
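The hunk above replaces the old evfd/evpipe bookkeeping with a single fds [] pair. A standalone, hedged sketch of the same eventfd-or-pipe fallback (Linux-only, error handling reduced to the return value; not code from ev.c):

#include <unistd.h>
#include <sys/eventfd.h>

static int
make_wakeup_fd (int fds [2])
{
  fds [0] = -1;
  fds [1] = eventfd (0, EFD_NONBLOCK | EFD_CLOEXEC);

  if (fds [1] >= 0)
    return 0;            /* one eventfd serves as both read and write end */

  return pipe (fds);     /* otherwise fall back to a conventional pipe    */
}
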
1849 2136
1850 if (expect_true (*flag)) 2137 if (expect_true (*flag))
1851 return; 2138 return;
1852 2139
1853 *flag = 1; 2140 *flag = 1;
1854
1855 ECB_MEMORY_FENCE_RELEASE; /* make sure flag is visible before the wakeup */ 2141 ECB_MEMORY_FENCE_RELEASE; /* make sure flag is visible before the wakeup */
1856 2142
1857 pipe_write_skipped = 1; 2143 pipe_write_skipped = 1;
1858 2144
1859 ECB_MEMORY_FENCE; /* make sure pipe_write_skipped is visible before we check pipe_write_wanted */ 2145 ECB_MEMORY_FENCE; /* make sure pipe_write_skipped is visible before we check pipe_write_wanted */
1860 2146
1861 if (pipe_write_wanted) 2147 if (pipe_write_wanted)
1862 { 2148 {
1863 int old_errno; 2149 int old_errno;
1864 2150
1865 pipe_write_skipped = 0; /* just an optimisation, no fence needed */ 2151 pipe_write_skipped = 0;
2152 ECB_MEMORY_FENCE_RELEASE;
1866 2153
1867 old_errno = errno; /* save errno because write will clobber it */ 2154 old_errno = errno; /* save errno because write will clobber it */
1868 2155
1869#if EV_USE_EVENTFD 2156#if EV_USE_EVENTFD
1870 if (evfd >= 0) 2157 if (evpipe [0] < 0)
1871 { 2158 {
1872 uint64_t counter = 1; 2159 uint64_t counter = 1;
1873 write (evfd, &counter, sizeof (uint64_t)); 2160 write (evpipe [1], &counter, sizeof (uint64_t));
1874 } 2161 }
1875 else 2162 else
1876#endif 2163#endif
1877 { 2164 {
1878#ifdef _WIN32 2165#ifdef _WIN32
1898 int i; 2185 int i;
1899 2186
1900 if (revents & EV_READ) 2187 if (revents & EV_READ)
1901 { 2188 {
1902#if EV_USE_EVENTFD 2189#if EV_USE_EVENTFD
1903 if (evfd >= 0) 2190 if (evpipe [0] < 0)
1904 { 2191 {
1905 uint64_t counter; 2192 uint64_t counter;
1906 read (evfd, &counter, sizeof (uint64_t)); 2193 read (evpipe [1], &counter, sizeof (uint64_t));
1907 } 2194 }
1908 else 2195 else
1909#endif 2196#endif
1910 { 2197 {
1911 char dummy[4]; 2198 char dummy[4];
1929#if EV_SIGNAL_ENABLE 2216#if EV_SIGNAL_ENABLE
1930 if (sig_pending) 2217 if (sig_pending)
1931 { 2218 {
1932 sig_pending = 0; 2219 sig_pending = 0;
1933 2220
1934 ECB_MEMORY_FENCE_RELEASE; 2221 ECB_MEMORY_FENCE;
1935 2222
1936 for (i = EV_NSIG - 1; i--; ) 2223 for (i = EV_NSIG - 1; i--; )
1937 if (expect_false (signals [i].pending)) 2224 if (expect_false (signals [i].pending))
1938 ev_feed_signal_event (EV_A_ i + 1); 2225 ev_feed_signal_event (EV_A_ i + 1);
1939 } 2226 }
1942#if EV_ASYNC_ENABLE 2229#if EV_ASYNC_ENABLE
1943 if (async_pending) 2230 if (async_pending)
1944 { 2231 {
1945 async_pending = 0; 2232 async_pending = 0;
1946 2233
1947 ECB_MEMORY_FENCE_RELEASE; 2234 ECB_MEMORY_FENCE;
1948 2235
1949 for (i = asynccnt; i--; ) 2236 for (i = asynccnt; i--; )
1950 if (asyncs [i]->sent) 2237 if (asyncs [i]->sent)
1951 { 2238 {
1952 asyncs [i]->sent = 0; 2239 asyncs [i]->sent = 0;
2240 ECB_MEMORY_FENCE_RELEASE;
1953 ev_feed_event (EV_A_ asyncs [i], EV_ASYNC); 2241 ev_feed_event (EV_A_ asyncs [i], EV_ASYNC);
1954 } 2242 }
1955 } 2243 }
1956#endif 2244#endif
1957} 2245}
1960 2248
1961void 2249void
1962ev_feed_signal (int signum) EV_THROW 2250ev_feed_signal (int signum) EV_THROW
1963{ 2251{
1964#if EV_MULTIPLICITY 2252#if EV_MULTIPLICITY
2253 EV_P;
2254 ECB_MEMORY_FENCE_ACQUIRE;
1965 EV_P = signals [signum - 1].loop; 2255 EV_A = signals [signum - 1].loop;
1966 2256
1967 if (!EV_A) 2257 if (!EV_A)
1968 return; 2258 return;
1969#endif 2259#endif
1970 2260
1971 if (!ev_active (&pipe_w))
1972 return;
1973
1974 signals [signum - 1].pending = 1; 2261 signals [signum - 1].pending = 1;
1975 evpipe_write (EV_A_ &sig_pending); 2262 evpipe_write (EV_A_ &sig_pending);
1976} 2263}
1977 2264
1978static void 2265static void
1988void noinline 2275void noinline
1989ev_feed_signal_event (EV_P_ int signum) EV_THROW 2276ev_feed_signal_event (EV_P_ int signum) EV_THROW
1990{ 2277{
1991 WL w; 2278 WL w;
1992 2279
1993 if (expect_false (signum <= 0 || signum > EV_NSIG)) 2280 if (expect_false (signum <= 0 || signum >= EV_NSIG))
1994 return; 2281 return;
1995 2282
1996 --signum; 2283 --signum;
1997 2284
1998#if EV_MULTIPLICITY 2285#if EV_MULTIPLICITY
2002 if (expect_false (signals [signum].loop != EV_A)) 2289 if (expect_false (signals [signum].loop != EV_A))
2003 return; 2290 return;
2004#endif 2291#endif
2005 2292
2006 signals [signum].pending = 0; 2293 signals [signum].pending = 0;
2294 ECB_MEMORY_FENCE_RELEASE;
2007 2295
2008 for (w = signals [signum].head; w; w = w->next) 2296 for (w = signals [signum].head; w; w = w->next)
2009 ev_feed_event (EV_A_ (W)w, EV_SIGNAL); 2297 ev_feed_event (EV_A_ (W)w, EV_SIGNAL);
2010} 2298}
2011 2299
2295#if EV_ASYNC_ENABLE 2583#if EV_ASYNC_ENABLE
2296 async_pending = 0; 2584 async_pending = 0;
2297#endif 2585#endif
2298 pipe_write_skipped = 0; 2586 pipe_write_skipped = 0;
2299 pipe_write_wanted = 0; 2587 pipe_write_wanted = 0;
2588 evpipe [0] = -1;
2589 evpipe [1] = -1;
2300#if EV_USE_INOTIFY 2590#if EV_USE_INOTIFY
2301 fs_fd = flags & EVFLAG_NOINOTIFY ? -1 : -2; 2591 fs_fd = flags & EVFLAG_NOINOTIFY ? -1 : -2;
2302#endif 2592#endif
2303#if EV_USE_SIGNALFD 2593#if EV_USE_SIGNALFD
2304 sigfd = flags & EVFLAG_SIGNALFD ? -2 : -1; 2594 sigfd = flags & EVFLAG_SIGNALFD ? -2 : -1;
2355 EV_INVOKE_PENDING; 2645 EV_INVOKE_PENDING;
2356 } 2646 }
2357#endif 2647#endif
2358 2648
2359#if EV_CHILD_ENABLE 2649#if EV_CHILD_ENABLE
2360 if (ev_is_active (&childev)) 2650 if (ev_is_default_loop (EV_A) && ev_is_active (&childev))
2361 { 2651 {
2362 ev_ref (EV_A); /* child watcher */ 2652 ev_ref (EV_A); /* child watcher */
2363 ev_signal_stop (EV_A_ &childev); 2653 ev_signal_stop (EV_A_ &childev);
2364 } 2654 }
2365#endif 2655#endif
2367 if (ev_is_active (&pipe_w)) 2657 if (ev_is_active (&pipe_w))
2368 { 2658 {
2369 /*ev_ref (EV_A);*/ 2659 /*ev_ref (EV_A);*/
2370 /*ev_io_stop (EV_A_ &pipe_w);*/ 2660 /*ev_io_stop (EV_A_ &pipe_w);*/
2371 2661
2372#if EV_USE_EVENTFD
2373 if (evfd >= 0)
2374 close (evfd);
2375#endif
2376
2377 if (evpipe [0] >= 0)
2378 {
2379 EV_WIN32_CLOSE_FD (evpipe [0]); 2662 if (evpipe [0] >= 0) EV_WIN32_CLOSE_FD (evpipe [0]);
2380 EV_WIN32_CLOSE_FD (evpipe [1]); 2663 if (evpipe [1] >= 0) EV_WIN32_CLOSE_FD (evpipe [1]);
2381 }
2382 } 2664 }
2383 2665
2384#if EV_USE_SIGNALFD 2666#if EV_USE_SIGNALFD
2385 if (ev_is_active (&sigfd_w)) 2667 if (ev_is_active (&sigfd_w))
2386 close (sigfd); 2668 close (sigfd);
2472#endif 2754#endif
2473#if EV_USE_INOTIFY 2755#if EV_USE_INOTIFY
2474 infy_fork (EV_A); 2756 infy_fork (EV_A);
2475#endif 2757#endif
2476 2758
2759#if EV_SIGNAL_ENABLE || EV_ASYNC_ENABLE
2477 if (ev_is_active (&pipe_w)) 2760 if (ev_is_active (&pipe_w))
2478 { 2761 {
2479 /* pipe_write_wanted must be false now, so modifying fd vars should be safe */ 2762 /* pipe_write_wanted must be false now, so modifying fd vars should be safe */
2480 2763
2481 ev_ref (EV_A); 2764 ev_ref (EV_A);
2482 ev_io_stop (EV_A_ &pipe_w); 2765 ev_io_stop (EV_A_ &pipe_w);
2483 2766
2484#if EV_USE_EVENTFD
2485 if (evfd >= 0)
2486 close (evfd);
2487#endif
2488
2489 if (evpipe [0] >= 0) 2767 if (evpipe [0] >= 0)
2490 {
2491 EV_WIN32_CLOSE_FD (evpipe [0]); 2768 EV_WIN32_CLOSE_FD (evpipe [0]);
2492 EV_WIN32_CLOSE_FD (evpipe [1]);
2493 }
2494 2769
2495#if EV_SIGNAL_ENABLE || EV_ASYNC_ENABLE
2496 evpipe_init (EV_A); 2770 evpipe_init (EV_A);
2497 /* now iterate over everything, in case we missed something */ 2771 /* iterate over everything, in case we missed something before */
2498 pipecb (EV_A_ &pipe_w, EV_READ); 2772 ev_feed_event (EV_A_ &pipe_w, EV_CUSTOM);
2499#endif
2500 } 2773 }
2774#endif
2501 2775
2502 postfork = 0; 2776 postfork = 0;
2503} 2777}
2504 2778
2505#if EV_MULTIPLICITY 2779#if EV_MULTIPLICITY
2678} 2952}
2679 2953
2680void 2954void
2681ev_loop_fork (EV_P) EV_THROW 2955ev_loop_fork (EV_P) EV_THROW
2682{ 2956{
2683 postfork = 1; /* must be in line with ev_default_fork */ 2957 postfork = 1;
2684} 2958}
2685 2959
2686/*****************************************************************************/ 2960/*****************************************************************************/
2687 2961
2688void 2962void
2704} 2978}
2705 2979
2706void noinline 2980void noinline
2707ev_invoke_pending (EV_P) 2981ev_invoke_pending (EV_P)
2708{ 2982{
2709 for (pendingpri = NUMPRI; pendingpri--; ) /* pendingpri is modified during the loop */ 2983 pendingpri = NUMPRI;
2984
2985 while (pendingpri) /* pendingpri possibly gets modified in the inner loop */
2986 {
2987 --pendingpri;
2988
2710 while (pendingcnt [pendingpri]) 2989 while (pendingcnt [pendingpri])
2711 { 2990 {
2712 ANPENDING *p = pendings [pendingpri] + --pendingcnt [pendingpri]; 2991 ANPENDING *p = pendings [pendingpri] + --pendingcnt [pendingpri];
2713 2992
2714 p->w->pending = 0; 2993 p->w->pending = 0;
2715 EV_CB_INVOKE (p->w, p->events); 2994 EV_CB_INVOKE (p->w, p->events);
2716 EV_FREQUENT_CHECK; 2995 EV_FREQUENT_CHECK;
2717 } 2996 }
2997 }
2718} 2998}
2719 2999
2720#if EV_IDLE_ENABLE 3000#if EV_IDLE_ENABLE
2721/* make idle watchers pending. this handles the "call-idle */ 3001/* make idle watchers pending. this handles the "call-idle */
2722/* only when higher priorities are idle" logic */ 3002/* only when higher priorities are idle" logic */
2812{ 3092{
2813 EV_FREQUENT_CHECK; 3093 EV_FREQUENT_CHECK;
2814 3094
2815 while (periodiccnt && ANHE_at (periodics [HEAP0]) < ev_rt_now) 3095 while (periodiccnt && ANHE_at (periodics [HEAP0]) < ev_rt_now)
2816 { 3096 {
2817 int feed_count = 0;
2818
2819 do 3097 do
2820 { 3098 {
2821 ev_periodic *w = (ev_periodic *)ANHE_w (periodics [HEAP0]); 3099 ev_periodic *w = (ev_periodic *)ANHE_w (periodics [HEAP0]);
2822 3100
2823 /*assert (("libev: inactive timer on periodic heap detected", ev_is_active (w)));*/ 3101 /*assert (("libev: inactive timer on periodic heap detected", ev_is_active (w)));*/
3082 backend_poll (EV_A_ waittime); 3360 backend_poll (EV_A_ waittime);
3083 assert ((loop_done = EVBREAK_CANCEL, 1)); /* assert for side effect */ 3361 assert ((loop_done = EVBREAK_CANCEL, 1)); /* assert for side effect */
3084 3362
3085 pipe_write_wanted = 0; /* just an optimisation, no fence needed */ 3363 pipe_write_wanted = 0; /* just an optimisation, no fence needed */
3086 3364
3365 ECB_MEMORY_FENCE_ACQUIRE;
3087 if (pipe_write_skipped) 3366 if (pipe_write_skipped)
3088 { 3367 {
3089 assert (("libev: pipe_w not active, but pipe not written", ev_is_active (&pipe_w))); 3368 assert (("libev: pipe_w not active, but pipe not written", ev_is_active (&pipe_w)));
3090 ev_feed_event (EV_A_ &pipe_w, EV_CUSTOM); 3369 ev_feed_event (EV_A_ &pipe_w, EV_CUSTOM);
3091 } 3370 }
3469#if EV_MULTIPLICITY 3748#if EV_MULTIPLICITY
3470 assert (("libev: a signal must not be attached to two different loops", 3749 assert (("libev: a signal must not be attached to two different loops",
3471 !signals [w->signum - 1].loop || signals [w->signum - 1].loop == loop)); 3750 !signals [w->signum - 1].loop || signals [w->signum - 1].loop == loop));
3472 3751
3473 signals [w->signum - 1].loop = EV_A; 3752 signals [w->signum - 1].loop = EV_A;
3753 ECB_MEMORY_FENCE_RELEASE;
3474#endif 3754#endif
3475 3755
3476 EV_FREQUENT_CHECK; 3756 EV_FREQUENT_CHECK;
3477 3757
3478#if EV_USE_SIGNALFD 3758#if EV_USE_SIGNALFD
3633# define EV_INOTIFY_BUFSIZE (sizeof (struct inotify_event) * 2 + NAME_MAX) 3913# define EV_INOTIFY_BUFSIZE (sizeof (struct inotify_event) * 2 + NAME_MAX)
3634 3914
3635static void noinline 3915static void noinline
3636infy_add (EV_P_ ev_stat *w) 3916infy_add (EV_P_ ev_stat *w)
3637{ 3917{
3638 w->wd = inotify_add_watch (fs_fd, w->path, IN_ATTRIB | IN_DELETE_SELF | IN_MOVE_SELF | IN_MODIFY | IN_DONT_FOLLOW | IN_MASK_ADD); 3918 w->wd = inotify_add_watch (fs_fd, w->path,
3919 IN_ATTRIB | IN_DELETE_SELF | IN_MOVE_SELF | IN_MODIFY
3920 | IN_CREATE | IN_DELETE | IN_MOVED_FROM | IN_MOVED_TO
3921 | IN_DONT_FOLLOW | IN_MASK_ADD);
3639 3922
3640 if (w->wd >= 0) 3923 if (w->wd >= 0)
3641 { 3924 {
3642 struct statfs sfs; 3925 struct statfs sfs;
3643 3926
3647 3930
3648 if (!fs_2625) 3931 if (!fs_2625)
3649 w->timer.repeat = w->interval ? w->interval : DEF_STAT_INTERVAL; 3932 w->timer.repeat = w->interval ? w->interval : DEF_STAT_INTERVAL;
3650 else if (!statfs (w->path, &sfs) 3933 else if (!statfs (w->path, &sfs)
3651 && (sfs.f_type == 0x1373 /* devfs */ 3934 && (sfs.f_type == 0x1373 /* devfs */
3935 || sfs.f_type == 0x4006 /* fat */
3936 || sfs.f_type == 0x4d44 /* msdos */
3652 || sfs.f_type == 0xEF53 /* ext2/3 */ 3937 || sfs.f_type == 0xEF53 /* ext2/3 */
3938 || sfs.f_type == 0x72b6 /* jffs2 */
3939 || sfs.f_type == 0x858458f6 /* ramfs */
3940 || sfs.f_type == 0x5346544e /* ntfs */
3653 || sfs.f_type == 0x3153464a /* jfs */ 3941 || sfs.f_type == 0x3153464a /* jfs */
3942 || sfs.f_type == 0x9123683e /* btrfs */
3654 || sfs.f_type == 0x52654973 /* reiser3 */ 3943 || sfs.f_type == 0x52654973 /* reiser3 */
3655 || sfs.f_type == 0x01021994 /* tempfs */ 3944 || sfs.f_type == 0x01021994 /* tmpfs */
3656 || sfs.f_type == 0x58465342 /* xfs */)) 3945 || sfs.f_type == 0x58465342 /* xfs */))
3657 w->timer.repeat = 0.; /* filesystem is local, kernel new enough */ 3946 w->timer.repeat = 0.; /* filesystem is local, kernel new enough */
3658 else 3947 else
3659 w->timer.repeat = w->interval ? w->interval : NFS_STAT_INTERVAL; /* remote, use reduced frequency */ 3948 w->timer.repeat = w->interval ? w->interval : NFS_STAT_INTERVAL; /* remote, use reduced frequency */
3660 } 3949 }
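As a hedged, Linux-only sketch of the statfs f_type probe used above (0x01021994 is the tmpfs magic from the table; the helper name is invented for illustration):

#include <sys/statfs.h>

static int
is_on_tmpfs (const char *path)
{
  struct statfs sfs;

  return !statfs (path, &sfs) && sfs.f_type == 0x01021994;
}
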
