/cvs/libev/ev.c

Comparing libev/ev.c (file contents):
Revision 1.433 by root, Tue May 15 13:03:20 2012 UTC vs.
Revision 1.456 by root, Thu Jul 4 22:32:23 2013 UTC

357 357
358#ifndef EV_HEAP_CACHE_AT 358#ifndef EV_HEAP_CACHE_AT
359# define EV_HEAP_CACHE_AT EV_FEATURE_DATA 359# define EV_HEAP_CACHE_AT EV_FEATURE_DATA
360#endif 360#endif
361 361
362#ifdef ANDROID
363/* supposedly, android doesn't typedef fd_mask */
364# undef EV_USE_SELECT
365# define EV_USE_SELECT 0
366/* supposedly, we need to include syscall.h, not sys/syscall.h, so just disable */
367# undef EV_USE_CLOCK_SYSCALL
368# define EV_USE_CLOCK_SYSCALL 0
369#endif
370
371/* aix's poll.h seems to cause lots of trouble */
372#ifdef _AIX
373/* AIX has a completely broken poll.h header */
374# undef EV_USE_POLL
375# define EV_USE_POLL 0
376#endif
377
362/* on linux, we can use a (slow) syscall to avoid a dependency on pthread, */ 378/* on linux, we can use a (slow) syscall to avoid a dependency on pthread, */
363/* which makes programs even slower. might work on other unices, too. */ 379/* which makes programs even slower. might work on other unices, too. */
364#if EV_USE_CLOCK_SYSCALL 380#if EV_USE_CLOCK_SYSCALL
365# include <sys/syscall.h> 381# include <sys/syscall.h>
366# ifdef SYS_clock_gettime 382# ifdef SYS_clock_gettime
372# define EV_USE_CLOCK_SYSCALL 0 388# define EV_USE_CLOCK_SYSCALL 0
373# endif 389# endif
374#endif 390#endif
375 391
376/* this block fixes any misconfiguration where we know we run into trouble otherwise */ 392/* this block fixes any misconfiguration where we know we run into trouble otherwise */
377
378#ifdef _AIX
379/* AIX has a completely broken poll.h header */
380# undef EV_USE_POLL
381# define EV_USE_POLL 0
382#endif
383 393
384#ifndef CLOCK_MONOTONIC 394#ifndef CLOCK_MONOTONIC
385# undef EV_USE_MONOTONIC 395# undef EV_USE_MONOTONIC
386# define EV_USE_MONOTONIC 0 396# define EV_USE_MONOTONIC 0
387#endif 397#endif
504 */ 514 */
505 515
506#ifndef ECB_H 516#ifndef ECB_H
507#define ECB_H 517#define ECB_H
508 518
519/* 16 bits major, 16 bits minor */
520#define ECB_VERSION 0x00010003
521
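The version word packs the major number into the upper 16 bits and the minor number into the lower 16, so 0x00010003 decodes as version 1.3. A minimal sketch of how a consumer might decode it (the macro names below are illustrative, not part of ecb.h):

    #define ECB_VERSION_MAJOR (ECB_VERSION >> 16)    /* 0x00010003 -> 1 */
    #define ECB_VERSION_MINOR (ECB_VERSION & 0xffff) /* 0x00010003 -> 3 */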
509#ifdef _WIN32 522#ifdef _WIN32
510 typedef signed char int8_t; 523 typedef signed char int8_t;
511 typedef unsigned char uint8_t; 524 typedef unsigned char uint8_t;
512 typedef signed short int16_t; 525 typedef signed short int16_t;
513 typedef unsigned short uint16_t; 526 typedef unsigned short uint16_t;
518 typedef unsigned long long uint64_t; 531 typedef unsigned long long uint64_t;
519 #else /* _MSC_VER || __BORLANDC__ */ 532 #else /* _MSC_VER || __BORLANDC__ */
520 typedef signed __int64 int64_t; 533 typedef signed __int64 int64_t;
521 typedef unsigned __int64 uint64_t; 534 typedef unsigned __int64 uint64_t;
522 #endif 535 #endif
536 #ifdef _WIN64
537 #define ECB_PTRSIZE 8
538 typedef uint64_t uintptr_t;
539 typedef int64_t intptr_t;
540 #else
541 #define ECB_PTRSIZE 4
542 typedef uint32_t uintptr_t;
543 typedef int32_t intptr_t;
544 #endif
523#else 545#else
524 #include <inttypes.h> 546 #include <inttypes.h>
547 #if UINTMAX_MAX > 0xffffffffU
548 #define ECB_PTRSIZE 8
549 #else
550 #define ECB_PTRSIZE 4
551 #endif
552#endif
553
554/* work around x32 idiocy by defining proper macros */
555#if __x86_64 || _M_AMD64
556 #if __ILP32
557 #define ECB_AMD64_X32 1
558 #else
559 #define ECB_AMD64 1
560 #endif
525#endif 561#endif
526 562
527/* many compilers define _GNUC_ to some versions but then only implement 563/* many compilers define _GNUC_ to some versions but then only implement
528 * what their idiot authors think are the "more important" extensions, 564 * what their idiot authors think are the "more important" extensions,
529 * causing enormous grief in return for some better fake benchmark numbers. 565 * causing enormous grief in return for some better fake benchmark numbers.
537 #else 573 #else
538 #define ECB_GCC_VERSION(major,minor) (__GNUC__ > (major) || (__GNUC__ == (major) && __GNUC_MINOR__ >= (minor))) 574 #define ECB_GCC_VERSION(major,minor) (__GNUC__ > (major) || (__GNUC__ == (major) && __GNUC_MINOR__ >= (minor)))
539 #endif 575 #endif
540#endif 576#endif
541 577
578#define ECB_C (__STDC__+0) /* this assumes that __STDC__ is either empty or a number */
579#define ECB_C99 (__STDC_VERSION__ >= 199901L)
580#define ECB_C11 (__STDC_VERSION__ >= 201112L)
581#define ECB_CPP (__cplusplus+0)
582#define ECB_CPP11 (__cplusplus >= 201103L)
583
584#if ECB_CPP
585 #define ECB_EXTERN_C extern "C"
586 #define ECB_EXTERN_C_BEG ECB_EXTERN_C {
587 #define ECB_EXTERN_C_END }
588#else
589 #define ECB_EXTERN_C extern
590 #define ECB_EXTERN_C_BEG
591 #define ECB_EXTERN_C_END
592#endif
593
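These wrappers let one set of declarations serve both C and C++ translation units: under C++ they expand to an extern "C" block, under plain C they reduce to nothing (or a bare extern). A hedged sketch of typical use, with a hypothetical function name:

    ECB_EXTERN_C_BEG   /* opens extern "C" { under C++, empty under C */

    void my_library_init (void);

    ECB_EXTERN_C_END   /* matching } under C++ */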
542/*****************************************************************************/ 594/*****************************************************************************/
543 595
544/* ECB_NO_THREADS - ecb is not used by multiple threads, ever */ 596/* ECB_NO_THREADS - ecb is not used by multiple threads, ever */
545/* ECB_NO_SMP - ecb might be used in multiple threads, but only on a single cpu */ 597/* ECB_NO_SMP - ecb might be used in multiple threads, but only on a single cpu */
546 598
547#if ECB_NO_THREADS 599#if ECB_NO_THREADS
548# define ECB_NO_SMP 1 600 #define ECB_NO_SMP 1
549#endif 601#endif
550 602
551#if ECB_NO_THREADS || ECB_NO_SMP 603#if ECB_NO_SMP
552 #define ECB_MEMORY_FENCE do { } while (0) 604 #define ECB_MEMORY_FENCE do { } while (0)
553#endif 605#endif
554 606
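With ECB_NO_SMP (implied by ECB_NO_THREADS) the full fence collapses to a no-op, and the acquire/release variants derived from it further down follow suit. A sketch of how an application that never shares data between CPUs might opt in, assuming ecb.h is used as a standalone header rather than embedded as it is here:

    #define ECB_NO_SMP 1   /* single-cpu use only: all fences become no-ops */
    #include "ecb.h"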
555#ifndef ECB_MEMORY_FENCE 607#ifndef ECB_MEMORY_FENCE
556 #if ECB_GCC_VERSION(2,5) || defined __INTEL_COMPILER || (__llvm__ && __GNUC__) || __SUNPRO_C >= 0x5110 || __SUNPRO_CC >= 0x5110 608 #if ECB_GCC_VERSION(2,5) || defined __INTEL_COMPILER || (__llvm__ && __GNUC__) || __SUNPRO_C >= 0x5110 || __SUNPRO_CC >= 0x5110
557 #if __i386 || __i386__ 609 #if __i386 || __i386__
558 #define ECB_MEMORY_FENCE __asm__ __volatile__ ("lock; orb $0, -1(%%esp)" : : : "memory") 610 #define ECB_MEMORY_FENCE __asm__ __volatile__ ("lock; orb $0, -1(%%esp)" : : : "memory")
559 #define ECB_MEMORY_FENCE_ACQUIRE ECB_MEMORY_FENCE /* non-lock xchg might be enough */ 611 #define ECB_MEMORY_FENCE_ACQUIRE __asm__ __volatile__ ("" : : : "memory")
560 #define ECB_MEMORY_FENCE_RELEASE do { } while (0) /* unlikely to change in future cpus */ 612 #define ECB_MEMORY_FENCE_RELEASE __asm__ __volatile__ ("")
561 #elif __amd64 || __amd64__ || __x86_64 || __x86_64__ 613 #elif __amd64 || __amd64__ || __x86_64 || __x86_64__
562 #define ECB_MEMORY_FENCE __asm__ __volatile__ ("mfence" : : : "memory") 614 #define ECB_MEMORY_FENCE __asm__ __volatile__ ("mfence" : : : "memory")
563 #define ECB_MEMORY_FENCE_ACQUIRE __asm__ __volatile__ ("lfence" : : : "memory") 615 #define ECB_MEMORY_FENCE_ACQUIRE __asm__ __volatile__ ("" : : : "memory")
564 #define ECB_MEMORY_FENCE_RELEASE __asm__ __volatile__ ("sfence") /* play safe - not needed in any current cpu */ 616 #define ECB_MEMORY_FENCE_RELEASE __asm__ __volatile__ ("")
565 #elif __powerpc__ || __ppc__ || __powerpc64__ || __ppc64__ 617 #elif __powerpc__ || __ppc__ || __powerpc64__ || __ppc64__
566 #define ECB_MEMORY_FENCE __asm__ __volatile__ ("sync" : : : "memory") 618 #define ECB_MEMORY_FENCE __asm__ __volatile__ ("sync" : : : "memory")
567 #elif defined __ARM_ARCH_6__ || defined __ARM_ARCH_6J__ \ 619 #elif defined __ARM_ARCH_6__ || defined __ARM_ARCH_6J__ \
568 || defined __ARM_ARCH_6K__ || defined __ARM_ARCH_6ZK__ 620 || defined __ARM_ARCH_6K__ || defined __ARM_ARCH_6ZK__
569 #define ECB_MEMORY_FENCE __asm__ __volatile__ ("mcr p15,0,%0,c7,c10,5" : : "r" (0) : "memory") 621 #define ECB_MEMORY_FENCE __asm__ __volatile__ ("mcr p15,0,%0,c7,c10,5" : : "r" (0) : "memory")
570 #elif defined __ARM_ARCH_7__ || defined __ARM_ARCH_7A__ \ 622 #elif defined __ARM_ARCH_7__ || defined __ARM_ARCH_7A__ \
571 || defined __ARM_ARCH_7M__ || defined __ARM_ARCH_7R__ 623 || defined __ARM_ARCH_7M__ || defined __ARM_ARCH_7R__
572 #define ECB_MEMORY_FENCE __asm__ __volatile__ ("dmb" : : : "memory") 624 #define ECB_MEMORY_FENCE __asm__ __volatile__ ("dmb" : : : "memory")
573 #elif __sparc || __sparc__ 625 #elif __sparc || __sparc__
574 #define ECB_MEMORY_FENCE __asm__ __volatile__ ("membar #LoadStore | #LoadLoad | #StoreStore | #StoreLoad | " : : : "memory") 626 #define ECB_MEMORY_FENCE __asm__ __volatile__ ("membar #LoadStore | #LoadLoad | #StoreStore | #StoreLoad" : : : "memory")
575 #define ECB_MEMORY_FENCE_ACQUIRE __asm__ __volatile__ ("membar #LoadStore | #LoadLoad" : : : "memory") 627 #define ECB_MEMORY_FENCE_ACQUIRE __asm__ __volatile__ ("membar #LoadStore | #LoadLoad" : : : "memory")
576 #define ECB_MEMORY_FENCE_RELEASE __asm__ __volatile__ ("membar #LoadStore | #StoreStore") 628 #define ECB_MEMORY_FENCE_RELEASE __asm__ __volatile__ ("membar #LoadStore | #StoreStore")
577 #elif defined __s390__ || defined __s390x__ 629 #elif defined __s390__ || defined __s390x__
578 #define ECB_MEMORY_FENCE __asm__ __volatile__ ("bcr 15,0" : : : "memory") 630 #define ECB_MEMORY_FENCE __asm__ __volatile__ ("bcr 15,0" : : : "memory")
579 #elif defined __mips__ 631 #elif defined __mips__
 632 /* GNU/Linux emulates sync on mips1 architectures, so we force its use */

633 /* anybody else who still uses mips1 is supposed to send in their version, with detection code. */
580 #define ECB_MEMORY_FENCE __asm__ __volatile__ ("sync" : : : "memory") 634 #define ECB_MEMORY_FENCE __asm__ __volatile__ (".set mips2; sync; .set mips0" : : : "memory")
581 #elif defined __alpha__ 635 #elif defined __alpha__
582 #define ECB_MEMORY_FENCE __asm__ __volatile__ ("mb" : : : "memory") 636 #define ECB_MEMORY_FENCE __asm__ __volatile__ ("mb" : : : "memory")
637 #elif defined __hppa__
638 #define ECB_MEMORY_FENCE __asm__ __volatile__ ("" : : : "memory")
639 #define ECB_MEMORY_FENCE_RELEASE __asm__ __volatile__ ("")
640 #elif defined __ia64__
641 #define ECB_MEMORY_FENCE __asm__ __volatile__ ("mf" : : : "memory")
583 #endif 642 #endif
584 #endif 643 #endif
585#endif 644#endif
586 645
587#ifndef ECB_MEMORY_FENCE 646#ifndef ECB_MEMORY_FENCE
647 #if ECB_GCC_VERSION(4,7)
648 /* see comment below (stdatomic.h) about the C11 memory model. */
649 #define ECB_MEMORY_FENCE __atomic_thread_fence (__ATOMIC_SEQ_CST)
650
651 /* The __has_feature syntax from clang is so misdesigned that we cannot use it
652 * without risking compile time errors with other compilers. We *could*
653 * define our own ecb_clang_has_feature, but I just can't be bothered to work
654 * around this shit time and again.
655 * #elif defined __clang && __has_feature (cxx_atomic)
656 * // see comment below (stdatomic.h) about the C11 memory model.
657 * #define ECB_MEMORY_FENCE __c11_atomic_thread_fence (__ATOMIC_SEQ_CST)
658 */
659
588 #if ECB_GCC_VERSION(4,4) || defined __INTEL_COMPILER || defined __clang__ 660 #elif ECB_GCC_VERSION(4,4) || defined __INTEL_COMPILER || defined __clang__
589 #define ECB_MEMORY_FENCE __sync_synchronize () 661 #define ECB_MEMORY_FENCE __sync_synchronize ()
590 /*#define ECB_MEMORY_FENCE_ACQUIRE ({ char dummy = 0; __sync_lock_test_and_set (&dummy, 1); }) */
591 /*#define ECB_MEMORY_FENCE_RELEASE ({ char dummy = 1; __sync_lock_release (&dummy ); }) */
592 #elif _MSC_VER >= 1400 /* VC++ 2005 */ 662 #elif _MSC_VER >= 1400 /* VC++ 2005 */
593 #pragma intrinsic(_ReadBarrier,_WriteBarrier,_ReadWriteBarrier) 663 #pragma intrinsic(_ReadBarrier,_WriteBarrier,_ReadWriteBarrier)
594 #define ECB_MEMORY_FENCE _ReadWriteBarrier () 664 #define ECB_MEMORY_FENCE _ReadWriteBarrier ()
595 #define ECB_MEMORY_FENCE_ACQUIRE _ReadWriteBarrier () /* according to msdn, _ReadBarrier is not a load fence */ 665 #define ECB_MEMORY_FENCE_ACQUIRE _ReadWriteBarrier () /* according to msdn, _ReadBarrier is not a load fence */
596 #define ECB_MEMORY_FENCE_RELEASE _WriteBarrier () 666 #define ECB_MEMORY_FENCE_RELEASE _WriteBarrier ()
606 #define ECB_MEMORY_FENCE __sync () 676 #define ECB_MEMORY_FENCE __sync ()
607 #endif 677 #endif
608#endif 678#endif
609 679
610#ifndef ECB_MEMORY_FENCE 680#ifndef ECB_MEMORY_FENCE
681 #if ECB_C11 && !defined __STDC_NO_ATOMICS__
682 /* we assume that these memory fences work on all variables/all memory accesses, */
683 /* not just C11 atomics and atomic accesses */
684 #include <stdatomic.h>
685 /* Unfortunately, neither gcc 4.7 nor clang 3.1 generate any instructions for */
686 /* any fence other than seq_cst, which isn't very efficient for us. */
687 /* Why that is, we don't know - either the C11 memory model is quite useless */
688 /* for most usages, or gcc and clang have a bug */
689 /* I *currently* lean towards the latter, and inefficiently implement */
690 /* all three of ecb's fences as a seq_cst fence */
691 #define ECB_MEMORY_FENCE atomic_thread_fence (memory_order_seq_cst)
692 #endif
693#endif
694
695#ifndef ECB_MEMORY_FENCE
611 #if !ECB_AVOID_PTHREADS 696 #if !ECB_AVOID_PTHREADS
612 /* 697 /*
613 * if you get undefined symbol references to pthread_mutex_lock, 698 * if you get undefined symbol references to pthread_mutex_lock,
614 * or failure to find pthread.h, then you should implement 699 * or failure to find pthread.h, then you should implement
615 * the ECB_MEMORY_FENCE operations for your cpu/compiler 700 * the ECB_MEMORY_FENCE operations for your cpu/compiler
633 #define ECB_MEMORY_FENCE_RELEASE ECB_MEMORY_FENCE 718 #define ECB_MEMORY_FENCE_RELEASE ECB_MEMORY_FENCE
634#endif 719#endif
635 720
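The three fences implement the usual publish/consume discipline: a writer issues ECB_MEMORY_FENCE_RELEASE between its payload stores and the flag store, and a reader issues ECB_MEMORY_FENCE_ACQUIRE between seeing the flag and touching the payload, which is exactly how evpipe_write and pipecb use them later in this file. A minimal sketch with purely illustrative names:

    static int payload;
    static volatile int ready;

    static void
    publish (int value)
    {
      payload = value;           /* write the data first */
      ECB_MEMORY_FENCE_RELEASE;  /* make it visible before the flag */
      ready = 1;
    }

    static int
    consume (void)
    {
      if (!ready)
        return -1;               /* nothing published yet */

      ECB_MEMORY_FENCE_ACQUIRE;  /* order the flag read before the payload read */
      return payload;
    }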
636/*****************************************************************************/ 721/*****************************************************************************/
637 722
638#define ECB_C99 (__STDC_VERSION__ >= 199901L)
639
640#if __cplusplus 723#if __cplusplus
641 #define ecb_inline static inline 724 #define ecb_inline static inline
642#elif ECB_GCC_VERSION(2,5) 725#elif ECB_GCC_VERSION(2,5)
643 #define ecb_inline static __inline__ 726 #define ecb_inline static __inline__
644#elif ECB_C99 727#elif ECB_C99
682#elif ECB_GCC_VERSION(3,0) 765#elif ECB_GCC_VERSION(3,0)
683 #define ecb_decltype(x) __typeof(x) 766 #define ecb_decltype(x) __typeof(x)
684#endif 767#endif
685 768
686#define ecb_noinline ecb_attribute ((__noinline__)) 769#define ecb_noinline ecb_attribute ((__noinline__))
687#define ecb_noreturn ecb_attribute ((__noreturn__))
688#define ecb_unused ecb_attribute ((__unused__)) 770#define ecb_unused ecb_attribute ((__unused__))
689#define ecb_const ecb_attribute ((__const__)) 771#define ecb_const ecb_attribute ((__const__))
690#define ecb_pure ecb_attribute ((__pure__)) 772#define ecb_pure ecb_attribute ((__pure__))
773
774#if ECB_C11
775 #define ecb_noreturn _Noreturn
776#else
777 #define ecb_noreturn ecb_attribute ((__noreturn__))
778#endif
691 779
692#if ECB_GCC_VERSION(4,3) 780#if ECB_GCC_VERSION(4,3)
693 #define ecb_artificial ecb_attribute ((__artificial__)) 781 #define ecb_artificial ecb_attribute ((__artificial__))
694 #define ecb_hot ecb_attribute ((__hot__)) 782 #define ecb_hot ecb_attribute ((__hot__))
695 #define ecb_cold ecb_attribute ((__cold__)) 783 #define ecb_cold ecb_attribute ((__cold__))
786 874
787 return r + ecb_ld32 (x); 875 return r + ecb_ld32 (x);
788 } 876 }
789#endif 877#endif
790 878
879ecb_function_ ecb_bool ecb_is_pot32 (uint32_t x) ecb_const;
880ecb_function_ ecb_bool ecb_is_pot32 (uint32_t x) { return !(x & (x - 1)); }
881ecb_function_ ecb_bool ecb_is_pot64 (uint64_t x) ecb_const;
882ecb_function_ ecb_bool ecb_is_pot64 (uint64_t x) { return !(x & (x - 1)); }
883
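The x & (x - 1) expression clears the lowest set bit, so it is zero exactly when at most one bit was set; note that 0 therefore also passes the test. A few worked evaluations:

    ecb_is_pot32 (64)   /* 64 & 63 == 0            -> 1 (true)  */
    ecb_is_pot32 (80)   /* 80 & 79 == 64, nonzero  -> 0 (false) */
    ecb_is_pot32 (0)    /* 0 & 0xffffffff == 0     -> 1, a known quirk of this test */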
791ecb_function_ uint8_t ecb_bitrev8 (uint8_t x) ecb_const; 884ecb_function_ uint8_t ecb_bitrev8 (uint8_t x) ecb_const;
792ecb_function_ uint8_t ecb_bitrev8 (uint8_t x) 885ecb_function_ uint8_t ecb_bitrev8 (uint8_t x)
793{ 886{
794 return ( (x * 0x0802U & 0x22110U) 887 return ( (x * 0x0802U & 0x22110U)
795 | (x * 0x8020U & 0x88440U)) * 0x10101U >> 16; 888 | (x * 0x8020U & 0x88440U)) * 0x10101U >> 16;
879 ecb_inline void ecb_unreachable (void) ecb_noreturn; 972 ecb_inline void ecb_unreachable (void) ecb_noreturn;
880 ecb_inline void ecb_unreachable (void) { } 973 ecb_inline void ecb_unreachable (void) { }
881#endif 974#endif
882 975
883/* try to tell the compiler that some condition is definitely true */ 976/* try to tell the compiler that some condition is definitely true */
884#define ecb_assume(cond) do { if (!(cond)) ecb_unreachable (); } while (0) 977#define ecb_assume(cond) if (!(cond)) ecb_unreachable (); else 0
885 978
886ecb_inline unsigned char ecb_byteorder_helper (void) ecb_const; 979ecb_inline unsigned char ecb_byteorder_helper (void) ecb_const;
887ecb_inline unsigned char 980ecb_inline unsigned char
888ecb_byteorder_helper (void) 981ecb_byteorder_helper (void)
889{ 982{
890 const uint32_t u = 0x11223344; 983 /* the union code still generates code under pressure in gcc, */
891 return *(unsigned char *)&u; 984 /* but less than using pointers, and always seems to */
985 /* successfully return a constant. */
986 /* the reason why we have this horrible preprocessor mess */
987 /* is to avoid it in all cases, at least on common architectures */
988 /* or when using a recent enough gcc version (>= 4.6) */
989#if __i386 || __i386__ || _M_X86 || __amd64 || __amd64__ || _M_X64
990 return 0x44;
991#elif __BYTE_ORDER__ && __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
992 return 0x44;
993#elif __BYTE_ORDER__ && __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
994 return 0x11;
995#else
996 union
997 {
998 uint32_t i;
999 uint8_t c;
1000 } u = { 0x11223344 };
1001 return u.c;
1002#endif
892} 1003}
893 1004
894ecb_inline ecb_bool ecb_big_endian (void) ecb_const; 1005ecb_inline ecb_bool ecb_big_endian (void) ecb_const;
895ecb_inline ecb_bool ecb_big_endian (void) { return ecb_byteorder_helper () == 0x11; } 1006ecb_inline ecb_bool ecb_big_endian (void) { return ecb_byteorder_helper () == 0x11; }
896ecb_inline ecb_bool ecb_little_endian (void) ecb_const; 1007ecb_inline ecb_bool ecb_little_endian (void) ecb_const;
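The helper returns the first byte of the constant 0x11223344 as stored in memory, so 0x44 means little-endian and 0x11 means big-endian; the preprocessor branches merely avoid emitting the union when the answer is already known at compile time. A small illustrative use (the helper name is hypothetical; ecb_bswap32 is defined elsewhere in this header):

    static uint32_t
    host_to_be32 (uint32_t x)
    {
      return ecb_big_endian () ? x : ecb_bswap32 (x);   /* swap only on little-endian hosts */
    }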
927 } 1038 }
928#else 1039#else
929 #define ecb_array_length(name) (sizeof (name) / sizeof (name [0])) 1040 #define ecb_array_length(name) (sizeof (name) / sizeof (name [0]))
930#endif 1041#endif
931 1042
1043/*******************************************************************************/
1044/* floating point stuff, can be disabled by defining ECB_NO_LIBM */
1045
1046/* basically, everything uses "ieee pure-endian" floating point numbers */
1047/* the only noteworthy exception is ancient armle, which uses order 43218765 */
1048#if 0 \
1049 || __i386 || __i386__ \
1050 || __amd64 || __amd64__ || __x86_64 || __x86_64__ \
1051 || __powerpc__ || __ppc__ || __powerpc64__ || __ppc64__ \
1052 || defined __arm__ && defined __ARM_EABI__ \
1053 || defined __s390__ || defined __s390x__ \
1054 || defined __mips__ \
1055 || defined __alpha__ \
1056 || defined __hppa__ \
1057 || defined __ia64__ \
1058 || defined _M_IX86 || defined _M_AMD64 || defined _M_IA64
1059 #define ECB_STDFP 1
1060 #include <string.h> /* for memcpy */
1061#else
1062 #define ECB_STDFP 0
1063 #include <math.h> /* for frexp*, ldexp* */
1064#endif
1065
1066#ifndef ECB_NO_LIBM
1067
1068 /* convert a float to ieee single/binary32 */
1069 ecb_function_ uint32_t ecb_float_to_binary32 (float x) ecb_const;
1070 ecb_function_ uint32_t
1071 ecb_float_to_binary32 (float x)
1072 {
1073 uint32_t r;
1074
1075 #if ECB_STDFP
1076 memcpy (&r, &x, 4);
1077 #else
1078 /* slow emulation, works for anything but -0 */
1079 uint32_t m;
1080 int e;
1081
1082 if (x == 0e0f ) return 0x00000000U;
1083 if (x > +3.40282346638528860e+38f) return 0x7f800000U;
1084 if (x < -3.40282346638528860e+38f) return 0xff800000U;
1085 if (x != x ) return 0x7fbfffffU;
1086
1087 m = frexpf (x, &e) * 0x1000000U;
1088
1089 r = m & 0x80000000U;
1090
1091 if (r)
1092 m = -m;
1093
1094 if (e <= -126)
1095 {
1096 m &= 0xffffffU;
1097 m >>= (-125 - e);
1098 e = -126;
1099 }
1100
1101 r |= (e + 126) << 23;
1102 r |= m & 0x7fffffU;
1103 #endif
1104
1105 return r;
1106 }
1107
1108 /* converts an ieee single/binary32 to a float */
1109 ecb_function_ float ecb_binary32_to_float (uint32_t x) ecb_const;
1110 ecb_function_ float
1111 ecb_binary32_to_float (uint32_t x)
1112 {
1113 float r;
1114
1115 #if ECB_STDFP
1116 memcpy (&r, &x, 4);
1117 #else
1118 /* emulation, only works for normals and subnormals and +0 */
1119 int neg = x >> 31;
1120 int e = (x >> 23) & 0xffU;
1121
1122 x &= 0x7fffffU;
1123
1124 if (e)
1125 x |= 0x800000U;
1126 else
1127 e = 1;
1128
1129 /* we distrust ldexpf a bit and do the 2**-24 scaling by an extra multiply */
1130 r = ldexpf (x * (0.5f / 0x800000U), e - 126);
1131
1132 r = neg ? -r : r;
1133 #endif
1134
1135 return r;
1136 }
1137
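On ECB_STDFP targets the conversion is a plain memcpy of the IEEE-754 bit pattern; the #else branch rebuilds the same encoding by hand from frexpf's mantissa/exponent pair, which is why it cannot distinguish -0 from +0. A worked round trip, assuming IEEE single precision:

    uint32_t bits = ecb_float_to_binary32 (1.0f);   /* sign 0, exponent 127, mantissa 0 -> 0x3f800000 */
    float    back = ecb_binary32_to_float (bits);   /* 1.0f again */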
1138 /* convert a double to ieee double/binary64 */
1139 ecb_function_ uint64_t ecb_double_to_binary64 (double x) ecb_const;
1140 ecb_function_ uint64_t
1141 ecb_double_to_binary64 (double x)
1142 {
1143 uint64_t r;
1144
1145 #if ECB_STDFP
1146 memcpy (&r, &x, 8);
1147 #else
1148 /* slow emulation, works for anything but -0 */
1149 uint64_t m;
1150 int e;
1151
1152 if (x == 0e0 ) return 0x0000000000000000U;
1153 if (x > +1.79769313486231470e+308) return 0x7ff0000000000000U;
1154 if (x < -1.79769313486231470e+308) return 0xfff0000000000000U;
1155 if (x != x ) return 0X7ff7ffffffffffffU;
1156
1157 m = frexp (x, &e) * 0x20000000000000U;
1158
1159 r = m & 0x8000000000000000;;
1160
1161 if (r)
1162 m = -m;
1163
1164 if (e <= -1022)
1165 {
1166 m &= 0x1fffffffffffffU;
1167 m >>= (-1021 - e);
1168 e = -1022;
1169 }
1170
1171 r |= ((uint64_t)(e + 1022)) << 52;
1172 r |= m & 0xfffffffffffffU;
1173 #endif
1174
1175 return r;
1176 }
1177
1178 /* converts an ieee double/binary64 to a double */
1179 ecb_function_ double ecb_binary64_to_double (uint64_t x) ecb_const;
1180 ecb_function_ double
1181 ecb_binary64_to_double (uint64_t x)
1182 {
1183 double r;
1184
1185 #if ECB_STDFP
1186 memcpy (&r, &x, 8);
1187 #else
1188 /* emulation, only works for normals and subnormals and +0 */
1189 int neg = x >> 63;
1190 int e = (x >> 52) & 0x7ffU;
1191
1192 x &= 0xfffffffffffffU;
1193
1194 if (e)
1195 x |= 0x10000000000000U;
1196 else
1197 e = 1;
1198
1199 /* we distrust ldexp a bit and do the 2**-53 scaling by an extra multiply */
1200 r = ldexp (x * (0.5 / 0x10000000000000U), e - 1022);
1201
1202 r = neg ? -r : r;
1203 #endif
1204
1205 return r;
1206 }
1207
1208#endif
1209
932#endif 1210#endif
933 1211
934/* ECB.H END */ 1212/* ECB.H END */
935 1213
936#if ECB_MEMORY_FENCE_NEEDS_PTHREADS 1214#if ECB_MEMORY_FENCE_NEEDS_PTHREADS
1105#endif 1383#endif
1106 1384
1107static void (*syserr_cb)(const char *msg) EV_THROW; 1385static void (*syserr_cb)(const char *msg) EV_THROW;
1108 1386
1109void ecb_cold 1387void ecb_cold
1110ev_set_syserr_cb (void (*cb)(const char *msg)) EV_THROW 1388ev_set_syserr_cb (void (*cb)(const char *msg) EV_THROW) EV_THROW
1111{ 1389{
1112 syserr_cb = cb; 1390 syserr_cb = cb;
1113} 1391}
1114 1392
1115static void noinline ecb_cold 1393static void noinline ecb_cold
1133 abort (); 1411 abort ();
1134 } 1412 }
1135} 1413}
1136 1414
1137static void * 1415static void *
1138ev_realloc_emul (void *ptr, long size) 1416ev_realloc_emul (void *ptr, long size) EV_THROW
1139{ 1417{
1140#if __GLIBC__
1141 return realloc (ptr, size);
1142#else
1143 /* some systems, notably openbsd and darwin, fail to properly 1418 /* some systems, notably openbsd and darwin, fail to properly
1144 * implement realloc (x, 0) (as required by both ansi c-89 and 1419 * implement realloc (x, 0) (as required by both ansi c-89 and
1145 * the single unix specification, so work around them here. 1420 * the single unix specification, so work around them here.
1421 * recently, also (at least) fedora and debian started breaking it,
1422 * despite documenting it otherwise.
1146 */ 1423 */
1147 1424
1148 if (size) 1425 if (size)
1149 return realloc (ptr, size); 1426 return realloc (ptr, size);
1150 1427
1151 free (ptr); 1428 free (ptr);
1152 return 0; 1429 return 0;
1153#endif
1154} 1430}
1155 1431
1156static void *(*alloc)(void *ptr, long size) EV_THROW = ev_realloc_emul; 1432static void *(*alloc)(void *ptr, long size) EV_THROW = ev_realloc_emul;
1157 1433
1158void ecb_cold 1434void ecb_cold
1159ev_set_allocator (void *(*cb)(void *ptr, long size)) EV_THROW 1435ev_set_allocator (void *(*cb)(void *ptr, long size) EV_THROW) EV_THROW
1160{ 1436{
1161 alloc = cb; 1437 alloc = cb;
1162} 1438}
1163 1439
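ev_set_allocator lets the embedding application replace this realloc-style hook for all of libev's memory management; the callback must behave like realloc (allocate, resize, and free via size 0). A sketch of a retrying allocator, modelled loosely on the example in the libev documentation (persistent_realloc is an illustrative name):

    #include <stdlib.h>
    #include <unistd.h>
    #include <ev.h>

    static void *
    persistent_realloc (void *ptr, long size)
    {
      if (!size)
        {
          free (ptr);   /* behave like the emulation above for size 0 */
          return 0;
        }

      for (;;)
        {
          void *newptr = realloc (ptr, size);

          if (newptr)
            return newptr;

          sleep (60);   /* out of memory: wait and retry instead of letting libev abort */
        }
    }

    static void
    use_persistent_allocator (void)
    {
      ev_set_allocator (persistent_realloc);   /* typically done before the first loop is created */
    }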
1164inline_speed void * 1440inline_speed void *
1813static void noinline ecb_cold 2089static void noinline ecb_cold
1814evpipe_init (EV_P) 2090evpipe_init (EV_P)
1815{ 2091{
1816 if (!ev_is_active (&pipe_w)) 2092 if (!ev_is_active (&pipe_w))
1817 { 2093 {
2094 int fds [2];
2095
1818# if EV_USE_EVENTFD 2096# if EV_USE_EVENTFD
2097 fds [0] = -1;
1819 evfd = eventfd (0, EFD_NONBLOCK | EFD_CLOEXEC); 2098 fds [1] = eventfd (0, EFD_NONBLOCK | EFD_CLOEXEC);
1820 if (evfd < 0 && errno == EINVAL) 2099 if (fds [1] < 0 && errno == EINVAL)
1821 evfd = eventfd (0, 0); 2100 fds [1] = eventfd (0, 0);
1822 2101
1823 if (evfd >= 0) 2102 if (fds [1] < 0)
1824 {
1825 evpipe [0] = -1;
1826 fd_intern (evfd); /* doing it twice doesn't hurt */
1827 ev_io_set (&pipe_w, evfd, EV_READ);
1828 }
1829 else
1830# endif 2103# endif
1831 { 2104 {
1832 while (pipe (evpipe)) 2105 while (pipe (fds))
1833 ev_syserr ("(libev) error creating signal/async pipe"); 2106 ev_syserr ("(libev) error creating signal/async pipe");
1834 2107
1835 fd_intern (evpipe [0]); 2108 fd_intern (fds [0]);
1836 fd_intern (evpipe [1]);
1837 ev_io_set (&pipe_w, evpipe [0], EV_READ);
1838 } 2109 }
1839 2110
2111 evpipe [0] = fds [0];
2112
2113 if (evpipe [1] < 0)
2114 evpipe [1] = fds [1]; /* first call, set write fd */
2115 else
2116 {
2117 /* on subsequent calls, do not change evpipe [1] */
2118 /* so that evpipe_write can always rely on its value. */
2119 /* this branch does not do anything sensible on windows, */
2120 /* so must not be executed on windows */
2121
2122 dup2 (fds [1], evpipe [1]);
2123 close (fds [1]);
2124 }
2125
2126 fd_intern (evpipe [1]);
2127
2128 ev_io_set (&pipe_w, evpipe [0] < 0 ? evpipe [1] : evpipe [0], EV_READ);
1840 ev_io_start (EV_A_ &pipe_w); 2129 ev_io_start (EV_A_ &pipe_w);
1841 ev_unref (EV_A); /* watcher should not keep loop alive */ 2130 ev_unref (EV_A); /* watcher should not keep loop alive */
1842 } 2131 }
1843} 2132}
1844 2133
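The code above prefers a single eventfd over a pipe pair: one fd serves as both read and write end, the 64-bit counter coalesces multiple wakeups, and evpipe [0] is left at -1 so later code can tell the two cases apart. A standalone sketch of the same wakeup pattern (not libev API, names are illustrative), assuming Linux with eventfd support:

    #include <sys/eventfd.h>
    #include <stdint.h>
    #include <unistd.h>

    static int wake_fd;

    static void
    wake_init (void)
    {
      wake_fd = eventfd (0, EFD_NONBLOCK | EFD_CLOEXEC);
    }

    static void
    wake_signal (void)
    {
      uint64_t one = 1;
      write (wake_fd, &one, sizeof (one));   /* bumps the counter, poll reports the fd readable */
    }

    static void
    wake_drain (void)
    {
      uint64_t counter;
      read (wake_fd, &counter, sizeof (counter));   /* resets the counter to zero */
    }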
1849 2138
1850 if (expect_true (*flag)) 2139 if (expect_true (*flag))
1851 return; 2140 return;
1852 2141
1853 *flag = 1; 2142 *flag = 1;
1854
1855 ECB_MEMORY_FENCE_RELEASE; /* make sure flag is visible before the wakeup */ 2143 ECB_MEMORY_FENCE_RELEASE; /* make sure flag is visible before the wakeup */
1856 2144
1857 pipe_write_skipped = 1; 2145 pipe_write_skipped = 1;
1858 2146
1859 ECB_MEMORY_FENCE; /* make sure pipe_write_skipped is visible before we check pipe_write_wanted */ 2147 ECB_MEMORY_FENCE; /* make sure pipe_write_skipped is visible before we check pipe_write_wanted */
1860 2148
1861 if (pipe_write_wanted) 2149 if (pipe_write_wanted)
1862 { 2150 {
1863 int old_errno; 2151 int old_errno;
1864 2152
1865 pipe_write_skipped = 0; /* just an optimisation, no fence needed */ 2153 pipe_write_skipped = 0;
2154 ECB_MEMORY_FENCE_RELEASE;
1866 2155
1867 old_errno = errno; /* save errno because write will clobber it */ 2156 old_errno = errno; /* save errno because write will clobber it */
1868 2157
1869#if EV_USE_EVENTFD 2158#if EV_USE_EVENTFD
1870 if (evfd >= 0) 2159 if (evpipe [0] < 0)
1871 { 2160 {
1872 uint64_t counter = 1; 2161 uint64_t counter = 1;
1873 write (evfd, &counter, sizeof (uint64_t)); 2162 write (evpipe [1], &counter, sizeof (uint64_t));
1874 } 2163 }
1875 else 2164 else
1876#endif 2165#endif
1877 { 2166 {
1878#ifdef _WIN32 2167#ifdef _WIN32
1898 int i; 2187 int i;
1899 2188
1900 if (revents & EV_READ) 2189 if (revents & EV_READ)
1901 { 2190 {
1902#if EV_USE_EVENTFD 2191#if EV_USE_EVENTFD
1903 if (evfd >= 0) 2192 if (evpipe [0] < 0)
1904 { 2193 {
1905 uint64_t counter; 2194 uint64_t counter;
1906 read (evfd, &counter, sizeof (uint64_t)); 2195 read (evpipe [1], &counter, sizeof (uint64_t));
1907 } 2196 }
1908 else 2197 else
1909#endif 2198#endif
1910 { 2199 {
1911 char dummy[4]; 2200 char dummy[4];
1929#if EV_SIGNAL_ENABLE 2218#if EV_SIGNAL_ENABLE
1930 if (sig_pending) 2219 if (sig_pending)
1931 { 2220 {
1932 sig_pending = 0; 2221 sig_pending = 0;
1933 2222
1934 ECB_MEMORY_FENCE_RELEASE; 2223 ECB_MEMORY_FENCE;
1935 2224
1936 for (i = EV_NSIG - 1; i--; ) 2225 for (i = EV_NSIG - 1; i--; )
1937 if (expect_false (signals [i].pending)) 2226 if (expect_false (signals [i].pending))
1938 ev_feed_signal_event (EV_A_ i + 1); 2227 ev_feed_signal_event (EV_A_ i + 1);
1939 } 2228 }
1942#if EV_ASYNC_ENABLE 2231#if EV_ASYNC_ENABLE
1943 if (async_pending) 2232 if (async_pending)
1944 { 2233 {
1945 async_pending = 0; 2234 async_pending = 0;
1946 2235
1947 ECB_MEMORY_FENCE_RELEASE; 2236 ECB_MEMORY_FENCE;
1948 2237
1949 for (i = asynccnt; i--; ) 2238 for (i = asynccnt; i--; )
1950 if (asyncs [i]->sent) 2239 if (asyncs [i]->sent)
1951 { 2240 {
1952 asyncs [i]->sent = 0; 2241 asyncs [i]->sent = 0;
2242 ECB_MEMORY_FENCE_RELEASE;
1953 ev_feed_event (EV_A_ asyncs [i], EV_ASYNC); 2243 ev_feed_event (EV_A_ asyncs [i], EV_ASYNC);
1954 } 2244 }
1955 } 2245 }
1956#endif 2246#endif
1957} 2247}
1960 2250
1961void 2251void
1962ev_feed_signal (int signum) EV_THROW 2252ev_feed_signal (int signum) EV_THROW
1963{ 2253{
1964#if EV_MULTIPLICITY 2254#if EV_MULTIPLICITY
2255 EV_P;
2256 ECB_MEMORY_FENCE_ACQUIRE;
1965 EV_P = signals [signum - 1].loop; 2257 EV_A = signals [signum - 1].loop;
1966 2258
1967 if (!EV_A) 2259 if (!EV_A)
1968 return; 2260 return;
1969#endif 2261#endif
1970 2262
1971 if (!ev_active (&pipe_w))
1972 return;
1973
1974 signals [signum - 1].pending = 1; 2263 signals [signum - 1].pending = 1;
1975 evpipe_write (EV_A_ &sig_pending); 2264 evpipe_write (EV_A_ &sig_pending);
1976} 2265}
1977 2266
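ev_feed_signal is the async-signal-safe entry point: it only sets a flag and writes to the wakeup pipe, leaving the actual watcher dispatch to pipecb inside the loop. A sketch of feeding a signal from a handler installed outside of libev, assuming a loop already has an ev_signal watcher for SIGUSR1:

    #include <signal.h>
    #include <ev.h>

    static void
    raw_sigusr1_handler (int signum)
    {
      ev_feed_signal (signum);   /* safe to call from a signal handler */
    }

    static void
    install_handler (void)
    {
      signal (SIGUSR1, raw_sigusr1_handler);
    }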
1978static void 2267static void
1988void noinline 2277void noinline
1989ev_feed_signal_event (EV_P_ int signum) EV_THROW 2278ev_feed_signal_event (EV_P_ int signum) EV_THROW
1990{ 2279{
1991 WL w; 2280 WL w;
1992 2281
1993 if (expect_false (signum <= 0 || signum > EV_NSIG)) 2282 if (expect_false (signum <= 0 || signum >= EV_NSIG))
1994 return; 2283 return;
1995 2284
1996 --signum; 2285 --signum;
1997 2286
1998#if EV_MULTIPLICITY 2287#if EV_MULTIPLICITY
2002 if (expect_false (signals [signum].loop != EV_A)) 2291 if (expect_false (signals [signum].loop != EV_A))
2003 return; 2292 return;
2004#endif 2293#endif
2005 2294
2006 signals [signum].pending = 0; 2295 signals [signum].pending = 0;
2296 ECB_MEMORY_FENCE_RELEASE;
2007 2297
2008 for (w = signals [signum].head; w; w = w->next) 2298 for (w = signals [signum].head; w; w = w->next)
2009 ev_feed_event (EV_A_ (W)w, EV_SIGNAL); 2299 ev_feed_event (EV_A_ (W)w, EV_SIGNAL);
2010} 2300}
2011 2301
2295#if EV_ASYNC_ENABLE 2585#if EV_ASYNC_ENABLE
2296 async_pending = 0; 2586 async_pending = 0;
2297#endif 2587#endif
2298 pipe_write_skipped = 0; 2588 pipe_write_skipped = 0;
2299 pipe_write_wanted = 0; 2589 pipe_write_wanted = 0;
2590 evpipe [0] = -1;
2591 evpipe [1] = -1;
2300#if EV_USE_INOTIFY 2592#if EV_USE_INOTIFY
2301 fs_fd = flags & EVFLAG_NOINOTIFY ? -1 : -2; 2593 fs_fd = flags & EVFLAG_NOINOTIFY ? -1 : -2;
2302#endif 2594#endif
2303#if EV_USE_SIGNALFD 2595#if EV_USE_SIGNALFD
2304 sigfd = flags & EVFLAG_SIGNALFD ? -2 : -1; 2596 sigfd = flags & EVFLAG_SIGNALFD ? -2 : -1;
2367 if (ev_is_active (&pipe_w)) 2659 if (ev_is_active (&pipe_w))
2368 { 2660 {
2369 /*ev_ref (EV_A);*/ 2661 /*ev_ref (EV_A);*/
2370 /*ev_io_stop (EV_A_ &pipe_w);*/ 2662 /*ev_io_stop (EV_A_ &pipe_w);*/
2371 2663
2372#if EV_USE_EVENTFD
2373 if (evfd >= 0)
2374 close (evfd);
2375#endif
2376
2377 if (evpipe [0] >= 0)
2378 {
2379 EV_WIN32_CLOSE_FD (evpipe [0]); 2664 if (evpipe [0] >= 0) EV_WIN32_CLOSE_FD (evpipe [0]);
2380 EV_WIN32_CLOSE_FD (evpipe [1]); 2665 if (evpipe [1] >= 0) EV_WIN32_CLOSE_FD (evpipe [1]);
2381 }
2382 } 2666 }
2383 2667
2384#if EV_USE_SIGNALFD 2668#if EV_USE_SIGNALFD
2385 if (ev_is_active (&sigfd_w)) 2669 if (ev_is_active (&sigfd_w))
2386 close (sigfd); 2670 close (sigfd);
2472#endif 2756#endif
2473#if EV_USE_INOTIFY 2757#if EV_USE_INOTIFY
2474 infy_fork (EV_A); 2758 infy_fork (EV_A);
2475#endif 2759#endif
2476 2760
2761#if EV_SIGNAL_ENABLE || EV_ASYNC_ENABLE
2477 if (ev_is_active (&pipe_w)) 2762 if (ev_is_active (&pipe_w))
2478 { 2763 {
2479 /* pipe_write_wanted must be false now, so modifying fd vars should be safe */ 2764 /* pipe_write_wanted must be false now, so modifying fd vars should be safe */
2480 2765
2481 ev_ref (EV_A); 2766 ev_ref (EV_A);
2482 ev_io_stop (EV_A_ &pipe_w); 2767 ev_io_stop (EV_A_ &pipe_w);
2483 2768
2484#if EV_USE_EVENTFD
2485 if (evfd >= 0)
2486 close (evfd);
2487#endif
2488
2489 if (evpipe [0] >= 0) 2769 if (evpipe [0] >= 0)
2490 {
2491 EV_WIN32_CLOSE_FD (evpipe [0]); 2770 EV_WIN32_CLOSE_FD (evpipe [0]);
2492 EV_WIN32_CLOSE_FD (evpipe [1]);
2493 }
2494 2771
2495#if EV_SIGNAL_ENABLE || EV_ASYNC_ENABLE
2496 evpipe_init (EV_A); 2772 evpipe_init (EV_A);
2497 /* now iterate over everything, in case we missed something */ 2773 /* iterate over everything, in case we missed something before */
2498 pipecb (EV_A_ &pipe_w, EV_READ); 2774 ev_feed_event (EV_A_ &pipe_w, EV_CUSTOM);
2499#endif
2500 } 2775 }
2776#endif
2501 2777
2502 postfork = 0; 2778 postfork = 0;
2503} 2779}
2504 2780
2505#if EV_MULTIPLICITY 2781#if EV_MULTIPLICITY
2678} 2954}
2679 2955
2680void 2956void
2681ev_loop_fork (EV_P) EV_THROW 2957ev_loop_fork (EV_P) EV_THROW
2682{ 2958{
2683 postfork = 1; /* must be in line with ev_default_fork */ 2959 postfork = 1;
2684} 2960}
2685 2961
2686/*****************************************************************************/ 2962/*****************************************************************************/
2687 2963
2688void 2964void
2704} 2980}
2705 2981
2706void noinline 2982void noinline
2707ev_invoke_pending (EV_P) 2983ev_invoke_pending (EV_P)
2708{ 2984{
2709 for (pendingpri = NUMPRI; pendingpri--; ) /* pendingpri is modified during the loop */ 2985 pendingpri = NUMPRI;
2986
2987 while (pendingpri) /* pendingpri possibly gets modified in the inner loop */
2988 {
2989 --pendingpri;
2990
2710 while (pendingcnt [pendingpri]) 2991 while (pendingcnt [pendingpri])
2711 { 2992 {
2712 ANPENDING *p = pendings [pendingpri] + --pendingcnt [pendingpri]; 2993 ANPENDING *p = pendings [pendingpri] + --pendingcnt [pendingpri];
2713 2994
2714 p->w->pending = 0; 2995 p->w->pending = 0;
2715 EV_CB_INVOKE (p->w, p->events); 2996 EV_CB_INVOKE (p->w, p->events);
2716 EV_FREQUENT_CHECK; 2997 EV_FREQUENT_CHECK;
2717 } 2998 }
2999 }
2718} 3000}
2719 3001
2720#if EV_IDLE_ENABLE 3002#if EV_IDLE_ENABLE
2721/* make idle watchers pending. this handles the "call-idle */ 3003/* make idle watchers pending. this handles the "call-idle */
2722/* only when higher priorities are idle" logic */ 3004/* only when higher priorities are idle" logic */
2812{ 3094{
2813 EV_FREQUENT_CHECK; 3095 EV_FREQUENT_CHECK;
2814 3096
2815 while (periodiccnt && ANHE_at (periodics [HEAP0]) < ev_rt_now) 3097 while (periodiccnt && ANHE_at (periodics [HEAP0]) < ev_rt_now)
2816 { 3098 {
2817 int feed_count = 0;
2818
2819 do 3099 do
2820 { 3100 {
2821 ev_periodic *w = (ev_periodic *)ANHE_w (periodics [HEAP0]); 3101 ev_periodic *w = (ev_periodic *)ANHE_w (periodics [HEAP0]);
2822 3102
2823 /*assert (("libev: inactive timer on periodic heap detected", ev_is_active (w)));*/ 3103 /*assert (("libev: inactive timer on periodic heap detected", ev_is_active (w)));*/
3082 backend_poll (EV_A_ waittime); 3362 backend_poll (EV_A_ waittime);
3083 assert ((loop_done = EVBREAK_CANCEL, 1)); /* assert for side effect */ 3363 assert ((loop_done = EVBREAK_CANCEL, 1)); /* assert for side effect */
3084 3364
3085 pipe_write_wanted = 0; /* just an optimisation, no fence needed */ 3365 pipe_write_wanted = 0; /* just an optimisation, no fence needed */
3086 3366
3367 ECB_MEMORY_FENCE_ACQUIRE;
3087 if (pipe_write_skipped) 3368 if (pipe_write_skipped)
3088 { 3369 {
3089 assert (("libev: pipe_w not active, but pipe not written", ev_is_active (&pipe_w))); 3370 assert (("libev: pipe_w not active, but pipe not written", ev_is_active (&pipe_w)));
3090 ev_feed_event (EV_A_ &pipe_w, EV_CUSTOM); 3371 ev_feed_event (EV_A_ &pipe_w, EV_CUSTOM);
3091 } 3372 }
3469#if EV_MULTIPLICITY 3750#if EV_MULTIPLICITY
3470 assert (("libev: a signal must not be attached to two different loops", 3751 assert (("libev: a signal must not be attached to two different loops",
3471 !signals [w->signum - 1].loop || signals [w->signum - 1].loop == loop)); 3752 !signals [w->signum - 1].loop || signals [w->signum - 1].loop == loop));
3472 3753
3473 signals [w->signum - 1].loop = EV_A; 3754 signals [w->signum - 1].loop = EV_A;
3755 ECB_MEMORY_FENCE_RELEASE;
3474#endif 3756#endif
3475 3757
3476 EV_FREQUENT_CHECK; 3758 EV_FREQUENT_CHECK;
3477 3759
3478#if EV_USE_SIGNALFD 3760#if EV_USE_SIGNALFD
3633# define EV_INOTIFY_BUFSIZE (sizeof (struct inotify_event) * 2 + NAME_MAX) 3915# define EV_INOTIFY_BUFSIZE (sizeof (struct inotify_event) * 2 + NAME_MAX)
3634 3916
3635static void noinline 3917static void noinline
3636infy_add (EV_P_ ev_stat *w) 3918infy_add (EV_P_ ev_stat *w)
3637{ 3919{
3638 w->wd = inotify_add_watch (fs_fd, w->path, IN_ATTRIB | IN_DELETE_SELF | IN_MOVE_SELF | IN_MODIFY | IN_DONT_FOLLOW | IN_MASK_ADD); 3920 w->wd = inotify_add_watch (fs_fd, w->path,
3921 IN_ATTRIB | IN_DELETE_SELF | IN_MOVE_SELF | IN_MODIFY
3922 | IN_CREATE | IN_DELETE | IN_MOVED_FROM | IN_MOVED_TO
3923 | IN_DONT_FOLLOW | IN_MASK_ADD);
3639 3924
3640 if (w->wd >= 0) 3925 if (w->wd >= 0)
3641 { 3926 {
3642 struct statfs sfs; 3927 struct statfs sfs;
3643 3928
3647 3932
3648 if (!fs_2625) 3933 if (!fs_2625)
3649 w->timer.repeat = w->interval ? w->interval : DEF_STAT_INTERVAL; 3934 w->timer.repeat = w->interval ? w->interval : DEF_STAT_INTERVAL;
3650 else if (!statfs (w->path, &sfs) 3935 else if (!statfs (w->path, &sfs)
3651 && (sfs.f_type == 0x1373 /* devfs */ 3936 && (sfs.f_type == 0x1373 /* devfs */
3937 || sfs.f_type == 0x4006 /* fat */
3938 || sfs.f_type == 0x4d44 /* msdos */
3652 || sfs.f_type == 0xEF53 /* ext2/3 */ 3939 || sfs.f_type == 0xEF53 /* ext2/3 */
3940 || sfs.f_type == 0x72b6 /* jffs2 */
3941 || sfs.f_type == 0x858458f6 /* ramfs */
3942 || sfs.f_type == 0x5346544e /* ntfs */
3653 || sfs.f_type == 0x3153464a /* jfs */ 3943 || sfs.f_type == 0x3153464a /* jfs */
3944 || sfs.f_type == 0x9123683e /* btrfs */
3654 || sfs.f_type == 0x52654973 /* reiser3 */ 3945 || sfs.f_type == 0x52654973 /* reiser3 */
3655 || sfs.f_type == 0x01021994 /* tempfs */ 3946 || sfs.f_type == 0x01021994 /* tmpfs */
3656 || sfs.f_type == 0x58465342 /* xfs */)) 3947 || sfs.f_type == 0x58465342 /* xfs */))
3657 w->timer.repeat = 0.; /* filesystem is local, kernel new enough */ 3948 w->timer.repeat = 0.; /* filesystem is local, kernel new enough */
3658 else 3949 else
3659 w->timer.repeat = w->interval ? w->interval : NFS_STAT_INTERVAL; /* remote, use reduced frequency */ 3950 w->timer.repeat = w->interval ? w->interval : NFS_STAT_INTERVAL; /* remote, use reduced frequency */
3660 } 3951 }
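infy_add is only reached through ev_stat watchers: on filesystems in the whitelist above the inotify watch replaces polling entirely (timer.repeat is set to 0.), elsewhere a reduced-frequency stat timer remains as a fallback. A sketch of the kind of watcher that drives it (path and callback names are illustrative):

    #include <stdio.h>
    #include <ev.h>

    static ev_stat passwd_watcher;

    static void
    passwd_cb (struct ev_loop *loop, ev_stat *w, int revents)
    {
      if (w->attr.st_nlink)
        printf ("%s changed\n", w->path);
      else
        printf ("%s no longer exists\n", w->path);
    }

    static void
    watch_passwd (struct ev_loop *loop)
    {
      ev_stat_init (&passwd_watcher, passwd_cb, "/etc/passwd", 0.);
      ev_stat_start (loop, &passwd_watcher);
    }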
