/cvs/libecb/ecb.h

Comparing libecb/ecb.h (file contents):
Revision 1.93 by root, Tue May 29 19:10:50 2012 UTC vs.
Revision 1.109 by root, Fri Jun 29 14:05:50 2012 UTC

54 #else 54 #else
55 #define ECB_PTRSIZE 4 55 #define ECB_PTRSIZE 4
56 typedef uint32_t uintptr_t; 56 typedef uint32_t uintptr_t;
57 typedef int32_t intptr_t; 57 typedef int32_t intptr_t;
58 #endif 58 #endif
59 typedef intptr_t ptrdiff_t;
60#else 59#else
61 #include <inttypes.h> 60 #include <inttypes.h>
62 #if UINTMAX_MAX > 0xffffffffU 61 #if UINTMAX_MAX > 0xffffffffU
63 #define ECB_PTRSIZE 8 62 #define ECB_PTRSIZE 8
64 #else 63 #else
83 82
84#define ECB_C (__STDC__+0) /* this assumes that __STDC__ is either empty or a number */ 83#define ECB_C (__STDC__+0) /* this assumes that __STDC__ is either empty or a number */
85#define ECB_C99 (__STDC_VERSION__ >= 199901L) 84#define ECB_C99 (__STDC_VERSION__ >= 199901L)
86#define ECB_C11 (__STDC_VERSION__ >= 201112L) 85#define ECB_C11 (__STDC_VERSION__ >= 201112L)
87#define ECB_CPP (__cplusplus+0) 86#define ECB_CPP (__cplusplus+0)
88#define ECB_CPP98 (__cplusplus >= 199711L)
89#define ECB_CPP11 (__cplusplus >= 201103L) 87#define ECB_CPP11 (__cplusplus >= 201103L)
88
89#if ECB_CPP
90 #define ECB_EXTERN_C extern "C"
91 #define ECB_EXTERN_C_BEG ECB_EXTERN_C {
92 #define ECB_EXTERN_C_END }
93#else
94 #define ECB_EXTERN_C extern
95 #define ECB_EXTERN_C_BEG
96 #define ECB_EXTERN_C_END
97#endif
90 98
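The ECB_EXTERN_C* macros added above give one spelling for C-linkage declarations that works from both C and C++ translation units. A minimal usage sketch follows, assuming ecb.h is on the include path; the my_lib_* names are invented for illustration and are not part of libecb:

  #include "ecb.h"

  /* a whole block of declarations: wrapped in extern "C" { ... } under C++, left as-is under C */
  ECB_EXTERN_C_BEG

  void my_lib_init (void);      /* hypothetical API functions, illustration only */
  void my_lib_shutdown (void);

  ECB_EXTERN_C_END

  /* single-declaration form: 'extern "C" int ...' in C++, plain 'extern int ...' in C */
  ECB_EXTERN_C int my_lib_flag;
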
91/*****************************************************************************/ 99/*****************************************************************************/
92 100
93/* ECB_NO_THREADS - ecb is not used by multiple threads, ever */ 101/* ECB_NO_THREADS - ecb is not used by multiple threads, ever */
94/* ECB_NO_SMP - ecb might be used in multiple threads, but only on a single cpu */ 102/* ECB_NO_SMP - ecb might be used in multiple threads, but only on a single cpu */
95 103
96#if ECB_NO_THREADS 104#if ECB_NO_THREADS
97# define ECB_NO_SMP 1 105 #define ECB_NO_SMP 1
98#endif 106#endif
99 107
100#if ECB_NO_SMP 108#if ECB_NO_SMP
101 #define ECB_MEMORY_FENCE do { } while (0) 109 #define ECB_MEMORY_FENCE do { } while (0)
102#endif
103
104#ifndef ECB_MEMORY_FENCE
105 #if ECB_C11 && !defined __STDC_NO_ATOMICS__
106 /* we assume that these memory fences work on all variables/all memory accesses, */
107 /* not just C11 atomics and atomic accesses */
108 #include <stdatomic.h>
109 #if 0
110 #define ECB_MEMORY_FENCE atomic_thread_fence (memory_order_acq_rel)
111 #define ECB_MEMORY_FENCE_ACQUIRE atomic_thread_fence (memory_order_acquire)
112 #define ECB_MEMORY_FENCE_RELEASE atomic_thread_fence (memory_order_release)
113 #else
114 /* the above *should* be enough in my book, but after experiences with gcc-4.7 */
115 /* and clang, better play safe */
116 #define ECB_MEMORY_FENCE atomic_thread_fence (memory_order_seq_cst)
117 #endif
118 #endif
119#endif 110#endif
120 111
121#ifndef ECB_MEMORY_FENCE 112#ifndef ECB_MEMORY_FENCE
122 #if ECB_GCC_VERSION(2,5) || defined __INTEL_COMPILER || (__llvm__ && __GNUC__) || __SUNPRO_C >= 0x5110 || __SUNPRO_CC >= 0x5110 113 #if ECB_GCC_VERSION(2,5) || defined __INTEL_COMPILER || (__llvm__ && __GNUC__) || __SUNPRO_C >= 0x5110 || __SUNPRO_CC >= 0x5110
123 #if __i386 || __i386__ 114 #if __i386 || __i386__
124 #define ECB_MEMORY_FENCE __asm__ __volatile__ ("lock; orb $0, -1(%%esp)" : : : "memory") 115 #define ECB_MEMORY_FENCE __asm__ __volatile__ ("lock; orb $0, -1(%%esp)" : : : "memory")
125 #define ECB_MEMORY_FENCE_ACQUIRE ECB_MEMORY_FENCE /* non-lock xchg might be enough */ 116 #define ECB_MEMORY_FENCE_ACQUIRE __asm__ __volatile__ ("" : : : "memory")
126 #define ECB_MEMORY_FENCE_RELEASE do { } while (0) /* unlikely to change in future cpus */ 117 #define ECB_MEMORY_FENCE_RELEASE __asm__ __volatile__ ("")
127 #elif __amd64 || __amd64__ || __x86_64 || __x86_64__ 118 #elif __amd64 || __amd64__ || __x86_64 || __x86_64__
128 #define ECB_MEMORY_FENCE __asm__ __volatile__ ("mfence" : : : "memory") 119 #define ECB_MEMORY_FENCE __asm__ __volatile__ ("mfence" : : : "memory")
129 #define ECB_MEMORY_FENCE_ACQUIRE __asm__ __volatile__ ("lfence" : : : "memory") 120 #define ECB_MEMORY_FENCE_ACQUIRE __asm__ __volatile__ ("" : : : "memory")
130 #define ECB_MEMORY_FENCE_RELEASE __asm__ __volatile__ ("sfence") /* play safe - not needed in any current cpu */ 121 #define ECB_MEMORY_FENCE_RELEASE __asm__ __volatile__ ("")
131 #elif __powerpc__ || __ppc__ || __powerpc64__ || __ppc64__ 122 #elif __powerpc__ || __ppc__ || __powerpc64__ || __ppc64__
132 #define ECB_MEMORY_FENCE __asm__ __volatile__ ("sync" : : : "memory") 123 #define ECB_MEMORY_FENCE __asm__ __volatile__ ("sync" : : : "memory")
133 #elif defined __ARM_ARCH_6__ || defined __ARM_ARCH_6J__ \ 124 #elif defined __ARM_ARCH_6__ || defined __ARM_ARCH_6J__ \
134 || defined __ARM_ARCH_6K__ || defined __ARM_ARCH_6ZK__ 125 || defined __ARM_ARCH_6K__ || defined __ARM_ARCH_6ZK__
135 #define ECB_MEMORY_FENCE __asm__ __volatile__ ("mcr p15,0,%0,c7,c10,5" : : "r" (0) : "memory") 126 #define ECB_MEMORY_FENCE __asm__ __volatile__ ("mcr p15,0,%0,c7,c10,5" : : "r" (0) : "memory")
136 #elif defined __ARM_ARCH_7__ || defined __ARM_ARCH_7A__ \ 127 #elif defined __ARM_ARCH_7__ || defined __ARM_ARCH_7A__ \
137 || defined __ARM_ARCH_7M__ || defined __ARM_ARCH_7R__ 128 || defined __ARM_ARCH_7M__ || defined __ARM_ARCH_7R__
138 #define ECB_MEMORY_FENCE __asm__ __volatile__ ("dmb" : : : "memory") 129 #define ECB_MEMORY_FENCE __asm__ __volatile__ ("dmb" : : : "memory")
139 #elif __sparc || __sparc__ 130 #elif __sparc || __sparc__
140 #define ECB_MEMORY_FENCE __asm__ __volatile__ ("membar #LoadStore | #LoadLoad | #StoreStore | #StoreLoad | " : : : "memory") 131 #define ECB_MEMORY_FENCE __asm__ __volatile__ ("membar #LoadStore | #LoadLoad | #StoreStore | #StoreLoad" : : : "memory")
141 #define ECB_MEMORY_FENCE_ACQUIRE __asm__ __volatile__ ("membar #LoadStore | #LoadLoad" : : : "memory") 132 #define ECB_MEMORY_FENCE_ACQUIRE __asm__ __volatile__ ("membar #LoadStore | #LoadLoad" : : : "memory")
142 #define ECB_MEMORY_FENCE_RELEASE __asm__ __volatile__ ("membar #LoadStore | #StoreStore") 133 #define ECB_MEMORY_FENCE_RELEASE __asm__ __volatile__ ("membar #LoadStore | #StoreStore")
143 #elif defined __s390__ || defined __s390x__ 134 #elif defined __s390__ || defined __s390x__
144 #define ECB_MEMORY_FENCE __asm__ __volatile__ ("bcr 15,0" : : : "memory") 135 #define ECB_MEMORY_FENCE __asm__ __volatile__ ("bcr 15,0" : : : "memory")
145 #elif defined __mips__ 136 #elif defined __mips__
146 #define ECB_MEMORY_FENCE __asm__ __volatile__ ("sync" : : : "memory") 137 #define ECB_MEMORY_FENCE __asm__ __volatile__ ("sync" : : : "memory")
147 #elif defined __alpha__ 138 #elif defined __alpha__
148 #define ECB_MEMORY_FENCE __asm__ __volatile__ ("mb" : : : "memory") 139 #define ECB_MEMORY_FENCE __asm__ __volatile__ ("mb" : : : "memory")
140 #elif defined __hppa__
141 #define ECB_MEMORY_FENCE __asm__ __volatile__ ("" : : : "memory")
142 #define ECB_MEMORY_FENCE_RELEASE __asm__ __volatile__ ("")
143 #elif defined __ia64__
144 #define ECB_MEMORY_FENCE __asm__ __volatile__ ("mf" : : : "memory")
149 #endif 145 #endif
150 #endif 146 #endif
151#endif 147#endif
152 148
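The fence macros above are meant to be used in matched pairs: a writer publishes data before a flag with a release fence, a reader checks the flag and only then reads the data after an acquire fence, and ECB_MEMORY_FENCE is the full barrier covering both directions. A minimal sketch of that pairing follows; it assumes ecb.h is on the include path and that the acquire/release variants end up defined for the target (the header is expected to fall back to the full fence for architectures whose block above only defines ECB_MEMORY_FENCE; that fallback is outside this hunk). The producer/consumer functions are invented for illustration:

  #include "ecb.h"

  static int shared_data;          /* payload handed from producer to consumer */
  static volatile int data_ready;  /* flag that signals the hand-over */

  static void
  produce (void)
  {
    shared_data = 42;
    ECB_MEMORY_FENCE_RELEASE;      /* make the store to shared_data visible before the flag */
    data_ready = 1;
  }

  static int
  consume (void)
  {
    while (!data_ready)
      ;                            /* spin until the producer sets the flag (illustration only) */

    ECB_MEMORY_FENCE_ACQUIRE;      /* do not let the read of shared_data move before the flag check */
    return shared_data;
  }
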
153#ifndef ECB_MEMORY_FENCE 149#ifndef ECB_MEMORY_FENCE
154 #if ECB_GCC_VERSION(4,7) 150 #if ECB_GCC_VERSION(4,7)
155 /* unsolved mystery: ACQ_REL should be enough, but doesn't generate any code */ 151 /* see comment below (stdatomic.h) about the C11 memory model. */
156 /* which in turn actually breaks libev */
157 #define ECB_MEMORY_FENCE __atomic_thread_fence (__ATOMIC_SEQ_CST) 152 #define ECB_MEMORY_FENCE __atomic_thread_fence (__ATOMIC_SEQ_CST)
158 #elif defined __clang && __has_feature (cxx_atomic) 153 /*#elif defined __clang && __has_feature (cxx_atomic)*/
159 /* see above */ 154 /* see comment below (stdatomic.h) about the C11 memory model. */
160 #define ECB_MEMORY_FENCE __c11_atomic_thread_fence (__ATOMIC_SEQ_CST) 155 /*#define ECB_MEMORY_FENCE __c11_atomic_thread_fence (__ATOMIC_SEQ_CST)*/
161 #elif ECB_GCC_VERSION(4,4) || defined __INTEL_COMPILER || defined __clang__ 156 #elif ECB_GCC_VERSION(4,4) || defined __INTEL_COMPILER || defined __clang__
162 #define ECB_MEMORY_FENCE __sync_synchronize () 157 #define ECB_MEMORY_FENCE __sync_synchronize ()
163 /*#define ECB_MEMORY_FENCE_ACQUIRE ({ char dummy = 0; __sync_lock_test_and_set (&dummy, 1); }) */
164 /*#define ECB_MEMORY_FENCE_RELEASE ({ char dummy = 1; __sync_lock_release (&dummy ); }) */
165 #elif _MSC_VER >= 1400 /* VC++ 2005 */ 158 #elif _MSC_VER >= 1400 /* VC++ 2005 */
166 #pragma intrinsic(_ReadBarrier,_WriteBarrier,_ReadWriteBarrier) 159 #pragma intrinsic(_ReadBarrier,_WriteBarrier,_ReadWriteBarrier)
167 #define ECB_MEMORY_FENCE _ReadWriteBarrier () 160 #define ECB_MEMORY_FENCE _ReadWriteBarrier ()
168 #define ECB_MEMORY_FENCE_ACQUIRE _ReadWriteBarrier () /* according to msdn, _ReadBarrier is not a load fence */ 161 #define ECB_MEMORY_FENCE_ACQUIRE _ReadWriteBarrier () /* according to msdn, _ReadBarrier is not a load fence */
169 #define ECB_MEMORY_FENCE_RELEASE _WriteBarrier () 162 #define ECB_MEMORY_FENCE_RELEASE _WriteBarrier ()
179 #define ECB_MEMORY_FENCE __sync () 172 #define ECB_MEMORY_FENCE __sync ()
180 #endif 173 #endif
181#endif 174#endif
182 175
183#ifndef ECB_MEMORY_FENCE 176#ifndef ECB_MEMORY_FENCE
177 #if ECB_C11 && !defined __STDC_NO_ATOMICS__
178 /* we assume that these memory fences work on all variables/all memory accesses, */
179 /* not just C11 atomics and atomic accesses */
180 #include <stdatomic.h>
181 /* Unfortunately, neither gcc 4.7 nor clang 3.1 generate any instructions for */
182 /* any fence other than seq_cst, which isn't very efficient for us. */
183 /* Why that is, we don't know - either the C11 memory model is quite useless */
184 /* for most usages, or gcc and clang have a bug */
185 /* I *currently* lean towards the latter, and inefficiently implement */
186 /* all three of ecb's fences as a seq_cst fence */
187 #define ECB_MEMORY_FENCE atomic_thread_fence (memory_order_seq_cst)
188 #endif
189#endif
190
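In plain C11 terms, the fallback above makes all three of ecb's fences behave like a sequentially consistent atomic_thread_fence, even though, as the comments note, acquire and release fences should be enough for the weaker two. A small sketch of that distinction, independent of ecb.h:

  #include <stdatomic.h>

  static void
  fence_flavours (void)
  {
    atomic_thread_fence (memory_order_acquire); /* what ECB_MEMORY_FENCE_ACQUIRE would need in theory */
    atomic_thread_fence (memory_order_release); /* what ECB_MEMORY_FENCE_RELEASE would need in theory */
    atomic_thread_fence (memory_order_seq_cst); /* the full fence this fallback actually uses */
  }
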
191#ifndef ECB_MEMORY_FENCE
184 #if !ECB_AVOID_PTHREADS 192 #if !ECB_AVOID_PTHREADS
185 /* 193 /*
186 * if you get undefined symbol references to pthread_mutex_lock, 194 * if you get undefined symbol references to pthread_mutex_lock,
187 * or failure to find pthread.h, then you should implement 195 * or failure to find pthread.h, then you should implement
188 * the ECB_MEMORY_FENCE operations for your cpu/compiler 196 * the ECB_MEMORY_FENCE operations for your cpu/compiler
460 ecb_inline void ecb_unreachable (void) ecb_noreturn; 468 ecb_inline void ecb_unreachable (void) ecb_noreturn;
461 ecb_inline void ecb_unreachable (void) { } 469 ecb_inline void ecb_unreachable (void) { }
462#endif 470#endif
463 471
464/* try to tell the compiler that some condition is definitely true */ 472/* try to tell the compiler that some condition is definitely true */
465#define ecb_assume(cond) do { if (!(cond)) ecb_unreachable (); } while (0) 473#define ecb_assume(cond) if (!(cond)) ecb_unreachable (); else 0
466 474
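ecb_assume feeds the optimiser a promise: if the condition ever turns out to be false, ecb_unreachable makes the behaviour undefined, so it must only state things the caller really guarantees. A small sketch, assuming ecb.h is on the include path; the function and its contract are invented for illustration:

  #include "ecb.h"

  /* caller guarantees len is a non-zero multiple of 4 */
  static void
  scale4 (float *buf, unsigned int len)
  {
    ecb_assume (len > 0 && len % 4 == 0); /* compiler may drop zero-trip and odd-tail handling */

    for (unsigned int i = 0; i < len; ++i)
      buf [i] *= 2.0f;
  }
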
467ecb_inline unsigned char ecb_byteorder_helper (void) ecb_const; 475ecb_inline unsigned char ecb_byteorder_helper (void) ecb_const;
468ecb_inline unsigned char 476ecb_inline unsigned char
469ecb_byteorder_helper (void) 477ecb_byteorder_helper (void)
470{ 478{
471 const uint32_t u = 0x11223344; 479 /* the union code still generates code under pressure in gcc, */
 472   return *(unsigned char *)&u;                               480   /* but less than using pointers, and always seems to */
481 /* successfully return a constant. */
482 /* the reason why we have this horrible preprocessor mess */
483 /* is to avoid it in all cases, at least on common architectures */
484 /* and yes, gcc defines __BYTE_ORDER__, g++ does not */
 485#if __i386 || __i386__ || _M_IX86 || __amd64 || __amd64__ || _M_X64
486 return 0x44;
487#elif __BYTE_ORDER__ && __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
488 return 0x44;
489#elif __BYTE_ORDER__ && __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
 490        return 0x11;
491#else
492 union
493 {
494 uint32_t i;
495 uint8_t c;
496 } u = { 0x11223344 };
497 return u.c;
498#endif
473} 499}
474 500
475ecb_inline ecb_bool ecb_big_endian (void) ecb_const; 501ecb_inline ecb_bool ecb_big_endian (void) ecb_const;
476ecb_inline ecb_bool ecb_big_endian (void) { return ecb_byteorder_helper () == 0x11; } 502ecb_inline ecb_bool ecb_big_endian (void) { return ecb_byteorder_helper () == 0x11; }
477ecb_inline ecb_bool ecb_little_endian (void) ecb_const; 503ecb_inline ecb_bool ecb_little_endian (void) ecb_const;
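Both predicates simply compare the helper's return value against the first byte of the constant 0x11223344 as stored in memory: 0x11 means big endian, 0x44 little endian. A small self-test, assuming ecb.h is on the include path:

  #include <stdio.h>
  #include "ecb.h"

  int
  main (void)
  {
    if (ecb_big_endian ())
      puts ("big endian: 0x11223344 is stored with the 0x11 byte first");
    else if (ecb_little_endian ())
      puts ("little endian: 0x11223344 is stored with the 0x44 byte first");
    else
      puts ("neither - a mixed-endian or otherwise exotic target");

    return 0;
  }
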
508 } 534 }
509#else 535#else
510 #define ecb_array_length(name) (sizeof (name) / sizeof (name [0])) 536 #define ecb_array_length(name) (sizeof (name) / sizeof (name [0]))
511#endif 537#endif
512 538
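ecb_array_length yields the element count of a true array (not a pointer), via a template overload in C++ (not shown in this hunk) and the classic sizeof division in C. A one-function sketch, assuming ecb.h is on the include path:

  #include "ecb.h"

  static const int primes [] = { 2, 3, 5, 7, 11 };

  static int
  sum_primes (void)
  {
    int sum = 0;
    unsigned int i;

    for (i = 0; i < ecb_array_length (primes); ++i) /* ecb_array_length (primes) is 5 here */
      sum += primes [i];

    return sum;
  }
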
539/*******************************************************************************/
540/* floating point stuff, can be disabled by defining ECB_NO_LIBM */
541
542/* basically, everything uses "ieee pure-endian" floating point numbers */
543/* the only noteworthy exception is ancient armle, which uses order 43218765 */
544#if 0 \
545 || __i386 || __i386__ \
546 || __amd64 || __amd64__ || __x86_64 || __x86_64__ \
547 || __powerpc__ || __ppc__ || __powerpc64__ || __ppc64__ \
548 || defined __arm__ && defined __ARM_EABI__ \
549 || defined __s390__ || defined __s390x__ \
550 || defined __mips__ \
551 || defined __alpha__ \
552 || defined __hppa__ \
553 || defined __ia64__ \
554 || defined _M_IX86 || defined _M_AMD64 || defined _M_IA64
555 #define ECB_STDFP 1
556 #include <string.h> /* for memcpy */
557#else
558 #define ECB_STDFP 0
559 #include <math.h> /* for frexp*, ldexp* */
560#endif
561
562#ifndef ECB_NO_LIBM
563
564 /* convert a float to ieee single/binary32 */
565 ecb_function_ uint32_t ecb_float_to_binary32 (float x) ecb_const;
566 ecb_function_ uint32_t
567 ecb_float_to_binary32 (float x)
568 {
569 uint32_t r;
570
571 #if ECB_STDFP
572 memcpy (&r, &x, 4);
573 #else
574 /* slow emulation, works for anything but -0 */
575 uint32_t m;
576 int e;
577
578 if (x == 0e0f ) return 0x00000000U;
579 if (x > +3.40282346638528860e+38f) return 0x7f800000U;
580 if (x < -3.40282346638528860e+38f) return 0xff800000U;
581 if (x != x ) return 0x7fbfffffU;
582
583 m = frexpf (x, &e) * 0x1000000U;
584
585 r = m & 0x80000000U;
586
587 if (r)
588 m = -m;
589
590 if (e <= -126)
591 {
592 m &= 0xffffffU;
593 m >>= (-125 - e);
594 e = -126;
595 }
596
597 r |= (e + 126) << 23;
598 r |= m & 0x7fffffU;
513#endif 599 #endif
514 600
601 return r;
602 }
603
604 /* converts an ieee single/binary32 to a float */
605 ecb_function_ float ecb_binary32_to_float (uint32_t x) ecb_const;
606 ecb_function_ float
607 ecb_binary32_to_float (uint32_t x)
608 {
609 float r;
610
611 #if ECB_STDFP
612 memcpy (&r, &x, 4);
613 #else
614 /* emulation, only works for normals and subnormals and +0 */
615 int neg = x >> 31;
616 int e = (x >> 23) & 0xffU;
617
618 x &= 0x7fffffU;
619
620 if (e)
621 x |= 0x800000U;
622 else
623 e = 1;
624
625 /* we distrust ldexpf a bit and do the 2**-24 scaling by an extra multiply */
626 r = ldexpf (x * (0.5f / 0x800000U), e - 126);
627
628 r = neg ? -r : r;
629 #endif
630
631 return r;
632 }
633
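The pair above converts between a native float and its IEEE-754 binary32 bit pattern, either by memcpy when ECB_STDFP is set or through the frexpf/ldexpf emulation (which then needs libm). A round-trip sketch, assuming ecb.h is on the include path; the values are chosen purely for illustration:

  #include <stdio.h>
  #include "ecb.h"

  int
  main (void)
  {
    /* 1.0f encodes as 0x3f800000: sign 0, biased exponent 127, mantissa 0 */
    uint32_t bits = ecb_float_to_binary32 (1.0f);
    float    back = ecb_binary32_to_float (bits);

    printf ("1.0f -> 0x%08x -> %g\n", (unsigned) bits, back);

    /* -0.0f is the one case the emulation path documents as unsupported (fine with ECB_STDFP) */
    printf ("-0.0f -> 0x%08x\n", (unsigned) ecb_float_to_binary32 (-0.0f));

    return 0;
  }
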
634 /* convert a double to ieee double/binary64 */
635 ecb_function_ uint64_t ecb_double_to_binary64 (double x) ecb_const;
636 ecb_function_ uint64_t
637 ecb_double_to_binary64 (double x)
638 {
639 uint64_t r;
640
641 #if ECB_STDFP
642 memcpy (&r, &x, 8);
643 #else
644 /* slow emulation, works for anything but -0 */
645 uint64_t m;
646 int e;
647
648 if (x == 0e0 ) return 0x0000000000000000U;
649 if (x > +1.79769313486231470e+308) return 0x7ff0000000000000U;
650 if (x < -1.79769313486231470e+308) return 0xfff0000000000000U;
 651        if (x != x                      ) return 0x7ff7ffffffffffffU;
652
653 m = frexp (x, &e) * 0x20000000000000U;
654
 655        r = m & 0x8000000000000000U;
656
657 if (r)
658 m = -m;
659
660 if (e <= -1022)
661 {
662 m &= 0x1fffffffffffffU;
663 m >>= (-1021 - e);
664 e = -1022;
665 }
666
667 r |= ((uint64_t)(e + 1022)) << 52;
668 r |= m & 0xfffffffffffffU;
669 #endif
670
671 return r;
672 }
673
674 /* converts an ieee double/binary64 to a double */
675 ecb_function_ double ecb_binary64_to_double (uint64_t x) ecb_const;
676 ecb_function_ double
677 ecb_binary64_to_double (uint64_t x)
678 {
679 double r;
680
681 #if ECB_STDFP
682 memcpy (&r, &x, 8);
683 #else
684 /* emulation, only works for normals and subnormals and +0 */
685 int neg = x >> 63;
686 int e = (x >> 52) & 0x7ffU;
687
688 x &= 0xfffffffffffffU;
689
690 if (e)
691 x |= 0x10000000000000U;
692 else
693 e = 1;
694
695 /* we distrust ldexp a bit and do the 2**-53 scaling by an extra multiply */
696 r = ldexp (x * (0.5 / 0x10000000000000U), e - 1022);
697
698 r = neg ? -r : r;
699 #endif
700
701 return r;
702 }
703
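The binary64 variant follows the same pattern with the wider field layout: 1 sign bit, 11 exponent bits with bias 1023, 52 mantissa bits. A sketch that picks those fields apart and checks the round trip, assuming ecb.h is on the include path and an exactly representable sample value:

  #include <stdio.h>
  #include "ecb.h"

  int
  main (void)
  {
    /* -6.25 = -1.5625 * 2^2: sign 1, biased exponent 1023 + 2 = 1025, mantissa 0.5625 * 2^52 */
    uint64_t b   = ecb_double_to_binary64 (-6.25);
    int      sig = b >> 63;
    int      exp = (b >> 52) & 0x7ffU;
    uint64_t man = b & 0xfffffffffffffU;     /* the 52 stored bits, without the implicit leading 1 */

    printf ("sign %d, exponent %d (unbiased %d), mantissa 0x%013llx\n",
            sig, exp, exp - 1023, (unsigned long long) man);

    return ecb_binary64_to_double (b) == -6.25 ? 0 : 1; /* 0 when the round trip is exact */
  }
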
704#endif
705
706#endif
707
