…

#define ECB_C (__STDC__+0) /* this assumes that __STDC__ is either empty or a number */
#define ECB_C99 (__STDC_VERSION__ >= 199901L)
#define ECB_C11 (__STDC_VERSION__ >= 201112L)
#define ECB_CPP (__cplusplus+0)
#define ECB_CPP98 (__cplusplus >= 199711L)
#define ECB_CPP11 (__cplusplus >= 201103L)

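/* Illustrative example (not something ecb.h itself does): the version macros above
 * are meant for feature selection. C11 guarantees _Static_assert, so a user of this
 * header could write:
 *
 *   #if ECB_C11
 *     _Static_assert (sizeof (int) >= 4, "int is too small");
 *   #endif
 *
 * ECB_CPP98/ECB_CPP11 serve the same purpose for C++ translation units.
 */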
91 | /*****************************************************************************/ |
90 | /*****************************************************************************/ |
92 | |
91 | |
93 | /* ECB_NO_THREADS - ecb is not used by multiple threads, ever */ |
92 | /* ECB_NO_THREADS - ecb is not used by multiple threads, ever */ |
94 | /* ECB_NO_SMP - ecb might be used in multiple threads, but only on a single cpu */ |
93 | /* ECB_NO_SMP - ecb might be used in multiple threads, but only on a single cpu */ |
95 | |
94 | |
96 | #if ECB_NO_THREADS |
95 | #if ECB_NO_THREADS |
97 | # define ECB_NO_SMP 1 |
96 | #define ECB_NO_SMP 1 |
98 | #endif |
97 | #endif |
99 | |
98 | |
100 | #if ECB_NO_THREADS || ECB_NO_SMP |
99 | #if ECB_NO_SMP |
101 | #define ECB_MEMORY_FENCE do { } while (0) |
100 | #define ECB_MEMORY_FENCE do { } while (0) |
102 | #endif |
|
|
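/* Usage sketch (illustrative only; all names below are hypothetical and not defined
 * by ecb.h): the three fences are meant to be paired around a publish/consume
 * handshake, ignoring here how the flag itself is made atomic:
 *
 *   // producer
 *   shared_data = compute ();
 *   ECB_MEMORY_FENCE_RELEASE;   // commit the data before the flag becomes visible
 *   shared_ready = 1;
 *
 *   // consumer
 *   if (shared_ready)
 *     {
 *       ECB_MEMORY_FENCE_ACQUIRE; // see everything written before the flag was set
 *       consume (shared_data);
 *     }
 *
 * ECB_MEMORY_FENCE is the full barrier covering both directions; with ECB_NO_SMP,
 * ECB_MEMORY_FENCE itself collapses to a no-op, as defined above.
 */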

#ifndef ECB_MEMORY_FENCE
  #if ECB_GCC_VERSION(2,5) || defined __INTEL_COMPILER || (__llvm__ && __GNUC__) || __SUNPRO_C >= 0x5110 || __SUNPRO_CC >= 0x5110
    #if __i386 || __i386__
      #define ECB_MEMORY_FENCE __asm__ __volatile__ ("lock; orb $0, -1(%%esp)" : : : "memory")
      #define ECB_MEMORY_FENCE_ACQUIRE __asm__ __volatile__ ("" : : : "memory")
      #define ECB_MEMORY_FENCE_RELEASE __asm__ __volatile__ ("")
    #elif __amd64 || __amd64__ || __x86_64 || __x86_64__
      #define ECB_MEMORY_FENCE __asm__ __volatile__ ("mfence" : : : "memory")
      #define ECB_MEMORY_FENCE_ACQUIRE __asm__ __volatile__ ("" : : : "memory")
      #define ECB_MEMORY_FENCE_RELEASE __asm__ __volatile__ ("")
    #elif __powerpc__ || __ppc__ || __powerpc64__ || __ppc64__
      #define ECB_MEMORY_FENCE __asm__ __volatile__ ("sync" : : : "memory")
    #elif defined __ARM_ARCH_6__ || defined __ARM_ARCH_6J__ \
       || defined __ARM_ARCH_6K__ || defined __ARM_ARCH_6ZK__
      #define ECB_MEMORY_FENCE __asm__ __volatile__ ("mcr p15,0,%0,c7,c10,5" : : "r" (0) : "memory")
    #elif defined __ARM_ARCH_7__ || defined __ARM_ARCH_7A__ \
       || defined __ARM_ARCH_7M__ || defined __ARM_ARCH_7R__
      #define ECB_MEMORY_FENCE __asm__ __volatile__ ("dmb" : : : "memory")
    #elif __sparc || __sparc__
      #define ECB_MEMORY_FENCE __asm__ __volatile__ ("membar #LoadStore | #LoadLoad | #StoreStore | #StoreLoad" : : : "memory")
      #define ECB_MEMORY_FENCE_ACQUIRE __asm__ __volatile__ ("membar #LoadStore | #LoadLoad" : : : "memory")
      #define ECB_MEMORY_FENCE_RELEASE __asm__ __volatile__ ("membar #LoadStore | #StoreStore")
    #elif defined __s390__ || defined __s390x__
      #define ECB_MEMORY_FENCE __asm__ __volatile__ ("bcr 15,0" : : : "memory")
    #elif defined __mips__
      #define ECB_MEMORY_FENCE __asm__ __volatile__ ("sync" : : : "memory")
    #elif defined __alpha__
      #define ECB_MEMORY_FENCE __asm__ __volatile__ ("mb" : : : "memory")
    #elif defined __hppa__
      #define ECB_MEMORY_FENCE __asm__ __volatile__ ("" : : : "memory")
      #define ECB_MEMORY_FENCE_RELEASE __asm__ __volatile__ ("")
    #elif defined __ia64__
      #define ECB_MEMORY_FENCE __asm__ __volatile__ ("mf" : : : "memory")
    #endif
  #endif
#endif
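/* Sketch (hypothetical usage, not a documented ecb.h interface): since this block
 * and the fallbacks below are all wrapped in #ifndef ECB_MEMORY_FENCE, a port whose
 * cpu/compiler combination is not covered can supply its own barriers before
 * including the header, e.g.:
 *
 *   #define ECB_MEMORY_FENCE         my_full_barrier ()
 *   #define ECB_MEMORY_FENCE_ACQUIRE my_acquire_barrier ()
 *   #define ECB_MEMORY_FENCE_RELEASE my_release_barrier ()
 *   #include "ecb.h"
 *
 * The my_*_barrier () names are placeholders for whatever the target provides.
 */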

#ifndef ECB_MEMORY_FENCE
  #if ECB_GCC_VERSION(4,7)
    /* see comment below (stdatomic.h) about the C11 memory model. */
    #define ECB_MEMORY_FENCE __atomic_thread_fence (__ATOMIC_SEQ_CST)
  #elif defined __clang__ && __has_feature (cxx_atomic)
    /* see comment below (stdatomic.h) about the C11 memory model. */
    #define ECB_MEMORY_FENCE __c11_atomic_thread_fence (__ATOMIC_SEQ_CST)
  #elif ECB_GCC_VERSION(4,4) || defined __INTEL_COMPILER || defined __clang__
    #define ECB_MEMORY_FENCE __sync_synchronize ()
    /*#define ECB_MEMORY_FENCE_ACQUIRE ({ char dummy = 0; __sync_lock_test_and_set (&dummy, 1); }) */
    /*#define ECB_MEMORY_FENCE_RELEASE ({ char dummy = 1; __sync_lock_release (&dummy); }) */
  #elif _MSC_VER >= 1400 /* VC++ 2005 */
    #pragma intrinsic(_ReadBarrier,_WriteBarrier,_ReadWriteBarrier)
    #define ECB_MEMORY_FENCE _ReadWriteBarrier ()
    #define ECB_MEMORY_FENCE_ACQUIRE _ReadWriteBarrier () /* according to msdn, _ReadBarrier is not a load fence */
    #define ECB_MEMORY_FENCE_RELEASE _WriteBarrier ()
…
    #define ECB_MEMORY_FENCE __sync ()
  #endif
#endif
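/* One way (sketch, not part of ecb.h) to check what a given compiler really emits
 * for these fences: compile a file such as
 *
 *   #include <stdatomic.h>
 *   void fence_release (void) { atomic_thread_fence (memory_order_release); }
 *   void fence_seqcst  (void) { atomic_thread_fence (memory_order_seq_cst); }
 *
 * with "cc -O2 -S" and compare the assembly of the two functions; the comment in
 * the stdatomic-based fallback below records this kind of observation for
 * gcc 4.7 and clang 3.1.
 */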

#ifndef ECB_MEMORY_FENCE
  #if ECB_C11 && !defined __STDC_NO_ATOMICS__
    /* we assume that these memory fences work on all variables/all memory accesses, */
    /* not just C11 atomics and atomic accesses */
    #include <stdatomic.h>
    /* Unfortunately, neither gcc 4.7 nor clang 3.1 generate any instructions for */
    /* any fence other than seq_cst, which isn't very efficient for us. */
    /* Why that is, we don't know - either the C11 memory model is quite useless */
    /* for most usages, or gcc and clang have a bug. */
    /* I *currently* lean towards the latter, and inefficiently implement */
    /* all three of ecb's fences as a seq_cst fence */
    #define ECB_MEMORY_FENCE atomic_thread_fence (memory_order_seq_cst)
  #endif
#endif

#ifndef ECB_MEMORY_FENCE
  #if !ECB_AVOID_PTHREADS
    /*
     * if you get undefined symbol references to pthread_mutex_lock,
     * or failure to find pthread.h, then you should implement
     * the ECB_MEMORY_FENCE operations for your cpu/compiler
…

ecb_inline unsigned char ecb_byteorder_helper (void) ecb_const;
ecb_inline unsigned char
ecb_byteorder_helper (void)
{
  /* the union code still generates code under pressure in gcc, */
  /* but less than using pointers, and always seems to */
  /* successfully return a constant. */
  /* the reason why we have this horrible preprocessor mess */
  /* is to avoid it in all cases, at least on common architectures */
#if __i386 || __i386__ || _M_IX86 || __amd64 || __amd64__ || _M_X64
  return 0x44;
#elif __BYTE_ORDER__ && __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
  return 0x44;
#elif __BYTE_ORDER__ && __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
  return 0x11;
#else
  union
  {
    uint32_t i;
    uint8_t c;
  } u = { 0x11223344 };
  return u.c;
#endif
}

ecb_inline ecb_bool ecb_big_endian (void) ecb_const;
ecb_inline ecb_bool ecb_big_endian (void) { return ecb_byteorder_helper () == 0x11; }
ecb_inline ecb_bool ecb_little_endian (void) ecb_const;
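/* Usage sketch (illustrative only; the helper below is hypothetical, and
 * ecb_bswap32 stands for a 32-bit byte-swap helper outside the code shown above):
 * because the endianness helpers are ecb_const and usually reduce to a constant,
 * an endianness-dependent conversion folds down to either a plain move or a single
 * byte swap at compile time:
 *
 *   static uint32_t
 *   u32_to_big_endian (uint32_t x)
 *   {
 *     return ecb_big_endian () ? x : ecb_bswap32 (x);
 *   }
 */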