/*
 * libecb - http://software.schmorp.de/pkg/libecb
 *
 * Copyright (©) 2009-2013 Marc Alexander Lehmann <libecb@schmorp.de>
 * Copyright (©) 2011 Emanuele Giaquinta
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without modifica-
 * tion, are permitted provided that the following conditions are met:
…
 */

#ifndef ECB_H
#define ECB_H

/* 16 bits major, 16 bits minor */
#define ECB_VERSION 0x00010003

#ifdef _WIN32
  typedef signed char int8_t;
  typedef unsigned char uint8_t;
  typedef signed short int16_t;
  typedef unsigned short uint16_t;
…
    typedef unsigned long long uint64_t;
  #else /* _MSC_VER || __BORLANDC__ */
    typedef signed __int64 int64_t;
    typedef unsigned __int64 uint64_t;
  #endif
  #ifdef _WIN64
    #define ECB_PTRSIZE 8
    typedef uint64_t uintptr_t;
    typedef int64_t intptr_t;
  #else
    #define ECB_PTRSIZE 4
    typedef uint32_t uintptr_t;
    typedef int32_t intptr_t;
  #endif
#else
  #include <inttypes.h>
  #if UINTMAX_MAX > 0xffffffffU
    #define ECB_PTRSIZE 8
  #else
    #define ECB_PTRSIZE 4
  #endif
#endif

/* work around x32 idiocy by defining proper macros */
#if __amd64 || __x86_64 || _M_AMD64 || _M_X64
  #if _ILP32
    #define ECB_AMD64_X32 1
  #else
    #define ECB_AMD64 1
  #endif
#endif

/* many compilers define __GNUC__ to some versions but then only implement
 * what their idiot authors think are the "more important" extensions,
 * causing enormous grief in return for some better fake benchmark numbers.
 * or so.
 * we try to detect these and simply assume they are not gcc - if they have
 * an issue with that they should have done it right in the first place.
 */
#ifndef ECB_GCC_VERSION
  #if !defined __GNUC_MINOR__ || defined __INTEL_COMPILER || defined __SUNPRO_C || defined __SUNPRO_CC || defined __llvm__ || defined __clang__
    #define ECB_GCC_VERSION(major,minor) 0
  #else
    #define ECB_GCC_VERSION(major,minor) (__GNUC__ > (major) || (__GNUC__ == (major) && __GNUC_MINOR__ >= (minor)))
  #endif
#endif

#define ECB_C (__STDC__+0) /* this assumes that __STDC__ is either empty or a number */
#define ECB_C99 (__STDC_VERSION__ >= 199901L)
#define ECB_C11 (__STDC_VERSION__ >= 201112L)
#define ECB_CPP (__cplusplus+0)
#define ECB_CPP11 (__cplusplus >= 201103L)

#if ECB_CPP
  #define ECB_EXTERN_C extern "C"
  #define ECB_EXTERN_C_BEG ECB_EXTERN_C {
  #define ECB_EXTERN_C_END }
#else
  #define ECB_EXTERN_C extern
  #define ECB_EXTERN_C_BEG
  #define ECB_EXTERN_C_END
#endif

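/* Illustrative usage sketch, not part of libecb: ECB_EXTERN_C_BEG/_END expand
 * to an extern "C" block under C++ and to nothing under plain C, so a header
 * can declare C-linkage functions once for both languages. The function name
 * below is hypothetical.
 *
 *   ECB_EXTERN_C_BEG
 *   int my_c_entry_point (int arg);
 *   ECB_EXTERN_C_END
 */
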
/*****************************************************************************/

/* ECB_NO_THREADS - ecb is not used by multiple threads, ever */
/* ECB_NO_SMP     - ecb might be used in multiple threads, but only on a single cpu */

#if ECB_NO_THREADS
  #define ECB_NO_SMP 1
#endif

#if ECB_NO_SMP
  #define ECB_MEMORY_FENCE do { } while (0)
#endif

#ifndef ECB_MEMORY_FENCE
  #if ECB_GCC_VERSION(2,5) || defined __INTEL_COMPILER || (__llvm__ && __GNUC__) || __SUNPRO_C >= 0x5110 || __SUNPRO_CC >= 0x5110
    #if __i386 || __i386__
      #define ECB_MEMORY_FENCE         __asm__ __volatile__ ("lock; orb $0, -1(%%esp)" : : : "memory")
      #define ECB_MEMORY_FENCE_ACQUIRE __asm__ __volatile__ ("" : : : "memory")
      #define ECB_MEMORY_FENCE_RELEASE __asm__ __volatile__ ("")
    #elif __amd64 || __amd64__ || __x86_64 || __x86_64__
      #define ECB_MEMORY_FENCE         __asm__ __volatile__ ("mfence" : : : "memory")
      #define ECB_MEMORY_FENCE_ACQUIRE __asm__ __volatile__ ("" : : : "memory")
      #define ECB_MEMORY_FENCE_RELEASE __asm__ __volatile__ ("")
    #elif __powerpc__ || __ppc__ || __powerpc64__ || __ppc64__
      #define ECB_MEMORY_FENCE         __asm__ __volatile__ ("sync" : : : "memory")
    #elif defined __ARM_ARCH_6__ || defined __ARM_ARCH_6J__ \
       || defined __ARM_ARCH_6K__ || defined __ARM_ARCH_6ZK__
      #define ECB_MEMORY_FENCE         __asm__ __volatile__ ("mcr p15,0,%0,c7,c10,5" : : "r" (0) : "memory")
    #elif defined __ARM_ARCH_7__ || defined __ARM_ARCH_7A__ \
       || defined __ARM_ARCH_7M__ || defined __ARM_ARCH_7R__
      #define ECB_MEMORY_FENCE         __asm__ __volatile__ ("dmb" : : : "memory")
    #elif (__sparc || __sparc__) && !__sparcv8
      #define ECB_MEMORY_FENCE         __asm__ __volatile__ ("membar #LoadStore | #LoadLoad | #StoreStore | #StoreLoad" : : : "memory")
      #define ECB_MEMORY_FENCE_ACQUIRE __asm__ __volatile__ ("membar #LoadStore | #LoadLoad" : : : "memory")
      #define ECB_MEMORY_FENCE_RELEASE __asm__ __volatile__ ("membar #LoadStore | #StoreStore")
    #elif defined __s390__ || defined __s390x__
      #define ECB_MEMORY_FENCE         __asm__ __volatile__ ("bcr 15,0" : : : "memory")
    #elif defined __mips__
      /* GNU/Linux emulates sync on mips1 architectures, so we force its use */
      /* anybody else who still uses mips1 is supposed to send in their version, with detection code. */
      #define ECB_MEMORY_FENCE         __asm__ __volatile__ (".set mips2; sync; .set mips0" : : : "memory")
    #elif defined __alpha__
      #define ECB_MEMORY_FENCE         __asm__ __volatile__ ("mb" : : : "memory")
    #elif defined __hppa__
      #define ECB_MEMORY_FENCE         __asm__ __volatile__ ("" : : : "memory")
      #define ECB_MEMORY_FENCE_RELEASE __asm__ __volatile__ ("")
    #elif defined __ia64__
      #define ECB_MEMORY_FENCE         __asm__ __volatile__ ("mf" : : : "memory")
    #elif defined __m68k__
      #define ECB_MEMORY_FENCE         __asm__ __volatile__ ("" : : : "memory")
    #elif defined __m88k__
      #define ECB_MEMORY_FENCE         __asm__ __volatile__ ("tb1 0,%%r0,128" : : : "memory")
    #elif defined __sh__
      #define ECB_MEMORY_FENCE         __asm__ __volatile__ ("" : : : "memory")
    #endif
  #endif
#endif

#ifndef ECB_MEMORY_FENCE
  #if ECB_GCC_VERSION(4,7)
    /* see comment below (stdatomic.h) about the C11 memory model. */
    #define ECB_MEMORY_FENCE __atomic_thread_fence (__ATOMIC_SEQ_CST)

    /* The __has_feature syntax from clang is so misdesigned that we cannot use it
     * without risking compile time errors with other compilers. We *could*
     * define our own ecb_clang_has_feature, but I just can't be bothered to work
     * around this shit time and again.
     * #elif defined __clang && __has_feature (cxx_atomic)
     *   // see comment below (stdatomic.h) about the C11 memory model.
     *   #define ECB_MEMORY_FENCE __c11_atomic_thread_fence (__ATOMIC_SEQ_CST)
     */

  #elif ECB_GCC_VERSION(4,4) || defined __INTEL_COMPILER || defined __clang__
    #define ECB_MEMORY_FENCE __sync_synchronize ()
  #elif _MSC_VER >= 1500 /* VC++ 2008 */
    /* apparently, microsoft broke all the memory barrier stuff in Visual Studio 2008... */
    #pragma intrinsic(_ReadBarrier,_WriteBarrier,_ReadWriteBarrier)
    #define ECB_MEMORY_FENCE _ReadWriteBarrier (); MemoryBarrier()
    #define ECB_MEMORY_FENCE_ACQUIRE _ReadWriteBarrier (); MemoryBarrier() /* according to msdn, _ReadBarrier is not a load fence */
    #define ECB_MEMORY_FENCE_RELEASE _WriteBarrier (); MemoryBarrier()
  #elif _MSC_VER >= 1400 /* VC++ 2005 */
    #pragma intrinsic(_ReadBarrier,_WriteBarrier,_ReadWriteBarrier)
    #define ECB_MEMORY_FENCE _ReadWriteBarrier ()
    #define ECB_MEMORY_FENCE_ACQUIRE _ReadWriteBarrier () /* according to msdn, _ReadBarrier is not a load fence */
    #define ECB_MEMORY_FENCE_RELEASE _WriteBarrier ()
  #elif defined _WIN32
    #include <WinNT.h>
    #define ECB_MEMORY_FENCE MemoryBarrier () /* actually just xchg on x86... scary */
  #elif __SUNPRO_C >= 0x5110 || __SUNPRO_CC >= 0x5110
    #include <mbarrier.h>
    #define ECB_MEMORY_FENCE         __machine_rw_barrier ()
    #define ECB_MEMORY_FENCE_ACQUIRE __machine_r_barrier ()
    #define ECB_MEMORY_FENCE_RELEASE __machine_w_barrier ()
  #elif __xlC__
    #define ECB_MEMORY_FENCE __sync ()
  #endif
#endif

#ifndef ECB_MEMORY_FENCE
  #if ECB_C11 && !defined __STDC_NO_ATOMICS__
    /* we assume that these memory fences work on all variables/all memory accesses, */
    /* not just C11 atomics and atomic accesses */
    #include <stdatomic.h>
    /* Unfortunately, neither gcc 4.7 nor clang 3.1 generate any instructions for */
    /* any fence other than seq_cst, which isn't very efficient for us. */
    /* Why that is, we don't know - either the C11 memory model is quite useless */
    /* for most usages, or gcc and clang have a bug */
    /* I *currently* lean towards the latter, and inefficiently implement */
    /* all three of ecb's fences as a seq_cst fence */
    #define ECB_MEMORY_FENCE atomic_thread_fence (memory_order_seq_cst)
  #endif
#endif

#ifndef ECB_MEMORY_FENCE
  #if !ECB_AVOID_PTHREADS
…
    static pthread_mutex_t ecb_mf_lock = PTHREAD_MUTEX_INITIALIZER;
    #define ECB_MEMORY_FENCE do { pthread_mutex_lock (&ecb_mf_lock); pthread_mutex_unlock (&ecb_mf_lock); } while (0)
  #endif
#endif

#if !defined ECB_MEMORY_FENCE_ACQUIRE && defined ECB_MEMORY_FENCE
  #define ECB_MEMORY_FENCE_ACQUIRE ECB_MEMORY_FENCE
#endif

#if !defined ECB_MEMORY_FENCE_RELEASE && defined ECB_MEMORY_FENCE
  #define ECB_MEMORY_FENCE_RELEASE ECB_MEMORY_FENCE
#endif

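/* Illustrative usage sketch, not part of libecb: a minimal producer/consumer
 * handshake using the release/acquire fences defined above. The variables and
 * the use() call are hypothetical; real code must also make sure the flag and
 * data stores/loads themselves cannot be torn (e.g. machine-word-sized types).
 *
 *   static int shared_value;
 *   static volatile int ready;
 *
 *   // producer
 *   shared_value = 42;
 *   ECB_MEMORY_FENCE_RELEASE;   // publish the store before setting the flag
 *   ready = 1;
 *
 *   // consumer
 *   if (ready)
 *     {
 *       ECB_MEMORY_FENCE_ACQUIRE;   // do not read shared_value before the flag
 *       use (shared_value);
 *     }
 */
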
/*****************************************************************************/

#if __cplusplus
  #define ecb_inline static inline
#elif ECB_GCC_VERSION(2,5)
  #define ecb_inline static __inline__
…
#elif ECB_GCC_VERSION(3,0)
  #define ecb_decltype(x) __typeof(x)
#endif

#define ecb_noinline ecb_attribute ((__noinline__))
#define ecb_unused   ecb_attribute ((__unused__))
#define ecb_const    ecb_attribute ((__const__))
#define ecb_pure     ecb_attribute ((__pure__))

#if ECB_C11
  #define ecb_noreturn _Noreturn
#else
  #define ecb_noreturn ecb_attribute ((__noreturn__))
#endif

#if ECB_GCC_VERSION(4,3)
  #define ecb_artificial ecb_attribute ((__artificial__))
  #define ecb_hot        ecb_attribute ((__hot__))
  #define ecb_cold       ecb_attribute ((__cold__))
…

  return r + ecb_ld32 (x);
}
#endif

ecb_function_ ecb_bool ecb_is_pot32 (uint32_t x) ecb_const;
ecb_function_ ecb_bool ecb_is_pot32 (uint32_t x) { return !(x & (x - 1)); }
ecb_function_ ecb_bool ecb_is_pot64 (uint64_t x) ecb_const;
ecb_function_ ecb_bool ecb_is_pot64 (uint64_t x) { return !(x & (x - 1)); }

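/* Illustrative usage sketch, not part of libecb: ecb_is_pot32/64 test whether
 * a value is a power of two; note that, by this definition, 0 also tests true.
 *
 *   ecb_is_pot32 (1024)         // 1
 *   ecb_is_pot32 (1000)         // 0
 *   ecb_is_pot64 (1ULL << 40)   // 1
 */
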
ecb_function_ uint8_t ecb_bitrev8 (uint8_t x) ecb_const;
ecb_function_ uint8_t ecb_bitrev8 (uint8_t x)
{
  return (  (x * 0x0802U & 0x22110U)
          | (x * 0x8020U & 0x88440U)) * 0x10101U >> 16;
…

#if ECB_GCC_VERSION(4,5)
  #define ecb_unreachable() __builtin_unreachable ()
#else
  /* this seems to work fine, but gcc always emits a warning for it :/ */
  ecb_inline void ecb_unreachable (void) ecb_noreturn;
  ecb_inline void ecb_unreachable (void) { }
#endif

/* try to tell the compiler that some condition is definitely true */
#define ecb_assume(cond) if (!(cond)) ecb_unreachable (); else 0

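/* Illustrative usage sketch, not part of libecb: ecb_assume tells the
 * optimiser that a condition holds, and ecb_unreachable marks code paths that
 * can never execute - if either assumption is ever violated, behaviour is
 * undefined. The switch, tags and len below are hypothetical.
 *
 *   switch (tag)
 *     {
 *       case TAG_A: handle_a (); break;
 *       case TAG_B: handle_b (); break;
 *       default: ecb_unreachable ();   // tag is known to be TAG_A or TAG_B
 *     }
 *
 *   ecb_assume (len > 0);   // lets the compiler drop the len == 0 path
 */
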
ecb_inline unsigned char ecb_byteorder_helper (void) ecb_const;
ecb_inline unsigned char
ecb_byteorder_helper (void)
{
  /* the union code still generates code under pressure in gcc, */
  /* but less than using pointers, and always seems to */
  /* successfully return a constant. */
  /* the reason why we have this horrible preprocessor mess */
  /* is to avoid it in all cases, at least on common architectures */
  /* or when using a recent enough gcc version (>= 4.6) */
  #if __i386 || __i386__ || _M_X86 || __amd64 || __amd64__ || _M_X64
    return 0x44;
  #elif __BYTE_ORDER__ && __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
    return 0x44;
  #elif __BYTE_ORDER__ && __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
    return 0x11;
  #else
    union
    {
      uint32_t i;
      uint8_t c;
    } u = { 0x11223344 };
    return u.c;
  #endif
}

ecb_inline ecb_bool ecb_big_endian    (void) ecb_const;
ecb_inline ecb_bool ecb_big_endian    (void) { return ecb_byteorder_helper () == 0x11; }
ecb_inline ecb_bool ecb_little_endian (void) ecb_const;
ecb_inline ecb_bool ecb_little_endian (void) { return ecb_byteorder_helper () == 0x44; }

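/* Illustrative usage sketch, not part of libecb's API surface shown here: the
 * endianness helpers are designed to fold into compile-time constants, so they
 * can guard byte-order dependent code without a configure-time check.
 *
 *   uint32_t host_to_be32 (uint32_t x)
 *   {
 *     return ecb_big_endian () ? x : ecb_bswap32 (x);
 *   }
 *
 * (ecb_bswap32 is provided by libecb in a section elided above.)
 */
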
#if ECB_GCC_VERSION(3,0) || ECB_C99
  #define ecb_mod(m,n) ((m) % (n) + ((m) % (n) < 0 ? (n) : 0))
#else
  #define ecb_mod(m,n) ((m) < 0 ? ((n) - 1 - ((-1 - (m)) % (n))) : ((m) % (n)))
…
  }
#else
  #define ecb_array_length(name) (sizeof (name) / sizeof (name [0]))
#endif

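/* Illustrative usage sketch, not part of libecb: ecb_mod yields a result with
 * the sign of the (expected positive) divisor n, which is what wrap-around
 * index arithmetic usually wants, and ecb_array_length yields the element
 * count of a true array. The ring buffer and pos below are hypothetical.
 *
 *   int ring[16];
 *   int idx = ecb_mod (pos - 1, (int)ecb_array_length (ring));   // 15 when pos == 0
 */
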
/*******************************************************************************/
/* floating point stuff, can be disabled by defining ECB_NO_LIBM */

/* basically, everything uses "ieee pure-endian" floating point numbers */
/* the only noteworthy exception is ancient armle, which uses order 43218765 */
#if 0 \
    || __i386 || __i386__ \
    || __amd64 || __amd64__ || __x86_64 || __x86_64__ \
    || __powerpc__ || __ppc__ || __powerpc64__ || __ppc64__ \
    || defined __arm__ && defined __ARM_EABI__ \
    || defined __s390__ || defined __s390x__ \
    || defined __mips__ \
    || defined __alpha__ \
    || defined __hppa__ \
    || defined __ia64__ \
    || defined __m68k__ \
    || defined __m88k__ \
    || defined __sh__ \
    || defined _M_IX86 || defined _M_AMD64 || defined _M_IA64
  #define ECB_STDFP 1
  #include <string.h> /* for memcpy */
#else
  #define ECB_STDFP 0
#endif

#ifndef ECB_NO_LIBM

  #include <math.h> /* for frexp*, ldexp*, INFINITY, NAN */

  /* only the oldest of old doesn't have this one. solaris. */
  #ifdef INFINITY
    #define ECB_INFINITY INFINITY
  #else
    #define ECB_INFINITY HUGE_VAL
  #endif

  #ifdef NAN
    #define ECB_NAN NAN
  #else
    #define ECB_NAN ECB_INFINITY
  #endif

  /* converts an ieee half/binary16 to a float */
  ecb_function_ float ecb_binary16_to_float (uint16_t x) ecb_const;
  ecb_function_ float
  ecb_binary16_to_float (uint16_t x)
  {
    int e = (x >> 10) & 0x1f;
    int m = x & 0x3ff;
    float r;

    if      (!e     ) r = ldexpf (m        , -24);
    else if (e != 31) r = ldexpf (m + 0x400, e - 25);
    else if (m      ) r = ECB_NAN;
    else              r = ECB_INFINITY;

    return x & 0x8000 ? -r : r;
  }

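  /* Illustrative usage sketch, not part of libecb: a binary16 value packs
   * 1 sign bit, 5 exponent bits and 10 mantissa bits, so for example
   *
   *   ecb_binary16_to_float (0x3c00)   // 1.0f (exponent 15, mantissa 0)
   *   ecb_binary16_to_float (0xc000)   // -2.0f
   *   ecb_binary16_to_float (0x7c00)   // +infinity (exponent 31, mantissa 0)
   */
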
  /* convert a float to ieee single/binary32 */
  ecb_function_ uint32_t ecb_float_to_binary32 (float x) ecb_const;
  ecb_function_ uint32_t
  ecb_float_to_binary32 (float x)
  {
    uint32_t r;

    #if ECB_STDFP
      memcpy (&r, &x, 4);
    #else
      /* slow emulation, works for anything but -0 */
      uint32_t m;
      int e;

      if (x == 0e0f                    ) return 0x00000000U;
      if (x > +3.40282346638528860e+38f) return 0x7f800000U;
      if (x < -3.40282346638528860e+38f) return 0xff800000U;
      if (x != x                       ) return 0x7fbfffffU;

      m = frexpf (x, &e) * 0x1000000U;

      r = m & 0x80000000U;

      if (r)
        m = -m;

      if (e <= -126)
        {
          m &= 0xffffffU;
          m >>= (-125 - e);
          e = -126;
        }

      r |= (e + 126) << 23;
      r |= m & 0x7fffffU;
    #endif

    return r;
  }

  /* converts an ieee single/binary32 to a float */
  ecb_function_ float ecb_binary32_to_float (uint32_t x) ecb_const;
  ecb_function_ float
  ecb_binary32_to_float (uint32_t x)
  {
    float r;

    #if ECB_STDFP
      memcpy (&r, &x, 4);
    #else
      /* emulation, only works for normals and subnormals and +0 */
      int neg = x >> 31;
      int e = (x >> 23) & 0xffU;

      x &= 0x7fffffU;

      if (e)
        x |= 0x800000U;
      else
        e = 1;

      /* we distrust ldexpf a bit and do the 2**-24 scaling by an extra multiply */
      r = ldexpf (x * (0.5f / 0x800000U), e - 126);

      r = neg ? -r : r;
    #endif

    return r;
  }

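  /* Illustrative usage sketch, not part of libecb: these two functions convert
   * between a native float and its IEEE 754 binary32 bit pattern, e.g. for
   * portable serialisation. On ECB_STDFP targets they reduce to a memcpy.
   *
   *   uint32_t bits = ecb_float_to_binary32 (1.0f);   // 0x3f800000
   *   float    f    = ecb_binary32_to_float (bits);   // 1.0f again
   */
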
  /* convert a double to ieee double/binary64 */
  ecb_function_ uint64_t ecb_double_to_binary64 (double x) ecb_const;
  ecb_function_ uint64_t
  ecb_double_to_binary64 (double x)
  {
    uint64_t r;

    #if ECB_STDFP
      memcpy (&r, &x, 8);
    #else
      /* slow emulation, works for anything but -0 */
      uint64_t m;
      int e;

      if (x == 0e0                      ) return 0x0000000000000000U;
      if (x > +1.79769313486231470e+308 ) return 0x7ff0000000000000U;
      if (x < -1.79769313486231470e+308 ) return 0xfff0000000000000U;
      if (x != x                        ) return 0x7ff7ffffffffffffU;

      m = frexp (x, &e) * 0x20000000000000U;

      r = m & 0x8000000000000000;

      if (r)
        m = -m;

      if (e <= -1022)
        {
          m &= 0x1fffffffffffffU;
          m >>= (-1021 - e);
          e = -1022;
        }

      r |= ((uint64_t)(e + 1022)) << 52;
      r |= m & 0xfffffffffffffU;
    #endif

    return r;
  }

  /* converts an ieee double/binary64 to a double */
  ecb_function_ double ecb_binary64_to_double (uint64_t x) ecb_const;
  ecb_function_ double
  ecb_binary64_to_double (uint64_t x)
  {
    double r;

    #if ECB_STDFP
      memcpy (&r, &x, 8);
    #else
      /* emulation, only works for normals and subnormals and +0 */
      int neg = x >> 63;
      int e = (x >> 52) & 0x7ffU;

      x &= 0xfffffffffffffU;

      if (e)
        x |= 0x10000000000000U;
      else
        e = 1;

      /* we distrust ldexp a bit and do the 2**-53 scaling by an extra multiply */
      r = ldexp (x * (0.5 / 0x10000000000000U), e - 1022);

      r = neg ? -r : r;
    #endif

    return r;
  }

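  /* Illustrative usage sketch, not part of libecb: the binary64 pair mirrors
   * the binary32 one for doubles, again falling back to a slower emulation
   * when the host format is not known to be IEEE (ECB_STDFP is 0).
   *
   *   uint64_t bits = ecb_double_to_binary64 (-0.5);   // 0xbfe0000000000000
   *   double   d    = ecb_binary64_to_double (bits);   // -0.5 again
   */
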
#endif

#endif
