… | |
… | |
38 | typedef signed int int32_t; |
38 | typedef signed int int32_t; |
39 | typedef unsigned int uint32_t; |
39 | typedef unsigned int uint32_t; |
40 | #if __GNUC__ |
40 | #if __GNUC__ |
41 | typedef signed long long int64_t; |
41 | typedef signed long long int64_t; |
42 | typedef unsigned long long uint64_t; |
42 | typedef unsigned long long uint64_t; |
43 | #else |
43 | #else /* _MSC_VER || __BORLANDC__ */ |
44 | typedef signed __int64 int64_t; |
44 | typedef signed __int64 int64_t; |
45 | typedef unsigned __int64 uint64_t; |
45 | typedef unsigned __int64 uint64_t; |
46 | #endif |
46 | #endif |
47 | #else |
47 | #else |
48 | #include <inttypes.h> |
48 | #include <inttypes.h> |
49 | #endif |
49 | #endif |
50 | |
50 | |
/* many compilers define __GNUC__ to some version but then only implement
 * what their authors think are the "more important" extensions,
 * causing enormous grief in return for some better fake benchmark numbers.
 * or so.
 * we try to detect these and simply assume they are not gcc - if they have
 * an issue with that they should have done it right in the first place.
 */
58 | #ifndef ECB_GCC_VERSION |
58 | #ifndef ECB_GCC_VERSION |
… | |
… | |
61 | #else |
61 | #else |
62 | #define ECB_GCC_VERSION(major,minor) (__GNUC__ > (major) || (__GNUC__ == (major) && __GNUC_MINOR__ >= (minor))) |
62 | #define ECB_GCC_VERSION(major,minor) (__GNUC__ > (major) || (__GNUC__ == (major) && __GNUC_MINOR__ >= (minor))) |
63 | #endif |
63 | #endif |
64 | #endif |
64 | #endif |
65 | |
65 | |
|
|
66 | /*****************************************************************************/ |
|
|
67 | |
|
|
68 | /* ECB_NO_THREADS - ecb is not used by multiple threads, ever */ |
|
|
69 | /* ECB_NO_SMP - ecb might be used in multiple threads, but only on a single cpu */ |
|
|
70 | |
|
|
71 | #if ECB_NO_THREADS || ECB_NO_SMP |
|
|
72 | #define ECB_MEMORY_FENCE do { } while (0) |
|
|
73 | #define ECB_MEMORY_FENCE_ACQUIRE do { } while (0) |
|
|
74 | #define ECB_MEMORY_FENCE_RELEASE do { } while (0) |
|
|
75 | #endif |
|
|
76 | |
|
|
77 | #ifndef ECB_MEMORY_FENCE |
|
|
78 | #if ECB_GCC_VERSION(2,5) |
|
|
79 | #if __x86 |
|
|
80 | #define ECB_MEMORY_FENCE __asm__ __volatile__ ("lock; orb $0, -1(%%esp)" : : : "memory") |
|
|
81 | #define ECB_MEMORY_FENCE_ACQUIRE ECB_MEMORY_FENCE /* non-lock xchg might be enough */ |
|
|
82 | #define ECB_MEMORY_FENCE_RELEASE do { } while (0) /* unlikely to change in future cpus */ |
|
|
83 | #elif __amd64 |
|
|
84 | #define ECB_MEMORY_FENCE __asm__ __volatile__ ("mfence" : : : "memory") |
|
|
85 | #define ECB_MEMORY_FENCE_ACQUIRE __asm__ __volatile__ ("lfence" : : : "memory") |
|
|
86 | #define ECB_MEMORY_FENCE_RELEASE __asm__ __volatile__ ("sfence") /* play safe - not needed in any current cpu */ |
|
|
87 | #endif |
|
|
88 | #endif |
|
|
89 | #endif |
|
|
90 | |
|
|
91 | #ifndef ECB_MEMORY_FENCE |
|
|
92 | #if ECB_GCC_VERSION(4,4) |
|
|
93 | #define ECB_MEMORY_FENCE __sync_synchronize () |
|
|
94 | #define ECB_MEMORY_FENCE_ACQUIRE ({ char dummy = 0; __sync_lock_test_and_set (&dummy, 1); }) |
|
|
95 | #define ECB_MEMORY_FENCE_RELEASE ({ char dummy = 1; __sync_lock_release (&dummy ); }) |
|
|
96 | #elif _MSC_VER >= 1400 /* VC++ 2005 */ |
|
|
97 | #pragma intrinsic(_ReadBarrier,_WriteBarrier,_ReadWriteBarrier) |
|
|
98 | #define ECB_MEMORY_FENCE _ReadWriteBarrier () |
|
|
99 | #define ECB_MEMORY_FENCE_ACQUIRE _ReadWriteBarrier () /* according to msdn, _ReadBarrier is not a load fence */ |
|
|
100 | #define ECB_MEMORY_FENCE_RELEASE _WriteBarrier () |
|
|
101 | #elif defined(_WIN32) |
|
|
102 | #include <WinNT.h> |
|
|
103 | #define ECB_MEMORY_FENCE MemoryBarrier () /* actually just xchg on x86... scary */ |
|
|
104 | #define ECB_MEMORY_FENCE_ACQUIRE ECB_MEMORY_FENCE |
|
|
105 | #define ECB_MEMORY_FENCE_RELEASE ECB_MEMORY_FENCE |
|
|
106 | #endif |
|
|
107 | #endif |
|
|
108 | |
|
|
109 | #ifndef ECB_MEMORY_FENCE |
|
|
110 | /* |
|
|
111 | * if you get undefined symbol references to pthread_mutex_lock, |
|
|
112 | * or failure to find pthread.h, then you should implement |
|
|
113 | * the ECB_MEMORY_FENCE operations for your cpu/compiler |
|
|
114 | * OR provide pthread.h and link against the posix thread library |
|
|
115 | * of your system. |
|
|
116 | */ |
|
|
117 | #include <pthread.h> |
|
|
118 | |
|
|
119 | static pthread_mutex_t ecb_mf_lock = PTHREAD_MUTEX_INITIALIZER; |
|
|
120 | #define ECB_MEMORY_FENCE do { pthread_mutex_lock (&ecb_mf_lock); pthread_mutex_unlock (&ecb_mf_lock); } while (0) |
|
|
121 | #define ECB_MEMORY_FENCE_ACQUIRE ECB_MEMORY_FENCE |
|
|
122 | #define ECB_MEMORY_FENCE_RELEASE ECB_MEMORY_FENCE |
|
|
123 | #endif |
|
|
124 | |
|
|
125 | /*****************************************************************************/ |
|
|
126 | |
66 | #define ECB_C99 (__STDC_VERSION__ >= 199901L) |
127 | #define ECB_C99 (__STDC_VERSION__ >= 199901L) |
67 | |
128 | |
68 | #if __cplusplus |
129 | #if __cplusplus |
69 | #define ecb_inline static inline |
130 | #define ecb_inline static inline |
70 | #elif ECB_GCC_VERSION(2,5) |
131 | #elif ECB_GCC_VERSION(2,5) |
… | |
… | |
136 | #define ecb_likely(expr) ecb_expect_true (expr) |
197 | #define ecb_likely(expr) ecb_expect_true (expr) |
137 | #define ecb_unlikely(expr) ecb_expect_false (expr) |
198 | #define ecb_unlikely(expr) ecb_expect_false (expr) |
138 | |
199 | |
139 | /* count trailing zero bits and count # of one bits */ |
200 | /* count trailing zero bits and count # of one bits */ |
140 | #if ECB_GCC_VERSION(3,4) |
201 | #if ECB_GCC_VERSION(3,4) |
|
|
202 | /* we assume int == 32 bit, long == 32 or 64 bit and long long == 64 bit */ |
|
|
203 | #define ecb_ld32(x) (__builtin_clz (x) ^ 31) |
|
|
204 | #define ecb_ld64(x) (__builtin_clzll (x) ^ 63) |
141 | #define ecb_ctz32(x) __builtin_ctz (x) |
205 | #define ecb_ctz32(x) __builtin_ctz (x) |
|
|
206 | #define ecb_ctz64(x) __builtin_ctzll (x) |
142 | #define ecb_popcount32(x) __builtin_popcount (x) |
207 | #define ecb_popcount32(x) __builtin_popcount (x) |
|
|
208 | /* no popcountll */ |
143 | #else |
209 | #else |
144 | ecb_function_ int ecb_ctz32 (uint32_t x) ecb_const; |
210 | ecb_function_ int ecb_ctz32 (uint32_t x) ecb_const; |
145 | ecb_function_ int |
211 | ecb_function_ int |
146 | ecb_ctz32 (uint32_t x) |
212 | ecb_ctz32 (uint32_t x) |
147 | { |
213 | { |
148 | int r = 0; |
214 | int r = 0; |
149 | |
215 | |
150 | x &= ~x + 1; /* this isolates the lowest bit */ |
216 | x &= ~x + 1; /* this isolates the lowest bit */ |
151 | |
217 | |
|
|
218 | #if ECB_branchless_on_i386 |
|
|
219 | r += !!(x & 0xaaaaaaaa) << 0; |
|
|
220 | r += !!(x & 0xcccccccc) << 1; |
|
|
221 | r += !!(x & 0xf0f0f0f0) << 2; |
|
|
222 | r += !!(x & 0xff00ff00) << 3; |
|
|
223 | r += !!(x & 0xffff0000) << 4; |
|
|
224 | #else |
152 | if (x & 0xaaaaaaaa) r += 1; |
225 | if (x & 0xaaaaaaaa) r += 1; |
153 | if (x & 0xcccccccc) r += 2; |
226 | if (x & 0xcccccccc) r += 2; |
154 | if (x & 0xf0f0f0f0) r += 4; |
227 | if (x & 0xf0f0f0f0) r += 4; |
155 | if (x & 0xff00ff00) r += 8; |
228 | if (x & 0xff00ff00) r += 8; |
156 | if (x & 0xffff0000) r += 16; |
229 | if (x & 0xffff0000) r += 16; |
|
|
230 | #endif |
157 | |
231 | |
158 | return r; |
232 | return r; |
|
|
233 | } |
|
|
234 | |
|
|
235 | ecb_function_ int ecb_ctz64 (uint64_t x) ecb_const; |
|
|
236 | ecb_function_ int |
|
|
237 | ecb_ctz64 (uint64_t x) |
|
|
238 | { |
|
|
239 | int shift = x & 0xffffffffU ? 0 : 32; |
|
|
240 | return ecb_ctz32 (x >> shift) + shift; |
159 | } |
241 | } |
160 | |
242 | |
161 | ecb_function_ int ecb_popcount32 (uint32_t x) ecb_const; |
243 | ecb_function_ int ecb_popcount32 (uint32_t x) ecb_const; |
162 | ecb_function_ int |
244 | ecb_function_ int |
163 | ecb_popcount32 (uint32_t x) |
245 | ecb_popcount32 (uint32_t x) |
… | |
… | |
167 | x = ((x >> 4) + x) & 0x0f0f0f0f; |
249 | x = ((x >> 4) + x) & 0x0f0f0f0f; |
168 | x *= 0x01010101; |
250 | x *= 0x01010101; |
169 | |
251 | |
170 | return x >> 24; |
252 | return x >> 24; |
171 | } |
253 | } |
|
|
254 | |
|
|
255 | ecb_function_ int ecb_ld32 (uint32_t x) ecb_const; |
|
|
256 | ecb_function_ int ecb_ld32 (uint32_t x) |
|
|
257 | { |
|
|
258 | int r = 0; |
|
|
259 | |
|
|
260 | if (x >> 16) { x >>= 16; r += 16; } |
|
|
261 | if (x >> 8) { x >>= 8; r += 8; } |
|
|
262 | if (x >> 4) { x >>= 4; r += 4; } |
|
|
263 | if (x >> 2) { x >>= 2; r += 2; } |
|
|
264 | if (x >> 1) { r += 1; } |
|
|
265 | |
|
|
266 | return r; |
|
|
267 | } |
|
|
268 | |
|
|
269 | ecb_function_ int ecb_ld64 (uint64_t x) ecb_const; |
|
|
270 | ecb_function_ int ecb_ld64 (uint64_t x) |
|
|
271 | { |
|
|
272 | int r = 0; |
|
|
273 | |
|
|
274 | if (x >> 32) { x >>= 32; r += 32; } |
|
|
275 | |
|
|
276 | return r + ecb_ld32 (x); |
|
|
277 | } |
172 | #endif |
278 | #endif |
|
|
279 | |
|
|
280 | /* popcount64 is only available on 64 bit cpus as gcc builtin */ |
|
|
281 | /* so for this version we are lazy */ |
|
|
282 | ecb_function_ int ecb_popcount64 (uint64_t x) ecb_const; |
|
|
283 | ecb_function_ int |
|
|
284 | ecb_popcount64 (uint64_t x) |
|
|
285 | { |
|
|
286 | return ecb_popcount32 (x) + ecb_popcount32 (x >> 32); |
|
|
287 | } |
|
|
288 | |
|
|
289 | ecb_inline uint8_t ecb_rotl8 (uint8_t x, unsigned int count) ecb_const; |
|
|
290 | ecb_inline uint8_t ecb_rotr8 (uint8_t x, unsigned int count) ecb_const; |
|
|
291 | ecb_inline uint16_t ecb_rotl16 (uint16_t x, unsigned int count) ecb_const; |
|
|
292 | ecb_inline uint16_t ecb_rotr16 (uint16_t x, unsigned int count) ecb_const; |
|
|
293 | ecb_inline uint32_t ecb_rotl32 (uint32_t x, unsigned int count) ecb_const; |
|
|
294 | ecb_inline uint32_t ecb_rotr32 (uint32_t x, unsigned int count) ecb_const; |
|
|
295 | ecb_inline uint64_t ecb_rotl64 (uint64_t x, unsigned int count) ecb_const; |
|
|
296 | ecb_inline uint64_t ecb_rotr64 (uint64_t x, unsigned int count) ecb_const; |
|
|
297 | |
|
|
298 | ecb_inline uint8_t ecb_rotl8 (uint8_t x, unsigned int count) { return (x >> ( 8 - count)) | (x << count); } |
|
|
299 | ecb_inline uint8_t ecb_rotr8 (uint8_t x, unsigned int count) { return (x << ( 8 - count)) | (x >> count); } |
|
|
300 | ecb_inline uint16_t ecb_rotl16 (uint16_t x, unsigned int count) { return (x >> (16 - count)) | (x << count); } |
|
|
301 | ecb_inline uint16_t ecb_rotr16 (uint16_t x, unsigned int count) { return (x << (16 - count)) | (x >> count); } |
|
|
302 | ecb_inline uint32_t ecb_rotl32 (uint32_t x, unsigned int count) { return (x >> (32 - count)) | (x << count); } |
|
|
303 | ecb_inline uint32_t ecb_rotr32 (uint32_t x, unsigned int count) { return (x << (32 - count)) | (x >> count); } |
|
|
304 | ecb_inline uint64_t ecb_rotl64 (uint64_t x, unsigned int count) { return (x >> (64 - count)) | (x << count); } |
|
|
305 | ecb_inline uint64_t ecb_rotr64 (uint64_t x, unsigned int count) { return (x << (64 - count)) | (x >> count); } |
173 | |
306 | |
174 | #if ECB_GCC_VERSION(4,3) |
307 | #if ECB_GCC_VERSION(4,3) |
175 | #define ecb_bswap32(x) __builtin_bswap32 (x) |
|
|
176 | #define ecb_bswap16(x) (__builtin_bswap32(x) >> 16) |
308 | #define ecb_bswap16(x) (__builtin_bswap32 (x) >> 16) |
|
|
309 | #define ecb_bswap32(x) __builtin_bswap32 (x) |
|
|
310 | #define ecb_bswap64(x) __builtin_bswap64 (x) |
177 | #else |
311 | #else |
|
|
312 | ecb_function_ uint16_t ecb_bswap16 (uint16_t x) ecb_const; |
|
|
313 | ecb_function_ uint16_t |
|
|
314 | ecb_bswap16 (uint16_t x) |
|
|
315 | { |
|
|
316 | return ecb_rotl16 (x, 8); |
|
|
317 | } |
|
|
318 | |
178 | ecb_function_ uint32_t ecb_bswap32 (uint32_t x) ecb_const; |
319 | ecb_function_ uint32_t ecb_bswap32 (uint32_t x) ecb_const; |
179 | ecb_function_ uint32_t |
320 | ecb_function_ uint32_t |
180 | ecb_bswap32 (uint32_t x) |
321 | ecb_bswap32 (uint32_t x) |
181 | { |
322 | { |
182 | return (x >> 24) |
323 | return (((uint32_t)ecb_bswap16 (x)) << 16) | ecb_bswap16 (x >> 16); |
183 | | ((x >> 8) & 0x0000ff00) |
|
|
184 | | ((x << 8) & 0x00ff0000) |
|
|
185 | | (x << 24); |
|
|
186 | } |
324 | } |
187 | |
325 | |
188 | ecb_function_ uint32_t ecb_bswap16 (uint32_t x) ecb_const; |
326 | ecb_function_ uint64_t ecb_bswap64 (uint64_t x) ecb_const; |
189 | ecb_function_ uint32_t |
327 | ecb_function_ uint64_t |
190 | ecb_bswap16 (uint32_t x) |
328 | ecb_bswap64 (uint64_t x) |
191 | { |
329 | { |
192 | return ((x >> 8) & 0xff) |
330 | return (((uint64_t)ecb_bswap32 (x)) << 32) | ecb_bswap32 (x >> 32); |
193 | | ((x << 8) & 0x00ff0000) |
|
|
194 | | (x << 24); |
|
|
195 | } |
331 | } |
196 | #endif |
332 | #endif |
197 | |
333 | |
198 | #if ECB_GCC_VERSION(4,5) |
334 | #if ECB_GCC_VERSION(4,5) |
199 | #define ecb_unreachable() __builtin_unreachable () |
335 | #define ecb_unreachable() __builtin_unreachable () |
… | |
… | |
234 | } |
370 | } |
235 | #else |
371 | #else |
236 | #define ecb_array_length(name) (sizeof (name) / sizeof (name [0])) |
372 | #define ecb_array_length(name) (sizeof (name) / sizeof (name [0])) |
237 | #endif |
373 | #endif |
238 | |
374 | |
239 | ecb_inline uint32_t ecb_rotr32 (uint32_t x, unsigned int count) ecb_const; |
|
|
240 | ecb_inline uint32_t |
|
|
241 | ecb_rotr32 (uint32_t x, unsigned int count) |
|
|
242 | { |
|
|
243 | return (x << (32 - count)) | (x >> count); |
|
|
244 | } |
|
|
245 | |
|
|
246 | ecb_inline uint32_t ecb_rotl32 (uint32_t x, unsigned int count) ecb_const; |
|
|
247 | ecb_inline uint32_t |
|
|
248 | ecb_rotl32 (uint32_t x, unsigned int count) |
|
|
249 | { |
|
|
250 | return (x >> (32 - count)) | (x << count); |
|
|
251 | } |
|
|
252 | |
|
|
253 | #endif |
375 | #endif |
254 | |
376 | |