… | |
… | |
38 | typedef signed int int32_t; |
38 | typedef signed int int32_t; |
39 | typedef unsigned int uint32_t; |
39 | typedef unsigned int uint32_t; |
40 | #if __GNUC__ |
40 | #if __GNUC__ |
41 | typedef signed long long int64_t; |
41 | typedef signed long long int64_t; |
42 | typedef unsigned long long uint64_t; |
42 | typedef unsigned long long uint64_t; |
43 | #else |
43 | #else /* _MSC_VER || __BORLANDC__ */ |
44 | typedef signed __int64 int64_t; |
44 | typedef signed __int64 int64_t; |
45 | typedef unsigned __int64 uint64_t; |
45 | typedef unsigned __int64 uint64_t; |
46 | #endif |
46 | #endif |
47 | #else |
47 | #else |
48 | #include <inttypes.h> |
48 | #include <inttypes.h> |
49 | #endif |
49 | #endif |
50 | |
50 | |
/* many compilers define __GNUC__ to some versions but then only implement
 * what their idiot authors think are the "more important" extensions,
 * causing enormous grief in return for some better fake benchmark numbers.
 * or so.
 * we try to detect these and simply assume they are not gcc - if they have
 * an issue with that they should have done it right in the first place.
 */
58 | #ifndef ECB_GCC_VERSION |
58 | #ifndef ECB_GCC_VERSION |
… | |
… | |
61 | #else |
61 | #else |
62 | #define ECB_GCC_VERSION(major,minor) (__GNUC__ > (major) || (__GNUC__ == (major) && __GNUC_MINOR__ >= (minor))) |
62 | #define ECB_GCC_VERSION(major,minor) (__GNUC__ > (major) || (__GNUC__ == (major) && __GNUC_MINOR__ >= (minor))) |
63 | #endif |
63 | #endif |
64 | #endif |
64 | #endif |
65 | |
65 | |
|
|
66 | /*****************************************************************************/ |
|
|
67 | |
|
|
/* ECB_NO_THREADS - ecb is not used by multiple threads, ever */
/* ECB_NO_SMP - ecb might be used in multiple threads, but only on a single cpu */

/* when the user promises single-threaded (or single-cpu) operation, */
/* all three fence operations can be compiled away completely */
#if ECB_NO_THREADS || ECB_NO_SMP
  #define ECB_MEMORY_FENCE         do { } while (0)
  #define ECB_MEMORY_FENCE_ACQUIRE do { } while (0)
  #define ECB_MEMORY_FENCE_RELEASE do { } while (0)
#endif
|
|
76 | |
|
|
77 | #ifndef ECB_MEMORY_FENCE |
|
|
78 | #if ECB_GCC_VERSION(2,5) |
|
|
79 | #if __x86 |
|
|
80 | #define ECB_MEMORY_FENCE __asm__ __volatile__ ("lock; orb $0, -1(%%esp)" : : : "memory") |
|
|
81 | #define ECB_MEMORY_FENCE_ACQUIRE ECB_MEMORY_FENCE /* non-lock xchg might be enough */ |
|
|
82 | #define ECB_MEMORY_FENCE_RELEASE do { } while (0) /* unlikely to change in future cpus */ |
|
|
83 | #elif __amd64 |
|
|
84 | #define ECB_MEMORY_FENCE __asm__ __volatile__ ("mfence" : : : "memory") |
|
|
85 | #define ECB_MEMORY_FENCE_ACQUIRE __asm__ __volatile__ ("lfence" : : : "memory") |
|
|
86 | #define ECB_MEMORY_FENCE_RELEASE __asm__ __volatile__ ("sfence") /* play safe - not needed in any current cpu */ |
|
|
87 | #elif __powerpc__ || __ppc__ || __powerpc64__ || __ppc64__ |
|
|
88 | #define ECB_MEMORY_FENCE __asm__ __volatile__ ("sync" : : : "memory") |
|
|
89 | #elif defined(__ARM_ARCH_6__ ) || defined(__ARM_ARCH_6J__ ) \ |
|
|
90 | || defined(__ARM_ARCH_6K__) || defined(__ARM_ARCH_6ZK__) \ |
|
|
91 | || defined(__ARM_ARCH_7__ ) || defined(__ARM_ARCH_7A__ ) \ |
|
|
92 | || defined(__ARM_ARCH_7M__) || defined(__ARM_ARCH_7R__ ) |
|
|
93 | #define ECB_MEMORY_FENCE \ |
|
|
94 | do { \ |
|
|
95 | int null = 0; \ |
|
|
96 | __asm__ __volatile__ ("mcr p15,0,%0,c6,c10,5", : "=&r" (null) : : "memory"); \ |
|
|
97 | while (0) |
|
|
98 | #endif |
|
|
99 | #endif |
|
|
100 | #endif |
|
|
101 | |
|
|
102 | #ifndef ECB_MEMORY_FENCE |
|
|
103 | #if ECB_GCC_VERSION(4,4) || defined(__INTEL_COMPILER) |
|
|
104 | #define ECB_MEMORY_FENCE __sync_synchronize () |
|
|
105 | /*#define ECB_MEMORY_FENCE_ACQUIRE ({ char dummy = 0; __sync_lock_test_and_set (&dummy, 1); }) */ |
|
|
106 | /*#define ECB_MEMORY_FENCE_RELEASE ({ char dummy = 1; __sync_lock_release (&dummy ); }) */ |
|
|
107 | #elif _MSC_VER >= 1400 /* VC++ 2005 */ |
|
|
108 | #pragma intrinsic(_ReadBarrier,_WriteBarrier,_ReadWriteBarrier) |
|
|
109 | #define ECB_MEMORY_FENCE _ReadWriteBarrier () |
|
|
110 | #define ECB_MEMORY_FENCE_ACQUIRE _ReadWriteBarrier () /* according to msdn, _ReadBarrier is not a load fence */ |
|
|
111 | #define ECB_MEMORY_FENCE_RELEASE _WriteBarrier () |
|
|
112 | #elif defined(_WIN32) |
|
|
113 | #include <WinNT.h> |
|
|
114 | #define ECB_MEMORY_FENCE MemoryBarrier () /* actually just xchg on x86... scary */ |
|
|
115 | #endif |
|
|
116 | #endif |
|
|
117 | |
|
|
/* last-ditch fence fallback: a mutex lock/unlock pair acts as a full barrier */
#ifndef ECB_MEMORY_FENCE
  #if !ECB_AVOID_PTHREADS
    /*
     * if you get undefined symbol references to pthread_mutex_lock,
     * or failure to find pthread.h, then you should implement
     * the ECB_MEMORY_FENCE operations for your cpu/compiler
     * OR provide pthread.h and link against the posix thread library
     * of your system.
     */
    #include <pthread.h>
    #define ECB_NEEDS_PTHREADS 1
    #define ECB_MEMORY_FENCE_NEEDS_PTHREADS 1

    static pthread_mutex_t ecb_mf_lock = PTHREAD_MUTEX_INITIALIZER;
    #define ECB_MEMORY_FENCE do { pthread_mutex_lock (&ecb_mf_lock); pthread_mutex_unlock (&ecb_mf_lock); } while (0)
  #endif
#endif
|
|
135 | |
|
|
/* when only the full fence is available, fall back to it for the */
/* (stronger than necessary) acquire and release variants as well */
#if !defined(ECB_MEMORY_FENCE_ACQUIRE) && defined(ECB_MEMORY_FENCE)
  #define ECB_MEMORY_FENCE_ACQUIRE ECB_MEMORY_FENCE
#endif

#if !defined(ECB_MEMORY_FENCE_RELEASE) && defined(ECB_MEMORY_FENCE)
  #define ECB_MEMORY_FENCE_RELEASE ECB_MEMORY_FENCE
#endif
|
|
143 | |
|
|
144 | /*****************************************************************************/ |
|
|
145 | |
66 | #define ECB_C99 (__STDC_VERSION__ >= 199901L) |
146 | #define ECB_C99 (__STDC_VERSION__ >= 199901L) |
67 | |
147 | |
68 | #if __cplusplus |
148 | #if __cplusplus |
69 | #define ecb_inline static inline |
149 | #define ecb_inline static inline |
70 | #elif ECB_GCC_VERSION(2,5) |
150 | #elif ECB_GCC_VERSION(2,5) |
… | |
… | |
152 | { |
232 | { |
153 | int r = 0; |
233 | int r = 0; |
154 | |
234 | |
155 | x &= ~x + 1; /* this isolates the lowest bit */ |
235 | x &= ~x + 1; /* this isolates the lowest bit */ |
156 | |
236 | |
|
|
237 | #if ECB_branchless_on_i386 |
|
|
238 | r += !!(x & 0xaaaaaaaa) << 0; |
|
|
239 | r += !!(x & 0xcccccccc) << 1; |
|
|
240 | r += !!(x & 0xf0f0f0f0) << 2; |
|
|
241 | r += !!(x & 0xff00ff00) << 3; |
|
|
242 | r += !!(x & 0xffff0000) << 4; |
|
|
243 | #else |
157 | if (x & 0xaaaaaaaa) r += 1; |
244 | if (x & 0xaaaaaaaa) r += 1; |
158 | if (x & 0xcccccccc) r += 2; |
245 | if (x & 0xcccccccc) r += 2; |
159 | if (x & 0xf0f0f0f0) r += 4; |
246 | if (x & 0xf0f0f0f0) r += 4; |
160 | if (x & 0xff00ff00) r += 8; |
247 | if (x & 0xff00ff00) r += 8; |
161 | if (x & 0xffff0000) r += 16; |
248 | if (x & 0xffff0000) r += 16; |
|
|
249 | #endif |
162 | |
250 | |
163 | return r; |
251 | return r; |
164 | } |
252 | } |
165 | |
253 | |
166 | ecb_function_ int ecb_ctz64 (uint64_t x) ecb_const; |
254 | ecb_function_ int ecb_ctz64 (uint64_t x) ecb_const; |
167 | ecb_function_ int |
255 | ecb_function_ int |
168 | ecb_ctz64 (uint64_t x) |
256 | ecb_ctz64 (uint64_t x) |
169 | { |
257 | { |
170 | int shift = x & 0xffffffffU ? 0 : 32; |
258 | int shift = x & 0xffffffffU ? 0 : 32; |
171 | return ecb_ctz (x >> shift) + shift; |
259 | return ecb_ctz32 (x >> shift) + shift; |
172 | } |
260 | } |
173 | |
261 | |
174 | ecb_function_ int ecb_popcount32 (uint32_t x) ecb_const; |
262 | ecb_function_ int ecb_popcount32 (uint32_t x) ecb_const; |
175 | ecb_function_ int |
263 | ecb_function_ int |
176 | ecb_popcount32 (uint32_t x) |
264 | ecb_popcount32 (uint32_t x) |
… | |
… | |
181 | x *= 0x01010101; |
269 | x *= 0x01010101; |
182 | |
270 | |
183 | return x >> 24; |
271 | return x >> 24; |
184 | } |
272 | } |
185 | |
273 | |
186 | /* you have the choice beetween something with a table lookup, */ |
|
|
187 | /* something using lots of bit arithmetic and a simple loop */ |
|
|
188 | /* we went for the loop */ |
|
|
189 | ecb_function_ int ecb_ld32 (uint32_t x) ecb_const; |
274 | ecb_function_ int ecb_ld32 (uint32_t x) ecb_const; |
190 | ecb_function_ int ecb_ld32 (uint32_t x) |
275 | ecb_function_ int ecb_ld32 (uint32_t x) |
191 | { |
276 | { |
192 | int r = -1; |
277 | int r = 0; |
193 | |
278 | |
194 | do |
279 | if (x >> 16) { x >>= 16; r += 16; } |
195 | { |
280 | if (x >> 8) { x >>= 8; r += 8; } |
196 | x >>= 1; |
281 | if (x >> 4) { x >>= 4; r += 4; } |
197 | ++r; |
282 | if (x >> 2) { x >>= 2; r += 2; } |
198 | } |
283 | if (x >> 1) { r += 1; } |
199 | while (x); |
|
|
200 | |
284 | |
201 | return r; |
285 | return r; |
202 | } |
286 | } |
203 | |
287 | |
204 | ecb_function_ int ecb_ld64 (uint64_t x) ecb_const; |
288 | ecb_function_ int ecb_ld64 (uint64_t x) ecb_const; |
205 | ecb_function_ int ecb_ld64 (uint64_t x) |
289 | ecb_function_ int ecb_ld64 (uint64_t x) |
206 | { |
290 | { |
207 | int r = -1; |
291 | int r = 0; |
208 | |
292 | |
209 | do |
293 | if (x >> 32) { x >>= 32; r += 32; } |
210 | { |
|
|
211 | x >>= 1; |
|
|
212 | ++r; |
|
|
213 | } |
|
|
214 | while (x); |
|
|
215 | |
294 | |
216 | return r; |
295 | return r + ecb_ld32 (x); |
217 | } |
296 | } |
218 | #endif |
297 | #endif |
219 | |
298 | |
220 | /* popcount64 is only available on 64 bit cpus as gcc builtin */ |
299 | /* popcount64 is only available on 64 bit cpus as gcc builtin */ |
221 | /* so for this version we are lazy */ |
300 | /* so for this version we are lazy */ |
… | |
… | |
224 | ecb_popcount64 (uint64_t x) |
303 | ecb_popcount64 (uint64_t x) |
225 | { |
304 | { |
226 | return ecb_popcount32 (x) + ecb_popcount32 (x >> 32); |
305 | return ecb_popcount32 (x) + ecb_popcount32 (x >> 32); |
227 | } |
306 | } |
228 | |
307 | |
|
|
308 | ecb_inline uint8_t ecb_rotl8 (uint8_t x, unsigned int count) ecb_const; |
|
|
309 | ecb_inline uint8_t ecb_rotr8 (uint8_t x, unsigned int count) ecb_const; |
|
|
310 | ecb_inline uint16_t ecb_rotl16 (uint16_t x, unsigned int count) ecb_const; |
|
|
311 | ecb_inline uint16_t ecb_rotr16 (uint16_t x, unsigned int count) ecb_const; |
|
|
312 | ecb_inline uint32_t ecb_rotl32 (uint32_t x, unsigned int count) ecb_const; |
|
|
313 | ecb_inline uint32_t ecb_rotr32 (uint32_t x, unsigned int count) ecb_const; |
|
|
314 | ecb_inline uint64_t ecb_rotl64 (uint64_t x, unsigned int count) ecb_const; |
|
|
315 | ecb_inline uint64_t ecb_rotr64 (uint64_t x, unsigned int count) ecb_const; |
|
|
316 | |
|
|
317 | ecb_inline uint8_t ecb_rotl8 (uint8_t x, unsigned int count) { return (x >> ( 8 - count)) | (x << count); } |
|
|
318 | ecb_inline uint8_t ecb_rotr8 (uint8_t x, unsigned int count) { return (x << ( 8 - count)) | (x >> count); } |
|
|
319 | ecb_inline uint16_t ecb_rotl16 (uint16_t x, unsigned int count) { return (x >> (16 - count)) | (x << count); } |
|
|
320 | ecb_inline uint16_t ecb_rotr16 (uint16_t x, unsigned int count) { return (x << (16 - count)) | (x >> count); } |
|
|
321 | ecb_inline uint32_t ecb_rotl32 (uint32_t x, unsigned int count) { return (x >> (32 - count)) | (x << count); } |
|
|
322 | ecb_inline uint32_t ecb_rotr32 (uint32_t x, unsigned int count) { return (x << (32 - count)) | (x >> count); } |
|
|
323 | ecb_inline uint64_t ecb_rotl64 (uint64_t x, unsigned int count) { return (x >> (64 - count)) | (x << count); } |
|
|
324 | ecb_inline uint64_t ecb_rotr64 (uint64_t x, unsigned int count) { return (x << (64 - count)) | (x >> count); } |
|
|
325 | |
229 | #if ECB_GCC_VERSION(4,3) |
326 | #if ECB_GCC_VERSION(4,3) |
230 | #define ecb_bswap16(x) (__builtin_bswap32 (x) >> 16) |
327 | #define ecb_bswap16(x) (__builtin_bswap32 (x) >> 16) |
231 | #define ecb_bswap32(x) __builtin_bswap32 (x) |
328 | #define ecb_bswap32(x) __builtin_bswap32 (x) |
232 | #define ecb_bswap64(x) __builtin_bswap64 (x) |
329 | #define ecb_bswap64(x) __builtin_bswap64 (x) |
233 | #else |
330 | #else |
234 | ecb_function_ uint32_t ecb_bswap16 (uint32_t x) ecb_const; |
331 | ecb_function_ uint16_t ecb_bswap16 (uint16_t x) ecb_const; |
235 | ecb_function_ uint32_t |
332 | ecb_function_ uint16_t |
236 | ecb_bswap16 (uint32_t x) |
333 | ecb_bswap16 (uint16_t x) |
237 | { |
334 | { |
238 | return ((x >> 8) & 0xff) |
335 | return ecb_rotl16 (x, 8); |
239 | | ((x << 8) & 0x00ff0000) |
|
|
240 | | (x << 24); |
|
|
241 | } |
336 | } |
242 | |
337 | |
243 | ecb_function_ uint32_t ecb_bswap32 (uint32_t x) ecb_const; |
338 | ecb_function_ uint32_t ecb_bswap32 (uint32_t x) ecb_const; |
244 | ecb_function_ uint32_t |
339 | ecb_function_ uint32_t |
245 | ecb_bswap32 (uint32_t x) |
340 | ecb_bswap32 (uint32_t x) |
246 | { |
341 | { |
247 | return (x >> 24) |
342 | return (((uint32_t)ecb_bswap16 (x)) << 16) | ecb_bswap16 (x >> 16); |
248 | | ((x >> 8) & 0x0000ff00) |
|
|
249 | | ((x << 8) & 0x00ff0000) |
|
|
250 | | (x << 24); |
|
|
251 | } |
343 | } |
252 | |
344 | |
253 | ecb_function_ uint64_t ecb_bswap64 (uint64_t x) ecb_const; |
345 | ecb_function_ uint64_t ecb_bswap64 (uint64_t x) ecb_const; |
254 | ecb_function_ uint64_t |
346 | ecb_function_ uint64_t |
255 | ecb_bswap64 (uint64_t x) |
347 | ecb_bswap64 (uint64_t x) |
256 | { |
348 | { |
257 | return (((uint64_t)ecb_bswap32 (x)) << 32) |
349 | return (((uint64_t)ecb_bswap32 (x)) << 32) | ecb_bswap32 (x >> 32); |
258 | | ecb_bswap32 (x >> 32); |
|
|
259 | } |
350 | } |
260 | #endif |
351 | #endif |
261 | |
352 | |
262 | #if ECB_GCC_VERSION(4,5) |
353 | #if ECB_GCC_VERSION(4,5) |
263 | #define ecb_unreachable() __builtin_unreachable () |
354 | #define ecb_unreachable() __builtin_unreachable () |
… | |
… | |
298 | } |
389 | } |
299 | #else |
390 | #else |
300 | #define ecb_array_length(name) (sizeof (name) / sizeof (name [0])) |
391 | #define ecb_array_length(name) (sizeof (name) / sizeof (name [0])) |
301 | #endif |
392 | #endif |
302 | |
393 | |
303 | ecb_inline uint32_t ecb_rotr32 (uint32_t x, unsigned int count) ecb_const; |
|
|
304 | ecb_inline uint32_t |
|
|
305 | ecb_rotr32 (uint32_t x, unsigned int count) |
|
|
306 | { |
|
|
307 | return (x << (32 - count)) | (x >> count); |
|
|
308 | } |
|
|
309 | |
|
|
310 | ecb_inline uint32_t ecb_rotl32 (uint32_t x, unsigned int count) ecb_const; |
|
|
311 | ecb_inline uint32_t |
|
|
312 | ecb_rotl32 (uint32_t x, unsigned int count) |
|
|
313 | { |
|
|
314 | return (x >> (32 - count)) | (x << count); |
|
|
315 | } |
|
|
316 | |
|
|
317 | ecb_inline uint64_t ecb_rotr64 (uint64_t x, unsigned int count) ecb_const; |
|
|
318 | ecb_inline uint64_t |
|
|
319 | ecb_rotr64 (uint64_t x, unsigned int count) |
|
|
320 | { |
|
|
321 | return (x << (64 - count)) | (x >> count); |
|
|
322 | } |
|
|
323 | |
|
|
324 | ecb_inline uint64_t ecb_rotl64 (uint64_t x, unsigned int count) ecb_const; |
|
|
325 | ecb_inline uint64_t |
|
|
326 | ecb_rotl64 (uint64_t x, unsigned int count) |
|
|
327 | { |
|
|
328 | return (x >> (64 - count)) | (x << count); |
|
|
329 | } |
|
|
330 | |
|
|
331 | #endif |
394 | #endif |
332 | |
395 | |