…

#ifndef ECB_H
#define ECB_H

/* 16 bits major, 16 bits minor */
#define ECB_VERSION 0x00010007
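/* Illustrative note (not part of the original header): the major version is
   ECB_VERSION >> 16 and the minor version is ECB_VERSION & 0xffff, so
   0x00010007 corresponds to version 1.7. */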

#ifdef _WIN32
  typedef signed char int8_t;
  typedef unsigned char uint8_t;
  typedef signed short int16_t;
…
    typedef uint32_t uintptr_t;
    typedef int32_t intptr_t;
  #endif
#else
  #include <inttypes.h>
  #if (defined INTPTR_MAX ? INTPTR_MAX : ULONG_MAX) > 0xffffffffU
    #define ECB_PTRSIZE 8
  #else
    #define ECB_PTRSIZE 4
  #endif
#endif
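/* Illustrative use (assumes a C11 compiler for _Static_assert): ECB_PTRSIZE is
   the pointer size in bytes on the target, so a build-time sanity check could be

     _Static_assert (sizeof (void *) == ECB_PTRSIZE, "unexpected pointer size");
*/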

#define ECB_GCC_AMD64 (__amd64 || __amd64__ || __x86_64 || __x86_64__)
#define ECB_MSVC_AMD64 (_M_AMD64 || _M_X64)

#ifndef ECB_OPTIMIZE_SIZE
  #if __OPTIMIZE_SIZE__
    #define ECB_OPTIMIZE_SIZE 1
  #else
    #define ECB_OPTIMIZE_SIZE 0
  #endif
#endif

/* work around x32 idiocy by defining proper macros */
#if ECB_GCC_AMD64 || ECB_MSVC_AMD64
  #if _ILP32
    #define ECB_AMD64_X32 1
…
  #define ECB_CLANG_EXTENSION(x) 0
#endif

#define ECB_CPP (__cplusplus+0)
#define ECB_CPP11 (__cplusplus >= 201103L)
#define ECB_CPP14 (__cplusplus >= 201402L)
#define ECB_CPP17 (__cplusplus >= 201703L)

#if ECB_CPP
  #define ECB_C 0
  #define ECB_STDC_VERSION 0
#else
…
  #define ECB_STDC_VERSION __STDC_VERSION__
#endif

#define ECB_C99 (ECB_STDC_VERSION >= 199901L)
#define ECB_C11 (ECB_STDC_VERSION >= 201112L)
#define ECB_C17 (ECB_STDC_VERSION >= 201710L)
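/* Illustrative use: each of these macros evaluates to nonzero when the
   translation unit is compiled as at least the named language standard, e.g.

     #if ECB_C11 || ECB_CPP11
       ... code that relies on C11 or C++11 features ...
     #endif
*/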

#if ECB_CPP
  #define ECB_EXTERN_C extern "C"
  #define ECB_EXTERN_C_BEG ECB_EXTERN_C {
  #define ECB_EXTERN_C_END }
…
/* http://www-01.ibm.com/support/knowledgecenter/SSGH3R_13.1.0/com.ibm.xlcpp131.aix.doc/compiler_ref/compiler_builtins.html */
#if __xlC__ && ECB_CPP
  #include <builtins.h>
#endif

#if 1400 <= _MSC_VER
  #include <intrin.h> /* fence functions _ReadBarrier, also bit search functions _BitScanReverse */
#endif

#ifndef ECB_MEMORY_FENCE
  #if ECB_GCC_VERSION(2,5) || defined __INTEL_COMPILER || (__llvm__ && __GNUC__) || __SUNPRO_C >= 0x5110 || __SUNPRO_CC >= 0x5110
    #define ECB_MEMORY_FENCE_RELAXED __asm__ __volatile__ ("" : : : "memory")
    #if __i386 || __i386__
      #define ECB_MEMORY_FENCE __asm__ __volatile__ ("lock; orb $0, -1(%%esp)" : : : "memory")
      #define ECB_MEMORY_FENCE_ACQUIRE __asm__ __volatile__ ("" : : : "memory")
      #define ECB_MEMORY_FENCE_RELEASE __asm__ __volatile__ ("" : : : "memory")
    #elif ECB_GCC_AMD64
      #define ECB_MEMORY_FENCE __asm__ __volatile__ ("mfence" : : : "memory")
      #define ECB_MEMORY_FENCE_ACQUIRE __asm__ __volatile__ ("" : : : "memory")
      #define ECB_MEMORY_FENCE_RELEASE __asm__ __volatile__ ("" : : : "memory")
    #elif __powerpc__ || __ppc__ || __powerpc64__ || __ppc64__
      #define ECB_MEMORY_FENCE __asm__ __volatile__ ("sync" : : : "memory")
    #elif defined __ARM_ARCH_2__ \
      || defined __ARM_ARCH_3__ || defined __ARM_ARCH_3M__ \
      || defined __ARM_ARCH_4__ || defined __ARM_ARCH_4T__ \
      || defined __ARM_ARCH_5__ || defined __ARM_ARCH_5E__ \
      || defined __ARM_ARCH_5T__ || defined __ARM_ARCH_5TE__ \
      || defined __ARM_ARCH_5TEJ__
      /* should not need any, unless running old code on newer cpu - arm doesn't support that */
    #elif defined __ARM_ARCH_6__ || defined __ARM_ARCH_6J__ \
      || defined __ARM_ARCH_6K__ || defined __ARM_ARCH_6ZK__ \
      || defined __ARM_ARCH_6T2__
      #define ECB_MEMORY_FENCE __asm__ __volatile__ ("mcr p15,0,%0,c7,c10,5" : : "r" (0) : "memory")
    #elif defined __ARM_ARCH_7__ || defined __ARM_ARCH_7A__ \
      || defined __ARM_ARCH_7R__ || defined __ARM_ARCH_7M__
      #define ECB_MEMORY_FENCE __asm__ __volatile__ ("dmb" : : : "memory")
    #elif __aarch64__
      #define ECB_MEMORY_FENCE __asm__ __volatile__ ("dmb ish" : : : "memory")
    #elif (__sparc || __sparc__) && !(__sparc_v8__ || defined __sparcv8)
      #define ECB_MEMORY_FENCE __asm__ __volatile__ ("membar #LoadStore | #LoadLoad | #StoreStore | #StoreLoad" : : : "memory")
…
  #if ECB_GCC_VERSION(4,7)
    /* see comment below (stdatomic.h) about the C11 memory model. */
    #define ECB_MEMORY_FENCE __atomic_thread_fence (__ATOMIC_SEQ_CST)
    #define ECB_MEMORY_FENCE_ACQUIRE __atomic_thread_fence (__ATOMIC_ACQUIRE)
    #define ECB_MEMORY_FENCE_RELEASE __atomic_thread_fence (__ATOMIC_RELEASE)
    #define ECB_MEMORY_FENCE_RELAXED __atomic_thread_fence (__ATOMIC_RELAXED)

  #elif ECB_CLANG_EXTENSION(c_atomic)
    /* see comment below (stdatomic.h) about the C11 memory model. */
    #define ECB_MEMORY_FENCE __c11_atomic_thread_fence (__ATOMIC_SEQ_CST)
    #define ECB_MEMORY_FENCE_ACQUIRE __c11_atomic_thread_fence (__ATOMIC_ACQUIRE)
    #define ECB_MEMORY_FENCE_RELEASE __c11_atomic_thread_fence (__ATOMIC_RELEASE)
    #define ECB_MEMORY_FENCE_RELAXED __c11_atomic_thread_fence (__ATOMIC_RELAXED)

  #elif ECB_GCC_VERSION(4,4) || defined __INTEL_COMPILER || defined __clang__
    #define ECB_MEMORY_FENCE __sync_synchronize ()
  #elif _MSC_VER >= 1500 /* VC++ 2008 */
    /* apparently, microsoft broke all the memory barrier stuff in Visual Studio 2008... */
…
  #elif defined _WIN32
    #include <WinNT.h>
    #define ECB_MEMORY_FENCE MemoryBarrier () /* actually just xchg on x86... scary */
  #elif __SUNPRO_C >= 0x5110 || __SUNPRO_CC >= 0x5110
    #include <mbarrier.h>
    #define ECB_MEMORY_FENCE __machine_rw_barrier ()
    #define ECB_MEMORY_FENCE_ACQUIRE __machine_acq_barrier ()
    #define ECB_MEMORY_FENCE_RELEASE __machine_rel_barrier ()
    #define ECB_MEMORY_FENCE_RELAXED __compiler_barrier ()
  #elif __xlC__
    #define ECB_MEMORY_FENCE __sync ()
  #endif
#endif

#ifndef ECB_MEMORY_FENCE
  #if ECB_C11 && !defined __STDC_NO_ATOMICS__
    /* we assume that these memory fences work on all variables/all memory accesses, */
    /* not just C11 atomics and atomic accesses */
    #include <stdatomic.h>
    #define ECB_MEMORY_FENCE atomic_thread_fence (memory_order_seq_cst)
    #define ECB_MEMORY_FENCE_ACQUIRE atomic_thread_fence (memory_order_acquire)
    #define ECB_MEMORY_FENCE_RELEASE atomic_thread_fence (memory_order_release)
  #endif
#endif

#ifndef ECB_MEMORY_FENCE
  #if !ECB_AVOID_PTHREADS
…

#if !defined ECB_MEMORY_FENCE_RELEASE && defined ECB_MEMORY_FENCE
  #define ECB_MEMORY_FENCE_RELEASE ECB_MEMORY_FENCE
#endif

#if !defined ECB_MEMORY_FENCE_RELAXED && defined ECB_MEMORY_FENCE
  #define ECB_MEMORY_FENCE_RELAXED ECB_MEMORY_FENCE /* very heavy-handed */
#endif

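/* Illustrative only (not part of the original header): the release/relaxed
   fallbacks above ensure these macros are always usable once ECB_MEMORY_FENCE
   itself is defined. A typical flag-passing sketch, with hypothetical shared
   variables "data" and "ready":

     producer:  data = compute ();
                ECB_MEMORY_FENCE_RELEASE;   (publish data before the flag)
                ready = 1;

     consumer:  while (!ready) ;
                ECB_MEMORY_FENCE_ACQUIRE;   (see the flag before reading data)
                use (data);
*/
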
/*****************************************************************************/

#if ECB_CPP
  #define ecb_inline static inline
#elif ECB_GCC_VERSION(2,5)
…
#else
  ecb_function_ ecb_const int ecb_ctz32 (uint32_t x);
  ecb_function_ ecb_const int
  ecb_ctz32 (uint32_t x)
  {
#if 1400 <= _MSC_VER && (_M_IX86 || _M_X64 || _M_IA64 || _M_ARM)
    unsigned long r;
    _BitScanForward (&r, x);
    return (int)r;
#else
    int r = 0;

    x &= ~x + 1; /* this isolates the lowest bit */

#if ECB_branchless_on_i386
…
    if (x & 0xff00ff00) r += 8;
    if (x & 0xffff0000) r += 16;
#endif

    return r;
#endif
  }
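  /* Illustrative only: ecb_ctz32 returns the number of trailing zero bits,
     i.e. the index of the least significant set bit, so ecb_ctz32 (0x08) == 3
     and ecb_ctz32 (1) == 0; the result for x == 0 is not meaningful. */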

  ecb_function_ ecb_const int ecb_ctz64 (uint64_t x);
  ecb_function_ ecb_const int
  ecb_ctz64 (uint64_t x)
  {
#if 1400 <= _MSC_VER && (_M_X64 || _M_IA64 || _M_ARM)
    unsigned long r;
    _BitScanForward64 (&r, x);
    return (int)r;
#else
    int shift = x & 0xffffffff ? 0 : 32;
    return ecb_ctz32 (x >> shift) + shift;
#endif
  }

  ecb_function_ ecb_const int ecb_popcount32 (uint32_t x);
  ecb_function_ ecb_const int
  ecb_popcount32 (uint32_t x)
…
  }

  ecb_function_ ecb_const int ecb_ld32 (uint32_t x);
  ecb_function_ ecb_const int ecb_ld32 (uint32_t x)
  {
#if 1400 <= _MSC_VER && (_M_IX86 || _M_X64 || _M_IA64 || _M_ARM)
    unsigned long r;
    _BitScanReverse (&r, x);
    return (int)r;
#else
    int r = 0;

    if (x >> 16) { x >>= 16; r += 16; }
    if (x >>  8) { x >>=  8; r +=  8; }
    if (x >>  4) { x >>=  4; r +=  4; }
    if (x >>  2) { x >>=  2; r +=  2; }
    if (x >>  1) {           r +=  1; }

    return r;
#endif
  }
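  /* Illustrative only: ecb_ld32 returns the index of the most significant set
     bit, i.e. floor (log2 (x)), so ecb_ld32 (1) == 0 and ecb_ld32 (0x10000) == 16;
     as with the bit-scan intrinsics, the result for x == 0 is not meaningful. */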
482 | |
525 | |
483 | ecb_function_ ecb_const int ecb_ld64 (uint64_t x); |
526 | ecb_function_ ecb_const int ecb_ld64 (uint64_t x); |
484 | ecb_function_ ecb_const int ecb_ld64 (uint64_t x) |
527 | ecb_function_ ecb_const int ecb_ld64 (uint64_t x) |
485 | { |
528 | { |
|
|
529 | #if 1400 <= _MSC_VER && (_M_X64 || _M_IA64 || _M_ARM) |
|
|
530 | unsigned long r; |
|
|
531 | _BitScanReverse64 (&r, x); |
|
|
532 | return (int)r; |
|
|
533 | #else |
486 | int r = 0; |
534 | int r = 0; |
487 | |
535 | |
488 | if (x >> 32) { x >>= 32; r += 32; } |
536 | if (x >> 32) { x >>= 32; r += 32; } |
489 | |
537 | |
490 | return r + ecb_ld32 (x); |
538 | return r + ecb_ld32 (x); |
|
|
539 | #endif |
491 | } |
540 | } |
492 | #endif |
541 | #endif |
493 | |
542 | |
494 | ecb_function_ ecb_const ecb_bool ecb_is_pot32 (uint32_t x); |
543 | ecb_function_ ecb_const ecb_bool ecb_is_pot32 (uint32_t x); |
495 | ecb_function_ ecb_const ecb_bool ecb_is_pot32 (uint32_t x) { return !(x & (x - 1)); } |
544 | ecb_function_ ecb_const ecb_bool ecb_is_pot32 (uint32_t x) { return !(x & (x - 1)); } |
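/* Illustrative only: ecb_is_pot32 (4) and ecb_is_pot32 (0x80000000) are nonzero,
   ecb_is_pot32 (6) is 0; note that the expression is also nonzero for x == 0. */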
…
#endif

/* try to tell the compiler that some condition is definitely true */
#define ecb_assume(cond) if (!(cond)) ecb_unreachable (); else 0
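/* Illustrative only: ecb_assume is used like a statement, e.g. ecb_assume (n > 0);
   after which the optimiser may treat n > 0 as established; if the condition is
   ever false at run time, behaviour is undefined (ecb_unreachable is reached). */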
602 | |
651 | |
603 | ecb_inline ecb_const unsigned char ecb_byteorder_helper (void); |
652 | ecb_inline ecb_const uint32_t ecb_byteorder_helper (void); |
604 | ecb_inline ecb_const unsigned char |
653 | ecb_inline ecb_const uint32_t |
605 | ecb_byteorder_helper (void) |
654 | ecb_byteorder_helper (void) |
606 | { |
655 | { |
607 | /* the union code still generates code under pressure in gcc, */ |
656 | /* the union code still generates code under pressure in gcc, */ |
608 | /* but less than using pointers, and always seems to */ |
657 | /* but less than using pointers, and always seems to */ |
609 | /* successfully return a constant. */ |
658 | /* successfully return a constant. */ |
610 | /* the reason why we have this horrible preprocessor mess */ |
659 | /* the reason why we have this horrible preprocessor mess */ |
611 | /* is to avoid it in all cases, at least on common architectures */ |
660 | /* is to avoid it in all cases, at least on common architectures */ |
612 | /* or when using a recent enough gcc version (>= 4.6) */ |
661 | /* or when using a recent enough gcc version (>= 4.6) */ |
613 | #if ((__i386 || __i386__) && !__VOS__) || _M_IX86 || ECB_GCC_AMD64 || ECB_MSVC_AMD64 |
|
|
614 | return 0x44; |
|
|
615 | #elif __BYTE_ORDER__ && __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__ |
662 | #if (defined __BYTE_ORDER__ && __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__) \ |
|
|
663 | || ((__i386 || __i386__ || _M_IX86 || ECB_GCC_AMD64 || ECB_MSVC_AMD64) && !__VOS__) |
|
|
664 | #define ECB_LITTLE_ENDIAN 1 |
616 | return 0x44; |
665 | return 0x44332211; |
617 | #elif __BYTE_ORDER__ && __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__ |
666 | #elif (defined __BYTE_ORDER__ && __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__) \ |
|
|
667 | || ((__AARCH64EB__ || __MIPSEB__ || __ARMEB__) && !__VOS__) |
|
|
668 | #define ECB_BIG_ENDIAN 1 |
618 | return 0x11; |
669 | return 0x11223344; |
619 | #else |
670 | #else |
620 | union |
671 | union |
621 | { |
672 | { |
|
|
673 | uint8_t c[4]; |
622 | uint32_t i; |
674 | uint32_t u; |
623 | uint8_t c; |
|
|
624 | } u = { 0x11223344 }; |
675 | } u = { 0x11, 0x22, 0x33, 0x44 }; |
625 | return u.c; |
676 | return u.u; |
626 | #endif |
677 | #endif |
627 | } |
678 | } |
628 | |
679 | |
629 | ecb_inline ecb_const ecb_bool ecb_big_endian (void); |
680 | ecb_inline ecb_const ecb_bool ecb_big_endian (void); |
630 | ecb_inline ecb_const ecb_bool ecb_big_endian (void) { return ecb_byteorder_helper () == 0x11; } |
681 | ecb_inline ecb_const ecb_bool ecb_big_endian (void) { return ecb_byteorder_helper () == 0x11223344; } |
631 | ecb_inline ecb_const ecb_bool ecb_little_endian (void); |
682 | ecb_inline ecb_const ecb_bool ecb_little_endian (void); |
632 | ecb_inline ecb_const ecb_bool ecb_little_endian (void) { return ecb_byteorder_helper () == 0x44; } |
683 | ecb_inline ecb_const ecb_bool ecb_little_endian (void) { return ecb_byteorder_helper () == 0x44332211; } |
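
/* Illustrative only: on a little-endian host ecb_little_endian () returns
   nonzero and ecb_big_endian () returns 0, and vice versa on a big-endian one;
   on the common architectures handled above both calls fold to compile-time
   constants, e.g.

     if (ecb_big_endian ())
       swap_bytes (buf, len);   (hypothetical byte-swapping helper)
*/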
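/* Illustrative only, for the ecb_mod macro defined below: both variants compute
   a floor-style modulus whose result lies in [0, n) for positive n, e.g.
   ecb_mod (-1, 5) evaluates to 4 and ecb_mod (7, 5) to 2. */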
#if ECB_GCC_VERSION(3,0) || ECB_C99
  #define ecb_mod(m,n) ((m) % (n) + ((m) % (n) < 0 ? (n) : 0))
#else
  #define ecb_mod(m,n) ((m) < 0 ? ((n) - 1 - ((-1 - (m)) % (n))) : ((m) % (n)))
…
  }
#else
  #define ecb_array_length(name) (sizeof (name) / sizeof (name [0]))
#endif
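/* Illustrative only: ecb_array_length yields the element count of a true array
   (not a pointer), e.g. for "int buf [16];" ecb_array_length (buf) is 16. */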

ecb_function_ ecb_const uint32_t ecb_binary16_to_binary32 (uint32_t x);
ecb_function_ ecb_const uint32_t
ecb_binary16_to_binary32 (uint32_t x)
{
  unsigned int s = (x & 0x8000) << (31 - 15);
  int e = (x >> 10) & 0x001f;
  unsigned int m = x & 0x03ff;
