…
  typedef uint32_t uintptr_t;
  typedef  int32_t  intptr_t;
 #endif
#else
 #include <inttypes.h>
 #if (defined INTPTR_MAX ? INTPTR_MAX : ULONG_MAX) > 0xffffffffU
  #define ECB_PTRSIZE 8
 #else
  #define ECB_PTRSIZE 4
 #endif
#endif
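/* Editor's sketch: ECB_PTRSIZE is meant to match the size of a pointer in
   bytes on the target. A minimal sanity check of the detection above,
   assuming the header is available as "ecb.h" (the include name is an
   assumption); the block is disabled so it cannot affect the header: */
#if 0
#include <stdio.h>
#include "ecb.h" /* assumed include name for this header */

int main (void)
{
  /* ECB_PTRSIZE should agree with the real pointer size */
  printf ("ECB_PTRSIZE=%d sizeof(void*)=%d\n",
          ECB_PTRSIZE, (int)sizeof (void *));
  return ECB_PTRSIZE == (int)sizeof (void *) ? 0 : 1;
}
#endif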
…
#endif

/* http://www-01.ibm.com/support/knowledgecenter/SSGH3R_13.1.0/com.ibm.xlcpp131.aix.doc/compiler_ref/compiler_builtins.html */
#if __xlC__ && ECB_CPP
  #include <builtins.h>
#endif

#if 1400 <= _MSC_VER
  #include <intrin.h> /* fence functions _ReadBarrier, also bit search functions _BitScanReverse */
#endif

#ifndef ECB_MEMORY_FENCE
  #if ECB_GCC_VERSION(2,5) || defined __INTEL_COMPILER || (__llvm__ && __GNUC__) || __SUNPRO_C >= 0x5110 || __SUNPRO_CC >= 0x5110
    #if __i386 || __i386__
…
      #define ECB_MEMORY_FENCE         __asm__ __volatile__ ("mfence"   : : : "memory")
      #define ECB_MEMORY_FENCE_ACQUIRE __asm__ __volatile__ (""         : : : "memory")
      #define ECB_MEMORY_FENCE_RELEASE __asm__ __volatile__ ("")
    #elif __powerpc__ || __ppc__ || __powerpc64__ || __ppc64__
      #define ECB_MEMORY_FENCE         __asm__ __volatile__ ("sync"     : : : "memory")
    #elif defined __ARM_ARCH_2__ \
      || defined __ARM_ARCH_3__  || defined __ARM_ARCH_3M__  \
      || defined __ARM_ARCH_4__  || defined __ARM_ARCH_4T__  \
      || defined __ARM_ARCH_5__  || defined __ARM_ARCH_5E__  \
      || defined __ARM_ARCH_5T__ || defined __ARM_ARCH_5TE__ \
      || defined __ARM_ARCH_5TEJ__
      /* should not need any, unless running old code on newer cpu - arm doesn't support that */
    #elif defined __ARM_ARCH_6__  || defined __ARM_ARCH_6J__  \
      || defined __ARM_ARCH_6K__  || defined __ARM_ARCH_6ZK__ \
      || defined __ARM_ARCH_6T2__
      #define ECB_MEMORY_FENCE         __asm__ __volatile__ ("mcr p15,0,%0,c7,c10,5" : : "r" (0) : "memory")
    #elif defined __ARM_ARCH_7__  || defined __ARM_ARCH_7A__  \
      || defined __ARM_ARCH_7R__  || defined __ARM_ARCH_7M__
      #define ECB_MEMORY_FENCE         __asm__ __volatile__ ("dmb"      : : : "memory")
    #elif __aarch64__
      #define ECB_MEMORY_FENCE         __asm__ __volatile__ ("dmb ish"  : : : "memory")
    #elif (__sparc || __sparc__) && !(__sparc_v8__ || defined __sparcv8)
      #define ECB_MEMORY_FENCE         __asm__ __volatile__ ("membar #LoadStore | #LoadLoad | #StoreStore | #StoreLoad" : : : "memory")
…
#else
  ecb_function_ ecb_const int ecb_ctz32 (uint32_t x);
  ecb_function_ ecb_const int
  ecb_ctz32 (uint32_t x)
  {
#if 1400 <= _MSC_VER && (_M_IX86 || _M_X64 || _M_IA64 || _M_ARM)
    unsigned long r;
    _BitScanForward (&r, x);
    return (int)r;
#else
    int r = 0;

    x &= ~x + 1; /* this isolates the lowest bit */

#if ECB_branchless_on_i386
…
    if (x & 0xff00ff00) r +=  8;
    if (x & 0xffff0000) r += 16;
#endif

    return r;
#endif
  }
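/* Editor's sketch: ecb_ctz32 returns the index of the least significant set
   bit, i.e. the count of trailing zero bits (undefined for x == 0). The
   x &= ~x + 1 step works because ~x + 1 equals -x in two's complement, so
   x & -x keeps only the lowest set bit. Disabled illustration, assuming the
   header is included as "ecb.h": */
#if 0
#include <assert.h>
#include "ecb.h" /* assumed include name */

int main (void)
{
  assert (ecb_ctz32 (0x00000001) ==  0); /* bit 0 set */
  assert (ecb_ctz32 (0x00000010) ==  4); /* bit 4 set */
  assert (ecb_ctz32 (0x80000000) == 31); /* only the top bit set */
  /* lowest-bit isolation: 12 is 0b1100, 12 & -12 == 4 == 0b0100 */
  assert ((12u & (~12u + 1)) == 4u);
  return 0;
}
#endif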

  ecb_function_ ecb_const int ecb_ctz64 (uint64_t x);
  ecb_function_ ecb_const int
  ecb_ctz64 (uint64_t x)
  {
#if 1400 <= _MSC_VER && (_M_X64 || _M_IA64 || _M_ARM)
    unsigned long r;
    _BitScanForward64 (&r, x);
    return (int)r;
#else
    int shift = x & 0xffffffff ? 0 : 32;
    return ecb_ctz32 (x >> shift) + shift;
#endif
  }
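/* Editor's sketch: the 64 bit variant reduces to ecb_ctz32 by shifting the
   upper half down when the lower 32 bits are all zero. Disabled
   illustration, same assumed include name as above: */
#if 0
#include <assert.h>
#include "ecb.h" /* assumed include name */

int main (void)
{
  assert (ecb_ctz64 (0x0000000100000000ULL) == 32); /* lower half empty */
  assert (ecb_ctz64 (0x0000000000000008ULL) ==  3); /* handled by ecb_ctz32 */
  return 0;
}
#endif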

  ecb_function_ ecb_const int ecb_popcount32 (uint32_t x);
  ecb_function_ ecb_const int
  ecb_popcount32 (uint32_t x)
…
  }
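/* Editor's sketch: ecb_popcount32 counts the set bits in its argument (its
   body is elided above). Disabled illustration of the expected results: */
#if 0
#include <assert.h>
#include "ecb.h" /* assumed include name */

int main (void)
{
  assert (ecb_popcount32 (0x00000000) ==  0);
  assert (ecb_popcount32 (0x000000ff) ==  8);
  assert (ecb_popcount32 (0xffffffff) == 32);
  return 0;
}
#endif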

  ecb_function_ ecb_const int ecb_ld32 (uint32_t x);
  ecb_function_ ecb_const int ecb_ld32 (uint32_t x)
  {
#if 1400 <= _MSC_VER && (_M_IX86 || _M_X64 || _M_IA64 || _M_ARM)
    unsigned long r;
    _BitScanReverse (&r, x);
    return (int)r;
#else
    int r = 0;

    if (x >> 16) { x >>= 16; r += 16; }
    if (x >>  8) { x >>=  8; r +=  8; }
    if (x >>  4) { x >>=  4; r +=  4; }
    if (x >>  2) { x >>=  2; r +=  2; }
    if (x >>  1) {           r +=  1; }

    return r;
#endif
  }
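/* Editor's sketch: ecb_ld32 computes the integer binary logarithm, i.e. the
   index of the most significant set bit, so ecb_ld32 (x) == floor (log2 (x))
   for x > 0; the fallback narrows the search by halving the shift at each
   step. Disabled illustration: */
#if 0
#include <assert.h>
#include "ecb.h" /* assumed include name */

int main (void)
{
  assert (ecb_ld32 (1)          ==  0);
  assert (ecb_ld32 (1000)       ==  9); /* 2^9 = 512 <= 1000 < 1024 = 2^10 */
  assert (ecb_ld32 (0x80000000) == 31);
  return 0;
}
#endif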

  ecb_function_ ecb_const int ecb_ld64 (uint64_t x);
  ecb_function_ ecb_const int ecb_ld64 (uint64_t x)
  {
#if 1400 <= _MSC_VER && (_M_X64 || _M_IA64 || _M_ARM)
    unsigned long r;
    _BitScanReverse64 (&r, x);
    return (int)r;
#else
    int r = 0;

    if (x >> 32) { x >>= 32; r += 32; }

    return r + ecb_ld32 (x);
#endif
  }
#endif
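/* Editor's sketch: ecb_ld64 handles the upper half explicitly and then
   defers to ecb_ld32, so ecb_ld64 (x) == floor (log2 (x)) for x > 0.
   Disabled illustration: */
#if 0
#include <assert.h>
#include "ecb.h" /* assumed include name */

int main (void)
{
  assert (ecb_ld64 (1ULL)       ==  0);
  assert (ecb_ld64 (1ULL << 40) == 40); /* exercises the x >> 32 branch */
  return 0;
}
#endif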

ecb_function_ ecb_const ecb_bool ecb_is_pot32 (uint32_t x);
ecb_function_ ecb_const ecb_bool ecb_is_pot32 (uint32_t x) { return !(x & (x - 1)); }
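/* Editor's sketch: the classic !(x & (x - 1)) test clears the lowest set bit
   and checks that nothing remains; note that 0 also passes the test, so
   callers that care must check for zero themselves. Disabled illustration: */
#if 0
#include <assert.h>
#include "ecb.h" /* assumed include name */

int main (void)
{
  assert ( ecb_is_pot32 (1));
  assert ( ecb_is_pot32 (4096));
  assert (!ecb_is_pot32 (12));
  assert ( ecb_is_pot32 (0)); /* caveat: 0 is reported as a power of two */
  return 0;
}
#endif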
…
#endif

/* try to tell the compiler that some condition is definitely true */
#define ecb_assume(cond) if (!(cond)) ecb_unreachable (); else 0
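/* Editor's sketch: ecb_assume tells the optimiser that a condition holds by
   making the false branch unreachable; the trailing "else 0" swallows the
   caller's semicolon so the macro behaves like a single statement. Disabled
   illustration (scale4 and its parameters are hypothetical): */
#if 0
#include "ecb.h" /* assumed include name */

/* hypothetical helper: promising that n is a multiple of four can let the */
/* compiler unroll or vectorise the loop without a scalar tail             */
static void
scale4 (float *a, int n)
{
  int i;

  ecb_assume (n % 4 == 0); /* a false condition would be unreachable */

  for (i = 0; i < n; ++i)
    a[i] *= 2.0f;
}
#endif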

ecb_inline ecb_const uint32_t ecb_byteorder_helper (void);
ecb_inline ecb_const uint32_t
ecb_byteorder_helper (void)
{
  /* the union code still generates code under pressure in gcc, */
  /* but less than using pointers, and always seems to */
  /* successfully return a constant. */
  /* the reason why we have this horrible preprocessor mess */
  /* is to avoid it in all cases, at least on common architectures */
  /* or when using a recent enough gcc version (>= 4.6) */
#if (defined __BYTE_ORDER__ && __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__) \
    || ((__i386 || __i386__ || _M_IX86 || ECB_GCC_AMD64 || ECB_MSVC_AMD64) && !__VOS__)
  #define ECB_LITTLE_ENDIAN 1
  return 0x44332211;
#elif (defined __BYTE_ORDER__ && __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__) \
      || ((__AARCH64EB__ || __MIPSEB__ || __ARMEB__) && !__VOS__)
  #define ECB_BIG_ENDIAN 1
  return 0x11223344;
#else
  union
  {
    uint8_t c[4];
    uint32_t u;
  } u = { 0x11, 0x22, 0x33, 0x44 };
  return u.u;
#endif
}

ecb_inline ecb_const ecb_bool ecb_big_endian    (void);
ecb_inline ecb_const ecb_bool ecb_big_endian    (void) { return ecb_byteorder_helper () == 0x11223344; }
ecb_inline ecb_const ecb_bool ecb_little_endian (void);
ecb_inline ecb_const ecb_bool ecb_little_endian (void) { return ecb_byteorder_helper () == 0x44332211; }
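/* Editor's sketch: ecb_byteorder_helper returns 0x44332211 on little endian
   and 0x11223344 on big endian targets, either as a compile time constant or
   via the byte-wise initialised union, and the two predicates compare
   against those values. Disabled illustration: */
#if 0
#include <stdio.h>
#include "ecb.h" /* assumed include name */

int main (void)
{
  /* exactly one of the two predicates is true for a given target */
  printf ("big endian: %d, little endian: %d\n",
          (int)ecb_big_endian (), (int)ecb_little_endian ());
  return 0;
}
#endif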

#if ECB_GCC_VERSION(3,0) || ECB_C99
  /* C99 guarantees truncating division, so the remainder has the sign of  */
  /* the dividend and a single correction yields a non-negative result     */
  #define ecb_mod(m,n) ((m) % (n) + ((m) % (n) < 0 ? (n) : 0))
#else
  /* pre-C99, % is implementation-defined for negative operands, so */
  /* rewrite the negative case in terms of a non-negative remainder */
  #define ecb_mod(m,n) ((m) < 0 ? ((n) - 1 - ((-1 - (m)) % (n))) : ((m) % (n)))