… | |
… | |
504 | */ |
504 | */ |
505 | |
505 | |
506 | #ifndef ECB_H |
506 | #ifndef ECB_H |
507 | #define ECB_H |
507 | #define ECB_H |
508 | |
508 | |
|
|
509 | /* 16 bits major, 16 bits minor */ |
|
|
510 | #define ECB_VERSION 0x00010001 |
|
|
511 | |
509 | #ifdef _WIN32 |
512 | #ifdef _WIN32 |
510 | typedef signed char int8_t; |
513 | typedef signed char int8_t; |
511 | typedef unsigned char uint8_t; |
514 | typedef unsigned char uint8_t; |
512 | typedef signed short int16_t; |
515 | typedef signed short int16_t; |
513 | typedef unsigned short uint16_t; |
516 | typedef unsigned short uint16_t; |
… | |
… | |
518 | typedef unsigned long long uint64_t; |
521 | typedef unsigned long long uint64_t; |
519 | #else /* _MSC_VER || __BORLANDC__ */ |
522 | #else /* _MSC_VER || __BORLANDC__ */ |
520 | typedef signed __int64 int64_t; |
523 | typedef signed __int64 int64_t; |
521 | typedef unsigned __int64 uint64_t; |
524 | typedef unsigned __int64 uint64_t; |
522 | #endif |
525 | #endif |
|
|
526 | #ifdef _WIN64 |
|
|
527 | #define ECB_PTRSIZE 8 |
|
|
528 | typedef uint64_t uintptr_t; |
|
|
529 | typedef int64_t intptr_t; |
|
|
530 | #else |
|
|
531 | #define ECB_PTRSIZE 4 |
|
|
532 | typedef uint32_t uintptr_t; |
|
|
533 | typedef int32_t intptr_t; |
|
|
534 | #endif |
|
|
535 | typedef intptr_t ptrdiff_t; |
523 | #else |
536 | #else |
524 | #include <inttypes.h> |
537 | #include <inttypes.h> |
|
|
538 | #if UINTMAX_MAX > 0xffffffffU |
|
|
539 | #define ECB_PTRSIZE 8 |
|
|
540 | #else |
|
|
541 | #define ECB_PTRSIZE 4 |
|
|
542 | #endif |
525 | #endif |
543 | #endif |
526 | |
544 | |
527 | /* many compilers define __GNUC__ to some versions but then only implement |
545 | /* many compilers define __GNUC__ to some versions but then only implement |
528 | * what their idiot authors think are the "more important" extensions, |
546 | * what their idiot authors think are the "more important" extensions, |
529 | * causing enormous grief in return for some better fake benchmark numbers. |
547 | * causing enormous grief in return for some better fake benchmark numbers. |
… | |
… | |
537 | #else |
555 | #else |
538 | #define ECB_GCC_VERSION(major,minor) (__GNUC__ > (major) || (__GNUC__ == (major) && __GNUC_MINOR__ >= (minor))) |
556 | #define ECB_GCC_VERSION(major,minor) (__GNUC__ > (major) || (__GNUC__ == (major) && __GNUC_MINOR__ >= (minor))) |
539 | #endif |
557 | #endif |
540 | #endif |
558 | #endif |
541 | |
559 | |
|
|
560 | #define ECB_C (__STDC__+0) /* this assumes that __STDC__ is either empty or a number */ |
|
|
561 | #define ECB_C99 (__STDC_VERSION__ >= 199901L) |
|
|
562 | #define ECB_C11 (__STDC_VERSION__ >= 201112L) |
|
|
563 | #define ECB_CPP (__cplusplus+0) |
|
|
564 | #define ECB_CPP98 (__cplusplus >= 199711L) |
|
|
565 | #define ECB_CPP11 (__cplusplus >= 201103L) |
|
|
566 | |
542 | /*****************************************************************************/ |
567 | /*****************************************************************************/ |
543 | |
568 | |
544 | /* ECB_NO_THREADS - ecb is not used by multiple threads, ever */ |
569 | /* ECB_NO_THREADS - ecb is not used by multiple threads, ever */ |
545 | /* ECB_NO_SMP - ecb might be used in multiple threads, but only on a single cpu */ |
570 | /* ECB_NO_SMP - ecb might be used in multiple threads, but only on a single cpu */ |
546 | |
571 | |
547 | #if ECB_NO_THREADS |
572 | #if ECB_NO_THREADS |
548 | # define ECB_NO_SMP 1 |
573 | # define ECB_NO_SMP 1 |
549 | #endif |
574 | #endif |
550 | |
575 | |
551 | #if ECB_NO_THREADS || ECB_NO_SMP |
576 | #if ECB_NO_SMP |
552 | #define ECB_MEMORY_FENCE do { } while (0) |
577 | #define ECB_MEMORY_FENCE do { } while (0) |
553 | #endif |
578 | #endif |
554 | |
579 | |
555 | #ifndef ECB_MEMORY_FENCE |
580 | #ifndef ECB_MEMORY_FENCE |
556 | #if ECB_GCC_VERSION(2,5) || defined __INTEL_COMPILER || (__llvm__ && __GNUC__) || __SUNPRO_C >= 0x5110 || __SUNPRO_CC >= 0x5110 |
581 | #if ECB_GCC_VERSION(2,5) || defined __INTEL_COMPILER || (__llvm__ && __GNUC__) || __SUNPRO_C >= 0x5110 || __SUNPRO_CC >= 0x5110 |
557 | #if __i386 || __i386__ |
582 | #if __i386 || __i386__ |
558 | #define ECB_MEMORY_FENCE __asm__ __volatile__ ("lock; orb $0, -1(%%esp)" : : : "memory") |
583 | #define ECB_MEMORY_FENCE __asm__ __volatile__ ("lock; orb $0, -1(%%esp)" : : : "memory") |
559 | #define ECB_MEMORY_FENCE_ACQUIRE ECB_MEMORY_FENCE /* non-lock xchg might be enough */ |
584 | #define ECB_MEMORY_FENCE_ACQUIRE __asm__ __volatile__ ("" : : : "memory") |
560 | #define ECB_MEMORY_FENCE_RELEASE do { } while (0) /* unlikely to change in future cpus */ |
585 | #define ECB_MEMORY_FENCE_RELEASE __asm__ __volatile__ ("") |
561 | #elif __amd64 || __amd64__ || __x86_64 || __x86_64__ |
586 | #elif __amd64 || __amd64__ || __x86_64 || __x86_64__ |
562 | #define ECB_MEMORY_FENCE __asm__ __volatile__ ("mfence" : : : "memory") |
587 | #define ECB_MEMORY_FENCE __asm__ __volatile__ ("mfence" : : : "memory") |
563 | #define ECB_MEMORY_FENCE_ACQUIRE __asm__ __volatile__ ("lfence" : : : "memory") |
588 | #define ECB_MEMORY_FENCE_ACQUIRE __asm__ __volatile__ ("" : : : "memory") |
564 | #define ECB_MEMORY_FENCE_RELEASE __asm__ __volatile__ ("sfence") /* play safe - not needed in any current cpu */ |
589 | #define ECB_MEMORY_FENCE_RELEASE __asm__ __volatile__ ("") |
565 | #elif __powerpc__ || __ppc__ || __powerpc64__ || __ppc64__ |
590 | #elif __powerpc__ || __ppc__ || __powerpc64__ || __ppc64__ |
566 | #define ECB_MEMORY_FENCE __asm__ __volatile__ ("sync" : : : "memory") |
591 | #define ECB_MEMORY_FENCE __asm__ __volatile__ ("sync" : : : "memory") |
567 | #elif defined __ARM_ARCH_6__ || defined __ARM_ARCH_6J__ \ |
592 | #elif defined __ARM_ARCH_6__ || defined __ARM_ARCH_6J__ \ |
568 | || defined __ARM_ARCH_6K__ || defined __ARM_ARCH_6ZK__ |
593 | || defined __ARM_ARCH_6K__ || defined __ARM_ARCH_6ZK__ |
569 | #define ECB_MEMORY_FENCE __asm__ __volatile__ ("mcr p15,0,%0,c7,c10,5" : : "r" (0) : "memory") |
594 | #define ECB_MEMORY_FENCE __asm__ __volatile__ ("mcr p15,0,%0,c7,c10,5" : : "r" (0) : "memory") |
570 | #elif defined __ARM_ARCH_7__ || defined __ARM_ARCH_7A__ \ |
595 | #elif defined __ARM_ARCH_7__ || defined __ARM_ARCH_7A__ \ |
571 | || defined __ARM_ARCH_7M__ || defined __ARM_ARCH_7R__ |
596 | || defined __ARM_ARCH_7M__ || defined __ARM_ARCH_7R__ |
572 | #define ECB_MEMORY_FENCE __asm__ __volatile__ ("dmb" : : : "memory") |
597 | #define ECB_MEMORY_FENCE __asm__ __volatile__ ("dmb" : : : "memory") |
573 | #elif __sparc || __sparc__ |
598 | #elif __sparc || __sparc__ |
574 | #define ECB_MEMORY_FENCE __asm__ __volatile__ ("membar #LoadStore | #LoadLoad | #StoreStore | #StoreLoad | " : : : "memory") |
599 | #define ECB_MEMORY_FENCE __asm__ __volatile__ ("membar #LoadStore | #LoadLoad | #StoreStore | #StoreLoad" : : : "memory") |
575 | #define ECB_MEMORY_FENCE_ACQUIRE __asm__ __volatile__ ("membar #LoadStore | #LoadLoad" : : : "memory") |
600 | #define ECB_MEMORY_FENCE_ACQUIRE __asm__ __volatile__ ("membar #LoadStore | #LoadLoad" : : : "memory") |
576 | #define ECB_MEMORY_FENCE_RELEASE __asm__ __volatile__ ("membar #LoadStore | #StoreStore") |
601 | #define ECB_MEMORY_FENCE_RELEASE __asm__ __volatile__ ("membar #LoadStore | #StoreStore") |
577 | #elif defined __s390__ || defined __s390x__ |
602 | #elif defined __s390__ || defined __s390x__ |
578 | #define ECB_MEMORY_FENCE __asm__ __volatile__ ("bcr 15,0" : : : "memory") |
603 | #define ECB_MEMORY_FENCE __asm__ __volatile__ ("bcr 15,0" : : : "memory") |
579 | #elif defined __mips__ |
604 | #elif defined __mips__ |
580 | #define ECB_MEMORY_FENCE __asm__ __volatile__ ("sync" : : : "memory") |
605 | #define ECB_MEMORY_FENCE __asm__ __volatile__ ("sync" : : : "memory") |
581 | #elif defined __alpha__ |
606 | #elif defined __alpha__ |
582 | #define ECB_MEMORY_FENCE __asm__ __volatile__ ("mb" : : : "memory") |
607 | #define ECB_MEMORY_FENCE __asm__ __volatile__ ("mb" : : : "memory") |
|
|
608 | #elif defined __hppa__ |
|
|
609 | #define ECB_MEMORY_FENCE __asm__ __volatile__ ("" : : : "memory") |
|
|
610 | #define ECB_MEMORY_FENCE_RELEASE __asm__ __volatile__ ("") |
|
|
611 | #elif defined __ia64__ |
|
|
612 | #define ECB_MEMORY_FENCE __asm__ __volatile__ ("mf" : : : "memory") |
583 | #endif |
613 | #endif |
584 | #endif |
614 | #endif |
585 | #endif |
615 | #endif |
586 | |
616 | |
587 | #ifndef ECB_MEMORY_FENCE |
617 | #ifndef ECB_MEMORY_FENCE |
|
|
618 | #if ECB_GCC_VERSION(4,7) |
|
|
619 | /* see comment below about the C11 memory model. in short - avoid */ |
|
|
620 | #define ECB_MEMORY_FENCE __atomic_thread_fence (__ATOMIC_SEQ_CST) |
|
|
621 | #elif defined __clang && __has_feature (cxx_atomic) |
|
|
622 | /* see above */ |
|
|
623 | #define ECB_MEMORY_FENCE __c11_atomic_thread_fence (__ATOMIC_SEQ_CST) |
588 | #if ECB_GCC_VERSION(4,4) || defined __INTEL_COMPILER || defined __clang__ |
624 | #elif ECB_GCC_VERSION(4,4) || defined __INTEL_COMPILER || defined __clang__ |
589 | #define ECB_MEMORY_FENCE __sync_synchronize () |
625 | #define ECB_MEMORY_FENCE __sync_synchronize () |
590 | /*#define ECB_MEMORY_FENCE_ACQUIRE ({ char dummy = 0; __sync_lock_test_and_set (&dummy, 1); }) */ |
626 | /*#define ECB_MEMORY_FENCE_ACQUIRE ({ char dummy = 0; __sync_lock_test_and_set (&dummy, 1); }) */ |
591 | /*#define ECB_MEMORY_FENCE_RELEASE ({ char dummy = 1; __sync_lock_release (&dummy ); }) */ |
627 | /*#define ECB_MEMORY_FENCE_RELEASE ({ char dummy = 1; __sync_lock_release (&dummy ); }) */ |
592 | #elif _MSC_VER >= 1400 /* VC++ 2005 */ |
628 | #elif _MSC_VER >= 1400 /* VC++ 2005 */ |
593 | #pragma intrinsic(_ReadBarrier,_WriteBarrier,_ReadWriteBarrier) |
629 | #pragma intrinsic(_ReadBarrier,_WriteBarrier,_ReadWriteBarrier) |
… | |
… | |
606 | #define ECB_MEMORY_FENCE __sync () |
642 | #define ECB_MEMORY_FENCE __sync () |
607 | #endif |
643 | #endif |
608 | #endif |
644 | #endif |
609 | |
645 | |
610 | #ifndef ECB_MEMORY_FENCE |
646 | #ifndef ECB_MEMORY_FENCE |
|
|
647 | #if ECB_C11 && !defined __STDC_NO_ATOMICS__ |
|
|
648 | /* we assume that these memory fences work on all variables/all memory accesses, */ |
|
|
649 | /* not just C11 atomics and atomic accesses */ |
|
|
650 | #include <stdatomic.h> |
|
|
651 | /* unfortunately, the C11 memory model seems to be very limited, and unable to express */ |
|
|
652 | /* simple barrier semantics. That means we need to take out Thor's hammer. */ |
|
|
653 | #define ECB_MEMORY_FENCE atomic_thread_fence (memory_order_seq_cst) |
|
|
654 | #endif |
|
|
655 | #endif |
|
|
656 | #endif |
|
|
657 | |
|
|
658 | #ifndef ECB_MEMORY_FENCE |
611 | #if !ECB_AVOID_PTHREADS |
659 | #if !ECB_AVOID_PTHREADS |
612 | /* |
660 | /* |
613 | * if you get undefined symbol references to pthread_mutex_lock, |
661 | * if you get undefined symbol references to pthread_mutex_lock, |
614 | * or failure to find pthread.h, then you should implement |
662 | * or failure to find pthread.h, then you should implement |
615 | * the ECB_MEMORY_FENCE operations for your cpu/compiler |
663 | * the ECB_MEMORY_FENCE operations for your cpu/compiler |
… | |
… | |
633 | #define ECB_MEMORY_FENCE_RELEASE ECB_MEMORY_FENCE |
681 | #define ECB_MEMORY_FENCE_RELEASE ECB_MEMORY_FENCE |
634 | #endif |
682 | #endif |
635 | |
683 | |
636 | /*****************************************************************************/ |
684 | /*****************************************************************************/ |
637 | |
685 | |
638 | #define ECB_C99 (__STDC_VERSION__ >= 199901L) |
|
|
639 | |
|
|
640 | #if __cplusplus |
686 | #if __cplusplus |
641 | #define ecb_inline static inline |
687 | #define ecb_inline static inline |
642 | #elif ECB_GCC_VERSION(2,5) |
688 | #elif ECB_GCC_VERSION(2,5) |
643 | #define ecb_inline static __inline__ |
689 | #define ecb_inline static __inline__ |
644 | #elif ECB_C99 |
690 | #elif ECB_C99 |
… | |
… | |
682 | #elif ECB_GCC_VERSION(3,0) |
728 | #elif ECB_GCC_VERSION(3,0) |
683 | #define ecb_decltype(x) __typeof(x) |
729 | #define ecb_decltype(x) __typeof(x) |
684 | #endif |
730 | #endif |
685 | |
731 | |
686 | #define ecb_noinline ecb_attribute ((__noinline__)) |
732 | #define ecb_noinline ecb_attribute ((__noinline__)) |
687 | #define ecb_noreturn ecb_attribute ((__noreturn__)) |
|
|
688 | #define ecb_unused ecb_attribute ((__unused__)) |
733 | #define ecb_unused ecb_attribute ((__unused__)) |
689 | #define ecb_const ecb_attribute ((__const__)) |
734 | #define ecb_const ecb_attribute ((__const__)) |
690 | #define ecb_pure ecb_attribute ((__pure__)) |
735 | #define ecb_pure ecb_attribute ((__pure__)) |
|
|
736 | |
|
|
737 | #if ECB_C11 |
|
|
738 | #define ecb_noreturn _Noreturn |
|
|
739 | #else |
|
|
740 | #define ecb_noreturn ecb_attribute ((__noreturn__)) |
|
|
741 | #endif |
691 | |
742 | |
692 | #if ECB_GCC_VERSION(4,3) |
743 | #if ECB_GCC_VERSION(4,3) |
693 | #define ecb_artificial ecb_attribute ((__artificial__)) |
744 | #define ecb_artificial ecb_attribute ((__artificial__)) |
694 | #define ecb_hot ecb_attribute ((__hot__)) |
745 | #define ecb_hot ecb_attribute ((__hot__)) |
695 | #define ecb_cold ecb_attribute ((__cold__)) |
746 | #define ecb_cold ecb_attribute ((__cold__)) |
… | |
… | |
785 | if (x >> 32) { x >>= 32; r += 32; } |
836 | if (x >> 32) { x >>= 32; r += 32; } |
786 | |
837 | |
787 | return r + ecb_ld32 (x); |
838 | return r + ecb_ld32 (x); |
788 | } |
839 | } |
789 | #endif |
840 | #endif |
|
|
841 | |
|
|
842 | ecb_function_ ecb_bool ecb_is_pot32 (uint32_t x) ecb_const; |
|
|
843 | ecb_function_ ecb_bool ecb_is_pot32 (uint32_t x) { return !(x & (x - 1)); } |
|
|
844 | ecb_function_ ecb_bool ecb_is_pot64 (uint64_t x) ecb_const; |
|
|
845 | ecb_function_ ecb_bool ecb_is_pot64 (uint64_t x) { return !(x & (x - 1)); } |
790 | |
846 | |
791 | ecb_function_ uint8_t ecb_bitrev8 (uint8_t x) ecb_const; |
847 | ecb_function_ uint8_t ecb_bitrev8 (uint8_t x) ecb_const; |
792 | ecb_function_ uint8_t ecb_bitrev8 (uint8_t x) |
848 | ecb_function_ uint8_t ecb_bitrev8 (uint8_t x) |
793 | { |
849 | { |
794 | return ( (x * 0x0802U & 0x22110U) |
850 | return ( (x * 0x0802U & 0x22110U) |