--- libecb/ecb.h	2011/06/11 14:18:49	1.39
+++ libecb/ecb.h	2011/08/04 14:47:53	1.64
@@ -30,33 +30,124 @@
 #ifndef ECB_H
 #define ECB_H
 
-#include <inttypes.h>
+#ifdef _WIN32
+  typedef   signed char      int8_t;
+  typedef unsigned char     uint8_t;
+  typedef   signed short    int16_t;
+  typedef unsigned short   uint16_t;
+  typedef   signed int      int32_t;
+  typedef unsigned int     uint32_t;
+  #if __GNUC__
+    typedef   signed long long int64_t;
+    typedef unsigned long long uint64_t;
+  #else /* _MSC_VER || __BORLANDC__ */
+    typedef   signed __int64   int64_t;
+    typedef unsigned __int64   uint64_t;
+  #endif
+#else
+  #include <inttypes.h>
+#endif
 
 /* many compilers define _GNUC_ to some versions but then only implement
  * what their idiot authors think are the "more important" extensions,
- * causing enourmous grief in return for some better fake benchmark numbers.
+ * causing enormous grief in return for some better fake benchmark numbers.
  * or so.
  * we try to detect these and simply assume they are not gcc - if they have
  * an issue with that they should have done it right in the first place.
  */
 #ifndef ECB_GCC_VERSION
-  #if defined(__INTEL_COMPILER) || defined(__SUNPRO_C) || defined(__llvm__)
+  #if !defined(__GNUC_MINOR__) || defined(__INTEL_COMPILER) || defined(__SUNPRO_C) || defined(__SUNPRO_CC) || defined(__llvm__) || defined(__clang__)
     #define ECB_GCC_VERSION(major,minor) 0
   #else
     #define ECB_GCC_VERSION(major,minor) (__GNUC__ > (major) || (__GNUC__ == (major) && __GNUC_MINOR__ >= (minor)))
   #endif
 #endif
 
+/*****************************************************************************/
+
+/* ECB_NO_THREADS - ecb is not used by multiple threads, ever */
+/* ECB_NO_SMP     - ecb might be used in multiple threads, but only on a single cpu */
+
+#if ECB_NO_THREADS || ECB_NO_SMP
+  #define ECB_MEMORY_FENCE do { } while (0)
+#endif
+
+#ifndef ECB_MEMORY_FENCE
+  #if ECB_GCC_VERSION(2,5)
+    #if __x86
+      #define ECB_MEMORY_FENCE         __asm__ __volatile__ ("lock; orb $0, -1(%%esp)" : : : "memory")
+      #define ECB_MEMORY_FENCE_ACQUIRE ECB_MEMORY_FENCE /* non-lock xchg might be enough */
+      #define ECB_MEMORY_FENCE_RELEASE do { } while (0) /* unlikely to change in future cpus */
+    #elif __amd64
+      #define ECB_MEMORY_FENCE         __asm__ __volatile__ ("mfence" : : : "memory")
+      #define ECB_MEMORY_FENCE_ACQUIRE __asm__ __volatile__ ("lfence" : : : "memory")
+      #define ECB_MEMORY_FENCE_RELEASE __asm__ __volatile__ ("sfence") /* play safe - not needed in any current cpu */
+    #elif __powerpc__ || __ppc__ || __powerpc64__ || __ppc64__
+      #define ECB_MEMORY_FENCE         __asm__ __volatile__ ("sync" : : : "memory")
+    #elif defined(__ARM_ARCH_6__ ) || defined(__ARM_ARCH_6J__ ) \
+       || defined(__ARM_ARCH_6K__) || defined(__ARM_ARCH_6ZK__)
+      #define ECB_MEMORY_FENCE         __asm__ __volatile__ ("mcr p15,0,%0,c7,c10,4" : : "r" (0) : "memory")
+    #elif defined(__ARM_ARCH_7__ ) || defined(__ARM_ARCH_7A__ ) \
+       || defined(__ARM_ARCH_7M__) || defined(__ARM_ARCH_7R__ )
+      #define ECB_MEMORY_FENCE         __asm__ __volatile__ ("dsb" : : : "memory")
+    #endif
+  #endif
+#endif
+
+#ifndef ECB_MEMORY_FENCE
+  #if ECB_GCC_VERSION(4,4) || defined(__INTEL_COMPILER)
+    #define ECB_MEMORY_FENCE __sync_synchronize ()
+    /*#define ECB_MEMORY_FENCE_ACQUIRE ({ char dummy = 0; __sync_lock_test_and_set (&dummy, 1); }) */
+    /*#define ECB_MEMORY_FENCE_RELEASE ({ char dummy = 1; __sync_lock_release (&dummy); }) */
+  #elif _MSC_VER >= 1400 /* VC++ 2005 */
+    #pragma intrinsic(_ReadBarrier,_WriteBarrier,_ReadWriteBarrier)
+    #define ECB_MEMORY_FENCE         _ReadWriteBarrier ()
+    #define ECB_MEMORY_FENCE_ACQUIRE _ReadWriteBarrier () /* according to msdn, _ReadBarrier is not a load fence */
+    #define ECB_MEMORY_FENCE_RELEASE _WriteBarrier ()
+  #elif defined(_WIN32)
+    #include <WinNT.h>
+    #define ECB_MEMORY_FENCE MemoryBarrier () /* actually just xchg on x86... scary */
+  #endif
+#endif
+
+#ifndef ECB_MEMORY_FENCE
+  #if !ECB_AVOID_PTHREADS
+    /*
+     * if you get undefined symbol references to pthread_mutex_lock,
+     * or failure to find pthread.h, then you should implement
+     * the ECB_MEMORY_FENCE operations for your cpu/compiler
+     * OR provide pthread.h and link against the posix thread library
+     * of your system.
+     */
+    #include <pthread.h>
+    #define ECB_NEEDS_PTHREADS 1
+    #define ECB_MEMORY_FENCE_NEEDS_PTHREADS 1
+
+    static pthread_mutex_t ecb_mf_lock = PTHREAD_MUTEX_INITIALIZER;
+    #define ECB_MEMORY_FENCE do { pthread_mutex_lock (&ecb_mf_lock); pthread_mutex_unlock (&ecb_mf_lock); } while (0)
+  #endif
+#endif
+
+#if !defined(ECB_MEMORY_FENCE_ACQUIRE) && defined(ECB_MEMORY_FENCE)
+  #define ECB_MEMORY_FENCE_ACQUIRE ECB_MEMORY_FENCE
+#endif
+
+#if !defined(ECB_MEMORY_FENCE_RELEASE) && defined(ECB_MEMORY_FENCE)
+  #define ECB_MEMORY_FENCE_RELEASE ECB_MEMORY_FENCE
+#endif
+
+/*****************************************************************************/
+
 #define ECB_C99 (__STDC_VERSION__ >= 199901L)
 
 #if __cplusplus
-  #define ECB_INLINE static inline
+  #define ecb_inline static inline
 #elif ECB_GCC_VERSION(2,5)
-  #define ECB_INLINE static __inline__
+  #define ecb_inline static __inline__
 #elif ECB_C99
-  #define ECB_INLINE static inline
+  #define ecb_inline static inline
 #else
-  #define ECB_INLINE static
+  #define ecb_inline static
 #endif
 
 #if ECB_GCC_VERSION(3,3)
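
As an illustrative aside (not part of the patch): a minimal sketch of how the acquire/release fences introduced above might be used to hand a value from one thread to another. The variable names data/ready and the two functions are hypothetical, and the header is assumed to be included as "ecb.h".

    #include "ecb.h"

    static int data;            /* payload written by the producer       */
    static volatile int ready;  /* publication flag checked by consumers */

    static void produce (void)
    {
      data = 42;
      ECB_MEMORY_FENCE_RELEASE; /* make the payload visible before the flag */
      ready = 1;
    }

    static int consume (void)
    {
      while (!ready)
        ;                       /* spin until the flag is observed */

      ECB_MEMORY_FENCE_ACQUIRE; /* order the flag read before the payload read */
      return data;
    }
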
@@ -74,7 +165,7 @@
 #define ECB_STRINGIFY_(a) # a
 #define ECB_STRINGIFY(a) ECB_STRINGIFY_(a)
 
-#define ecb_function_ ECB_INLINE
+#define ecb_function_ ecb_inline
 
 #if ECB_GCC_VERSION(3,1)
   #define ecb_attribute(attrlist) __attribute__(attrlist)
@@ -120,13 +211,15 @@
 #define ecb_likely(expr)   ecb_expect_true  (expr)
 #define ecb_unlikely(expr) ecb_expect_false (expr)
 
-/* try to tell the compiler that some condition is definitely true */
-#define ecb_assume(cond) do { if (!(cond)) ecb_unreachable (); } while (0)
-
 /* count trailing zero bits and count # of one bits */
 #if ECB_GCC_VERSION(3,4)
+  /* we assume int == 32 bit, long == 32 or 64 bit and long long == 64 bit */
+  #define ecb_ld32(x)       (__builtin_clz   (x) ^ 31)
+  #define ecb_ld64(x)       (__builtin_clzll (x) ^ 63)
   #define ecb_ctz32(x)      __builtin_ctz      (x)
+  #define ecb_ctz64(x)      __builtin_ctzll    (x)
   #define ecb_popcount32(x) __builtin_popcount (x)
+  /* no popcountll */
 #else
   ecb_function_ int ecb_ctz32 (uint32_t x) ecb_const;
   ecb_function_ int
@@ -134,17 +227,33 @@
   {
     int r = 0;
 
-    x &= -x; /* this isolates the lowest bit */
+    x &= ~x + 1; /* this isolates the lowest bit */
 
+#if ECB_branchless_on_i386
+    r += !!(x & 0xaaaaaaaa) << 0;
+    r += !!(x & 0xcccccccc) << 1;
+    r += !!(x & 0xf0f0f0f0) << 2;
+    r += !!(x & 0xff00ff00) << 3;
+    r += !!(x & 0xffff0000) << 4;
+#else
     if (x & 0xaaaaaaaa) r +=  1;
    if (x & 0xcccccccc) r +=  2;
    if (x & 0xf0f0f0f0) r +=  4;
    if (x & 0xff00ff00) r +=  8;
    if (x & 0xffff0000) r += 16;
+#endif
 
     return r;
   }
 
+  ecb_function_ int ecb_ctz64 (uint64_t x) ecb_const;
+  ecb_function_ int
+  ecb_ctz64 (uint64_t x)
+  {
+    int shift = x & 0xffffffffU ? 0 : 32;
+    return ecb_ctz32 (x >> shift) + shift;
+  }
+
   ecb_function_ int ecb_popcount32 (uint32_t x) ecb_const;
  ecb_function_ int
  ecb_popcount32 (uint32_t x)
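
Again illustrative rather than part of the patch: a quick plausibility check of the bit-scan and popcount helpers added above, assuming the header is reachable as "ecb.h". The expected results are noted in the comments.

    #include <stdio.h>
    #include "ecb.h"

    int main (void)
    {
      printf ("%d\n", ecb_ctz32 (0x80));        /* 7  - trailing zero bits of 0x80   */
      printf ("%d\n", ecb_ctz64 (1ULL << 40));  /* 40                                */
      printf ("%d\n", ecb_ld32 (0x10000));      /* 16 - index of the highest set bit */
      printf ("%d\n", ecb_popcount32 (0xff));   /* 8                                 */
      printf ("%d\n", ecb_popcount64 (~0ULL));  /* 64                                */
      return 0;
    }
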
@@ -156,29 +265,83 @@
     return x >> 24;
   }
+
+  ecb_function_ int ecb_ld32 (uint32_t x) ecb_const;
+  ecb_function_ int ecb_ld32 (uint32_t x)
+  {
+    int r = 0;
+
+    if (x >> 16) { x >>= 16; r += 16; }
+    if (x >>  8) { x >>=  8; r +=  8; }
+    if (x >>  4) { x >>=  4; r +=  4; }
+    if (x >>  2) { x >>=  2; r +=  2; }
+    if (x >>  1) {           r +=  1; }
+
+    return r;
+  }
+
+  ecb_function_ int ecb_ld64 (uint64_t x) ecb_const;
+  ecb_function_ int ecb_ld64 (uint64_t x)
+  {
+    int r = 0;
+
+    if (x >> 32) { x >>= 32; r += 32; }
+
+    return r + ecb_ld32 (x);
+  }
 #endif
 
+/* popcount64 is only available on 64 bit cpus as gcc builtin */
+/* so for this version we are lazy */
+ecb_function_ int ecb_popcount64 (uint64_t x) ecb_const;
+ecb_function_ int
+ecb_popcount64 (uint64_t x)
+{
+  return ecb_popcount32 (x) + ecb_popcount32 (x >> 32);
+}
+
+ecb_inline uint8_t  ecb_rotl8  (uint8_t  x, unsigned int count) ecb_const;
+ecb_inline uint8_t  ecb_rotr8  (uint8_t  x, unsigned int count) ecb_const;
+ecb_inline uint16_t ecb_rotl16 (uint16_t x, unsigned int count) ecb_const;
+ecb_inline uint16_t ecb_rotr16 (uint16_t x, unsigned int count) ecb_const;
+ecb_inline uint32_t ecb_rotl32 (uint32_t x, unsigned int count) ecb_const;
+ecb_inline uint32_t ecb_rotr32 (uint32_t x, unsigned int count) ecb_const;
+ecb_inline uint64_t ecb_rotl64 (uint64_t x, unsigned int count) ecb_const;
+ecb_inline uint64_t ecb_rotr64 (uint64_t x, unsigned int count) ecb_const;
+
+ecb_inline uint8_t  ecb_rotl8  (uint8_t  x, unsigned int count) { return (x >> ( 8 - count)) | (x << count); }
+ecb_inline uint8_t  ecb_rotr8  (uint8_t  x, unsigned int count) { return (x << ( 8 - count)) | (x >> count); }
+ecb_inline uint16_t ecb_rotl16 (uint16_t x, unsigned int count) { return (x >> (16 - count)) | (x << count); }
+ecb_inline uint16_t ecb_rotr16 (uint16_t x, unsigned int count) { return (x << (16 - count)) | (x >> count); }
+ecb_inline uint32_t ecb_rotl32 (uint32_t x, unsigned int count) { return (x >> (32 - count)) | (x << count); }
+ecb_inline uint32_t ecb_rotr32 (uint32_t x, unsigned int count) { return (x << (32 - count)) | (x >> count); }
+ecb_inline uint64_t ecb_rotl64 (uint64_t x, unsigned int count) { return (x >> (64 - count)) | (x << count); }
+ecb_inline uint64_t ecb_rotr64 (uint64_t x, unsigned int count) { return (x << (64 - count)) | (x >> count); }
+
 #if ECB_GCC_VERSION(4,3)
-  #define ecb_bswap32(x) __builtin_bswap32 (x)
-  #define ecb_bswap16(x) (__builtin_bswap32(x) >> 16)
+  #define ecb_bswap16(x) (__builtin_bswap32 (x) >> 16)
+  #define ecb_bswap32(x)  __builtin_bswap32 (x)
+  #define ecb_bswap64(x)  __builtin_bswap64 (x)
 #else
+  ecb_function_ uint16_t ecb_bswap16 (uint16_t x) ecb_const;
+  ecb_function_ uint16_t
+  ecb_bswap16 (uint16_t x)
+  {
+    return ecb_rotl16 (x, 8);
+  }
+
   ecb_function_ uint32_t ecb_bswap32 (uint32_t x) ecb_const;
  ecb_function_ uint32_t
  ecb_bswap32 (uint32_t x)
  {
-    return (x >> 24)
-        | ((x >>  8) & 0x0000ff00)
-        | ((x <<  8) & 0x00ff0000)
-        |  (x << 24);
+    return (((uint32_t)ecb_bswap16 (x)) << 16) | ecb_bswap16 (x >> 16);
   }
 
-  ecb_function_ uint32_t ecb_bswap16 (uint32_t x) ecb_const;
-  ecb_function_ uint32_t
-  ecb_bswap16 (uint32_t x)
+  ecb_function_ uint64_t ecb_bswap64 (uint64_t x) ecb_const;
+  ecb_function_ uint64_t
+  ecb_bswap64 (uint64_t x)
  {
-    return ((x >>  8) & 0xff)
-        | ((x <<  8) & 0x00ff0000)
-        |  (x << 24);
+    return (((uint64_t)ecb_bswap32 (x)) << 32) | ecb_bswap32 (x >> 32);
   }
 #endif
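
Likewise illustrative and not part of the patch: a fragment exercising the new rotate and byte-swap helpers. Note that the rotate helpers, as written, expect a count strictly between 0 and the type width; a count of 0 would shift by the full width.

    #include <stdio.h>
    #include "ecb.h"

    int main (void)
    {
      printf ("%#x\n",   (unsigned)ecb_rotl8   (0x81, 1));     /* 0x3        */
      printf ("%#x\n",   (unsigned)ecb_rotr32  (0x1, 1));      /* 0x80000000 */
      printf ("%#x\n",   (unsigned)ecb_bswap16 (0x1234));      /* 0x3412     */
      printf ("%#x\n",   (unsigned)ecb_bswap32 (0x12345678));  /* 0x78563412 */
      printf ("%#llx\n", (unsigned long long)ecb_bswap64 (0x0123456789abcdefULL)); /* 0xefcdab8967452301 */
      return 0;
    }
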
@@ -190,6 +353,9 @@
   ecb_function_ void ecb_unreachable (void) { }
 #endif
 
+/* try to tell the compiler that some condition is definitely true */
+#define ecb_assume(cond) do { if (!(cond)) ecb_unreachable (); } while (0)
+
 ecb_function_ unsigned char ecb_byteorder_helper (void) ecb_const;
 ecb_function_ unsigned char
 ecb_byteorder_helper (void)
@@ -199,9 +365,9 @@
 }
 
 ecb_function_ ecb_bool ecb_big_endian    (void) ecb_const;
-ecb_function_ ecb_bool ecb_big_endian    (void) { return ecb_byteorder_helper () == 0x11; };
+ecb_function_ ecb_bool ecb_big_endian    (void) { return ecb_byteorder_helper () == 0x11; }
 ecb_function_ ecb_bool ecb_little_endian (void) ecb_const;
-ecb_function_ ecb_bool ecb_little_endian (void) { return ecb_byteorder_helper () == 0x44; };
+ecb_function_ ecb_bool ecb_little_endian (void) { return ecb_byteorder_helper () == 0x44; }
 
 #if ECB_GCC_VERSION(3,0) || ECB_C99
   #define ecb_mod(m,n) ((m) % (n) + ((m) % (n) < 0 ? (n) : 0))
@@ -210,7 +376,7 @@
 #endif
 
 #if ecb_cplusplus_does_not_suck
-  // does not work for local types (http://www.open-std.org/jtc1/sc22/wg21/docs/papers/2008/n2657.htm)
+  /* does not work for local types (http://www.open-std.org/jtc1/sc22/wg21/docs/papers/2008/n2657.htm) */
   template<typename T, int N>
   static inline int ecb_array_length (const T (&arr)[N])
   {
@@ -220,19 +386,5 @@
   #define ecb_array_length(name) (sizeof (name) / sizeof (name [0]))
 #endif
 
-ECB_INLINE uint32_t ecb_rotr32 (uint32_t x, unsigned int count) ecb_const;
-ECB_INLINE uint32_t
-ecb_rotr32 (uint32_t x, unsigned int count)
-{
-  return (x << (32 - count)) | (x >> count);
-}
-
-ECB_INLINE uint32_t ecb_rotl32 (uint32_t x, unsigned int count) ecb_const;
-ECB_INLINE uint32_t
-ecb_rotl32 (uint32_t x, unsigned int count)
-{
-  return (x >> (32 - count)) | (x << count);
-}
-
 #endif
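
To round things off, a short illustrative program (not part of the patch) touching the endianness probes, ecb_mod and ecb_array_length that the last hunks adjust; the primes array is just a placeholder.

    #include <stdio.h>
    #include "ecb.h"

    int main (void)
    {
      static const int primes[] = { 2, 3, 5, 7, 11 };

      printf ("little endian  : %d\n", (int)ecb_little_endian ());
      printf ("big endian     : %d\n", (int)ecb_big_endian ());
      printf ("ecb_mod (-7, 3): %d\n", ecb_mod (-7, 3));                /* 2, whereas -7 % 3 is -1 in C99 */
      printf ("array length   : %d\n", (int)ecb_array_length (primes)); /* 5 */
      return 0;
    }
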