/cvs/libecb/ecb.h

Comparing libecb/ecb.h (file contents):
Revision 1.81 by root, Fri Mar 23 19:04:07 2012 UTC vs.
Revision 1.143 by root, Fri Oct 17 10:56:48 2014 UTC

/*
 * libecb - http://software.schmorp.de/pkg/libecb
 *
- * Copyright (©) 2009-2012 Marc Alexander Lehmann <libecb@schmorp.de>
+ * Copyright (©) 2009-2014 Marc Alexander Lehmann <libecb@schmorp.de>
 * Copyright (©) 2011 Emanuele Giaquinta
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without modifica-
 * tion, are permitted provided that the following conditions are met:
[... unchanged lines omitted ...]
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
 * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTH-
 * ERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
 * OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * Alternatively, the contents of this file may be used under the terms of
+ * the GNU General Public License ("GPL") version 2 or any later version,
+ * in which case the provisions of the GPL are applicable instead of
+ * the above. If you wish to allow the use of your version of this file
+ * only under the terms of the GPL and not to allow others to use your
+ * version of this file under the BSD license, indicate your decision
+ * by deleting the provisions above and replace them with the notice
+ * and other provisions required by the GPL. If you do not delete the
+ * provisions above, a recipient may use your version of this file under
+ * either the BSD or the GPL.
 */

#ifndef ECB_H
#define ECB_H
+
+/* 16 bits major, 16 bits minor */
+#define ECB_VERSION 0x00010004

#ifdef _WIN32
  typedef signed char int8_t;
  typedef unsigned char uint8_t;
  typedef signed short int16_t;
[... unchanged lines omitted ...]
    typedef unsigned long long uint64_t;
  #else /* _MSC_VER || __BORLANDC__ */
    typedef signed __int64 int64_t;
    typedef unsigned __int64 uint64_t;
  #endif
+  #ifdef _WIN64
+    #define ECB_PTRSIZE 8
+    typedef uint64_t uintptr_t;
+    typedef int64_t intptr_t;
+  #else
+    #define ECB_PTRSIZE 4
+    typedef uint32_t uintptr_t;
+    typedef int32_t intptr_t;
+  #endif
#else
  #include <inttypes.h>
+  #if UINTMAX_MAX > 0xffffffffU
+    #define ECB_PTRSIZE 8
+  #else
+    #define ECB_PTRSIZE 4
+  #endif
+#endif
+
+/* work around x32 idiocy by defining proper macros */
+#if __amd64 || __x86_64 || _M_AMD64 || _M_X64
+  #if _ILP32
+    #define ECB_AMD64_X32 1
+  #else
+    #define ECB_AMD64 1
+  #endif
#endif
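ECB_PTRSIZE is meant to mirror the native pointer width selected above. As a minimal illustration (not part of ecb.h; the typedef name is made up), it can be checked against sizeof (void *) at compile time:

  /* illustrative only: the array size becomes -1 and the build fails if ECB_PTRSIZE is wrong */
  typedef char my_ecb_ptrsize_check [sizeof (void *) == ECB_PTRSIZE ? 1 : -1];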

/* many compilers define _GNUC_ to some versions but then only implement
 * what their idiot authors think are the "more important" extensions,
 * causing enormous grief in return for some better fake benchmark numbers.
 * or so.
 * we try to detect these and simply assume they are not gcc - if they have
 * an issue with that they should have done it right in the first place.
 */
-#ifndef ECB_GCC_VERSION
-  #if !defined(__GNUC_MINOR__) || defined(__INTEL_COMPILER) || defined(__SUNPRO_C) || defined(__SUNPRO_CC) || defined(__llvm__) || defined(__clang__)
-    #define ECB_GCC_VERSION(major,minor) 0
-  #else
-    #define ECB_GCC_VERSION(major,minor) (__GNUC__ > (major) || (__GNUC__ == (major) && __GNUC_MINOR__ >= (minor)))
-  #endif
+#if !defined __GNUC_MINOR__ || defined __INTEL_COMPILER || defined __SUNPRO_C || defined __SUNPRO_CC || defined __llvm__ || defined __clang__
+  #define ECB_GCC_VERSION(major,minor) 0
+#else
+  #define ECB_GCC_VERSION(major,minor) (__GNUC__ > (major) || (__GNUC__ == (major) && __GNUC_MINOR__ >= (minor)))
+#endif
+
+#define ECB_CLANG_VERSION(major,minor) (__clang_major__ > (major) || (__clang_major__ == (major) && __clang_minor__ >= (minor)))
+
+#if __clang__ && defined(__has_builtin)
+  #define ECB_CLANG_BUILTIN(x) __has_builtin(x)
+#else
+  #define ECB_CLANG_BUILTIN(x) 0
+#endif
+
+#if __clang__ && defined(__has_extension)
+  #define ECB_CLANG_EXTENSION(x) __has_extension(x)
+#else
+  #define ECB_CLANG_EXTENSION(x) 0
+#endif
+
+#define ECB_CPP (__cplusplus+0)
+#define ECB_CPP11 (__cplusplus >= 201103L)
+
+#if ECB_CPP
+  #define ECB_C 0
+  #define ECB_STDC_VERSION 0
+#else
+  #define ECB_C 1
+  #define ECB_STDC_VERSION __STDC_VERSION__
+#endif
+
+#define ECB_C99 (ECB_STDC_VERSION >= 199901L)
+#define ECB_C11 (ECB_STDC_VERSION >= 201112L)
+
+#if ECB_CPP
+  #define ECB_EXTERN_C extern "C"
+  #define ECB_EXTERN_C_BEG ECB_EXTERN_C {
+  #define ECB_EXTERN_C_END }
+#else
+  #define ECB_EXTERN_C extern
+  #define ECB_EXTERN_C_BEG
+  #define ECB_EXTERN_C_END
#endif
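ECB_EXTERN_C_BEG/ECB_EXTERN_C_END expand to an extern "C" block only when compiled as C++, so a header can declare C-linkage functions once for both languages. A minimal sketch (my_add is an invented name, not part of ecb.h):

  ECB_EXTERN_C_BEG
  int my_add (int a, int b);  /* gets C linkage when the header is included from C++ */
  ECB_EXTERN_C_END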

/*****************************************************************************/

/* ECB_NO_THREADS - ecb is not used by multiple threads, ever */
/* ECB_NO_SMP - ecb might be used in multiple threads, but only on a single cpu */

#if ECB_NO_THREADS
-# define ECB_NO_SMP 1
+  #define ECB_NO_SMP 1
#endif

-#if ECB_NO_THREADS || ECB_NO_SMP
+#if ECB_NO_SMP
  #define ECB_MEMORY_FENCE do { } while (0)
#endif

#ifndef ECB_MEMORY_FENCE
-  #if ECB_GCC_VERSION(2,5) || defined(__INTEL_COMPILER) || (__llvm__ && __GNUC__) || __SUNPRO_C >= 0x5110 || __SUNPRO_CC >= 0x5110
+  #if ECB_GCC_VERSION(2,5) || defined __INTEL_COMPILER || (__llvm__ && __GNUC__) || __SUNPRO_C >= 0x5110 || __SUNPRO_CC >= 0x5110
    #if __i386 || __i386__
      #define ECB_MEMORY_FENCE __asm__ __volatile__ ("lock; orb $0, -1(%%esp)" : : : "memory")
-      #define ECB_MEMORY_FENCE_ACQUIRE ECB_MEMORY_FENCE /* non-lock xchg might be enough */
-      #define ECB_MEMORY_FENCE_RELEASE do { } while (0) /* unlikely to change in future cpus */
+      #define ECB_MEMORY_FENCE_ACQUIRE __asm__ __volatile__ ("" : : : "memory")
+      #define ECB_MEMORY_FENCE_RELEASE __asm__ __volatile__ ("")
    #elif __amd64 || __amd64__ || __x86_64 || __x86_64__
      #define ECB_MEMORY_FENCE __asm__ __volatile__ ("mfence" : : : "memory")
-      #define ECB_MEMORY_FENCE_ACQUIRE __asm__ __volatile__ ("lfence" : : : "memory")
-      #define ECB_MEMORY_FENCE_RELEASE __asm__ __volatile__ ("sfence") /* play safe - not needed in any current cpu */
+      #define ECB_MEMORY_FENCE_ACQUIRE __asm__ __volatile__ ("" : : : "memory")
+      #define ECB_MEMORY_FENCE_RELEASE __asm__ __volatile__ ("")
    #elif __powerpc__ || __ppc__ || __powerpc64__ || __ppc64__
      #define ECB_MEMORY_FENCE __asm__ __volatile__ ("sync" : : : "memory")
-    #elif defined(__ARM_ARCH_6__ ) || defined(__ARM_ARCH_6J__ ) \
-       || defined(__ARM_ARCH_6K__) || defined(__ARM_ARCH_6ZK__)
+    #elif defined __ARM_ARCH_6__ || defined __ARM_ARCH_6J__ \
+       || defined __ARM_ARCH_6K__ || defined __ARM_ARCH_6ZK__
      #define ECB_MEMORY_FENCE __asm__ __volatile__ ("mcr p15,0,%0,c7,c10,5" : : "r" (0) : "memory")
-    #elif defined(__ARM_ARCH_7__ ) || defined(__ARM_ARCH_7A__ ) \
-       || defined(__ARM_ARCH_7M__) || defined(__ARM_ARCH_7R__ )
+    #elif defined __ARM_ARCH_7__ || defined __ARM_ARCH_7A__ \
+       || defined __ARM_ARCH_7M__ || defined __ARM_ARCH_7R__
      #define ECB_MEMORY_FENCE __asm__ __volatile__ ("dmb" : : : "memory")
+    #elif __aarch64__
+      #define ECB_MEMORY_FENCE __asm__ __volatile__ ("dmb ish" : : : "memory")
-    #elif __sparc || __sparc__
-      #define ECB_MEMORY_FENCE __asm__ __volatile__ ("membar #LoadStore | #LoadLoad | #StoreStore | #StoreLoad | " : : : "memory")
+    #elif (__sparc || __sparc__) && !__sparcv8
+      #define ECB_MEMORY_FENCE __asm__ __volatile__ ("membar #LoadStore | #LoadLoad | #StoreStore | #StoreLoad" : : : "memory")
      #define ECB_MEMORY_FENCE_ACQUIRE __asm__ __volatile__ ("membar #LoadStore | #LoadLoad" : : : "memory")
      #define ECB_MEMORY_FENCE_RELEASE __asm__ __volatile__ ("membar #LoadStore | #StoreStore")
-    #elif defined(__s390__) || defined(__s390x__)
+    #elif defined __s390__ || defined __s390x__
      #define ECB_MEMORY_FENCE __asm__ __volatile__ ("bcr 15,0" : : : "memory")
-    #elif defined(__mips__)
-      #define ECB_MEMORY_FENCE __asm__ __volatile__ ("sync" : : : "memory")
-    #elif __xlC__
-      #define ECB_MEMORY_FENCE __lwsync ()
+    #elif defined __mips__
+      /* GNU/Linux emulates sync on mips1 architectures, so we force its use */
+      /* anybody else who still uses mips1 is supposed to send in their version, with detection code. */
+      #define ECB_MEMORY_FENCE __asm__ __volatile__ (".set mips2; sync; .set mips0" : : : "memory")
+    #elif defined __alpha__
+      #define ECB_MEMORY_FENCE __asm__ __volatile__ ("mb" : : : "memory")
+    #elif defined __hppa__
+      #define ECB_MEMORY_FENCE __asm__ __volatile__ ("" : : : "memory")
+      #define ECB_MEMORY_FENCE_RELEASE __asm__ __volatile__ ("")
+    #elif defined __ia64__
+      #define ECB_MEMORY_FENCE __asm__ __volatile__ ("mf" : : : "memory")
+    #elif defined __m68k__
+      #define ECB_MEMORY_FENCE __asm__ __volatile__ ("" : : : "memory")
+    #elif defined __m88k__
+      #define ECB_MEMORY_FENCE __asm__ __volatile__ ("tb1 0,%%r0,128" : : : "memory")
+    #elif defined __sh__
+      #define ECB_MEMORY_FENCE __asm__ __volatile__ ("" : : : "memory")
    #endif
  #endif
#endif

#ifndef ECB_MEMORY_FENCE
+  #if ECB_GCC_VERSION(4,7)
+    /* see comment below (stdatomic.h) about the C11 memory model. */
+    #define ECB_MEMORY_FENCE __atomic_thread_fence (__ATOMIC_SEQ_CST)
+    #define ECB_MEMORY_FENCE_ACQUIRE __atomic_thread_fence (__ATOMIC_ACQUIRE)
+    #define ECB_MEMORY_FENCE_RELEASE __atomic_thread_fence (__ATOMIC_RELEASE)
+
+  #elif ECB_CLANG_EXTENSION(c_atomic)
+    /* see comment below (stdatomic.h) about the C11 memory model. */
+    #define ECB_MEMORY_FENCE __c11_atomic_thread_fence (__ATOMIC_SEQ_CST)
+    #define ECB_MEMORY_FENCE_ACQUIRE __c11_atomic_thread_fence (__ATOMIC_ACQUIRE)
+    #define ECB_MEMORY_FENCE_RELEASE __c11_atomic_thread_fence (__ATOMIC_RELEASE)
+
-  #if ECB_GCC_VERSION(4,4) || defined(__INTEL_COMPILER) || defined(__clang__)
+  #elif ECB_GCC_VERSION(4,4) || defined __INTEL_COMPILER || defined __clang__
    #define ECB_MEMORY_FENCE __sync_synchronize ()
-    /*#define ECB_MEMORY_FENCE_ACQUIRE ({ char dummy = 0; __sync_lock_test_and_set (&dummy, 1); }) */
-    /*#define ECB_MEMORY_FENCE_RELEASE ({ char dummy = 1; __sync_lock_release (&dummy ); }) */
+  #elif _MSC_VER >= 1500 /* VC++ 2008 */
+    /* apparently, microsoft broke all the memory barrier stuff in Visual Studio 2008... */
+    #pragma intrinsic(_ReadBarrier,_WriteBarrier,_ReadWriteBarrier)
+    #define ECB_MEMORY_FENCE _ReadWriteBarrier (); MemoryBarrier()
+    #define ECB_MEMORY_FENCE_ACQUIRE _ReadWriteBarrier (); MemoryBarrier() /* according to msdn, _ReadBarrier is not a load fence */
+    #define ECB_MEMORY_FENCE_RELEASE _WriteBarrier (); MemoryBarrier()
  #elif _MSC_VER >= 1400 /* VC++ 2005 */
    #pragma intrinsic(_ReadBarrier,_WriteBarrier,_ReadWriteBarrier)
    #define ECB_MEMORY_FENCE _ReadWriteBarrier ()
    #define ECB_MEMORY_FENCE_ACQUIRE _ReadWriteBarrier () /* according to msdn, _ReadBarrier is not a load fence */
    #define ECB_MEMORY_FENCE_RELEASE _WriteBarrier ()
-  #elif defined(_WIN32)
+  #elif defined _WIN32
    #include <WinNT.h>
    #define ECB_MEMORY_FENCE MemoryBarrier () /* actually just xchg on x86... scary */
  #elif __SUNPRO_C >= 0x5110 || __SUNPRO_CC >= 0x5110
    #include <mbarrier.h>
    #define ECB_MEMORY_FENCE __machine_rw_barrier ()
    #define ECB_MEMORY_FENCE_ACQUIRE __machine_r_barrier ()
    #define ECB_MEMORY_FENCE_RELEASE __machine_w_barrier ()
+  #elif __xlC__
+    #define ECB_MEMORY_FENCE __sync ()
+  #endif
+#endif
+
+#ifndef ECB_MEMORY_FENCE
+  #if ECB_C11 && !defined __STDC_NO_ATOMICS__
+    /* we assume that these memory fences work on all variables/all memory accesses, */
+    /* not just C11 atomics and atomic accesses */
+    #include <stdatomic.h>
+    /* Unfortunately, neither gcc 4.7 nor clang 3.1 generate any instructions for */
+    /* any fence other than seq_cst, which isn't very efficient for us. */
+    /* Why that is, we don't know - either the C11 memory model is quite useless */
+    /* for most usages, or gcc and clang have a bug */
+    /* I *currently* lean towards the latter, and inefficiently implement */
+    /* all three of ecb's fences as a seq_cst fence */
+    /* Update, gcc-4.8 generates mfence for all c++ fences, but nothing */
+    /* for all __atomic_thread_fence's except seq_cst */
+    #define ECB_MEMORY_FENCE atomic_thread_fence (memory_order_seq_cst)
  #endif
#endif

#ifndef ECB_MEMORY_FENCE
  #if !ECB_AVOID_PTHREADS
[... unchanged lines omitted ...]
    static pthread_mutex_t ecb_mf_lock = PTHREAD_MUTEX_INITIALIZER;
    #define ECB_MEMORY_FENCE do { pthread_mutex_lock (&ecb_mf_lock); pthread_mutex_unlock (&ecb_mf_lock); } while (0)
  #endif
#endif

-#if !defined(ECB_MEMORY_FENCE_ACQUIRE) && defined(ECB_MEMORY_FENCE)
+#if !defined ECB_MEMORY_FENCE_ACQUIRE && defined ECB_MEMORY_FENCE
  #define ECB_MEMORY_FENCE_ACQUIRE ECB_MEMORY_FENCE
#endif

-#if !defined(ECB_MEMORY_FENCE_RELEASE) && defined(ECB_MEMORY_FENCE)
+#if !defined ECB_MEMORY_FENCE_RELEASE && defined ECB_MEMORY_FENCE
  #define ECB_MEMORY_FENCE_RELEASE ECB_MEMORY_FENCE
#endif
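The acquire/release pair is what makes a simple flag handoff between threads work: the writer publishes its data before setting the flag, and the reader only touches the data after observing the flag. A rough sketch of that pattern (the my_* names are invented; a real program would also need an atomic or volatile flag and is only as portable as the platform fences selected above):

  static int          my_payload;
  static volatile int my_ready;

  static void my_publish (int value)
  {
    my_payload = value;
    ECB_MEMORY_FENCE_RELEASE; /* payload must become visible before the flag */
    my_ready = 1;
  }

  static int my_consume (void)
  {
    while (!my_ready)
      ; /* spin until the flag is set */

    ECB_MEMORY_FENCE_ACQUIRE; /* flag seen - now it is safe to read the payload */
    return my_payload;
  }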

/*****************************************************************************/
-
-#define ECB_C99 (__STDC_VERSION__ >= 199901L)

#if __cplusplus
  #define ecb_inline static inline
#elif ECB_GCC_VERSION(2,5)
  #define ecb_inline static __inline__
[... unchanged lines omitted ...]
#define ECB_STRINGIFY_(a) # a
#define ECB_STRINGIFY(a) ECB_STRINGIFY_(a)

#define ecb_function_ ecb_inline

-#if ECB_GCC_VERSION(3,1)
-  #define ecb_attribute(attrlist) __attribute__(attrlist)
+#if ECB_GCC_VERSION(3,1) || ECB_CLANG_VERSION(2,8)
+  #define ecb_attribute(attrlist) __attribute__ (attrlist)
+#else
+  #define ecb_attribute(attrlist)
+#endif
+
+#if ECB_GCC_VERSION(3,1) || ECB_CLANG_BUILTIN(__builtin_constant_p)
  #define ecb_is_constant(expr) __builtin_constant_p (expr)
+#else
+  /* possible C11 impl for integral types
+  typedef struct ecb_is_constant_struct ecb_is_constant_struct;
+  #define ecb_is_constant(expr) _Generic ((1 ? (struct ecb_is_constant_struct *)0 : (void *)((expr) - (expr)), ecb_is_constant_struct *: 0, default: 1)) */
+
+  #define ecb_is_constant(expr) 0
+#endif
+
+#if ECB_GCC_VERSION(3,1) || ECB_CLANG_BUILTIN(__builtin_expect)
  #define ecb_expect(expr,value) __builtin_expect ((expr),(value))
+#else
+  #define ecb_expect(expr,value) (expr)
+#endif
+
+#if ECB_GCC_VERSION(3,1) || ECB_CLANG_BUILTIN(__builtin_prefetch)
  #define ecb_prefetch(addr,rw,locality) __builtin_prefetch (addr, rw, locality)
#else
-  #define ecb_attribute(attrlist)
-  #define ecb_is_constant(expr) 0
-  #define ecb_expect(expr,value) (expr)
  #define ecb_prefetch(addr,rw,locality)
#endif
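ecb_prefetch takes the address, 0 for a read or 1 for a write, and a temporal-locality hint from 0 to 3; on compilers without __builtin_prefetch it simply disappears. A hypothetical use while streaming through an array (my_sum is not part of ecb.h):

  static long my_sum (const long *a, int n)
  {
    long s = 0;
    int i;

    for (i = 0; i < n; ++i)
      {
        ecb_prefetch (a + i + 16, 0, 1); /* read-prefetch a later element, low temporal locality */
        s += a [i];
      }

    return s;
  }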

/* no emulation for ecb_decltype */
-#if ECB_GCC_VERSION(4,5)
-  #define ecb_decltype(x) __decltype(x)
-#elif ECB_GCC_VERSION(3,0)
-  #define ecb_decltype(x) __typeof(x)
+#if ECB_CPP11
+  #define ecb_decltype(x) decltype (x)
+#elif ECB_GCC_VERSION(3,0) || ECB_CLANG_VERSION(2,8)
+  #define ecb_decltype(x) __typeof__ (x)
+#endif
+
+#if _MSC_VER >= 1300
+  #define ecb_deprecated __declspec(deprecated)
+#else
+  #define ecb_deprecated ecb_attribute ((__deprecated__))
#endif

#define ecb_noinline ecb_attribute ((__noinline__))
-#define ecb_noreturn ecb_attribute ((__noreturn__))
#define ecb_unused ecb_attribute ((__unused__))
#define ecb_const ecb_attribute ((__const__))
#define ecb_pure ecb_attribute ((__pure__))
+
+/* http://msdn.microsoft.com/en-us/library/k6ktzx3s.aspx __declspec(noreturn) */
+#if ECB_C11
+  #define ecb_noreturn _Noreturn
+#else
+  #define ecb_noreturn ecb_attribute ((__noreturn__))
+#endif
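ecb_noreturn has to appear among the declaration specifiers (in front of the return type) so that it expands correctly both as C11 _Noreturn and as the gcc attribute. A made-up example (my_fatal is not part of ecb.h):

  #include <stdlib.h> /* for abort */

  ecb_noreturn static void my_fatal (void) { abort (); }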

#if ECB_GCC_VERSION(4,3)
  #define ecb_artificial ecb_attribute ((__artificial__))
  #define ecb_hot ecb_attribute ((__hot__))
  #define ecb_cold ecb_attribute ((__cold__))
[... unchanged lines omitted ...]
/* for compatibility to the rest of the world */
#define ecb_likely(expr) ecb_expect_true (expr)
#define ecb_unlikely(expr) ecb_expect_false (expr)
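ecb_expect_false (alias ecb_unlikely) tells the optimizer that the wrapped condition is almost never true, so the error path gets moved out of the hot code. Illustrative only (my_parse is a made-up function):

  static int my_parse (const char *s)
  {
    if (ecb_expect_false (!s))
      return -1; /* rare error path */

    return s [0]; /* common fast path */
  }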

/* count trailing zero bits and count # of one bits */
-#if ECB_GCC_VERSION(3,4)
+#if ECB_GCC_VERSION(3,4) \
+    || (ECB_CLANG_BUILTIN(__builtin_clz) && ECB_CLANG_BUILTIN(__builtin_clzll) \
+    && ECB_CLANG_BUILTIN(__builtin_ctz) && ECB_CLANG_BUILTIN(__builtin_ctzll) \
+    && ECB_CLANG_BUILTIN(__builtin_popcount))
  /* we assume int == 32 bit, long == 32 or 64 bit and long long == 64 bit */
  #define ecb_ld32(x) (__builtin_clz (x) ^ 31)
  #define ecb_ld64(x) (__builtin_clzll (x) ^ 63)
  #define ecb_ctz32(x) __builtin_ctz (x)
  #define ecb_ctz64(x) __builtin_ctzll (x)
[... unchanged lines omitted ...]
    if (x >> 32) { x >>= 32; r += 32; }

    return r + ecb_ld32 (x);
  }
#endif
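ecb_ctz32 returns the index of the lowest set bit, which combines nicely with the x &= x - 1 trick to visit every set bit of a mask from lowest to highest. A small sketch (my_foreach_bit is invented):

  static void my_foreach_bit (uint32_t mask, void (*cb)(int bit))
  {
    while (mask)
      {
        cb (ecb_ctz32 (mask)); /* index of the lowest set bit */
        mask &= mask - 1;      /* clear that bit */
      }
  }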
+
+ecb_function_ ecb_bool ecb_is_pot32 (uint32_t x) ecb_const;
+ecb_function_ ecb_bool ecb_is_pot32 (uint32_t x) { return !(x & (x - 1)); }
+ecb_function_ ecb_bool ecb_is_pot64 (uint64_t x) ecb_const;
+ecb_function_ ecb_bool ecb_is_pot64 (uint64_t x) { return !(x & (x - 1)); }
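Note that !(x & (x - 1)) also reports 0 as a power of two, so callers that care must exclude zero themselves, as in this invented helper:

  static int my_align_ok (uint32_t alignment)
  {
    return alignment != 0 && ecb_is_pot32 (alignment); /* 0 would otherwise pass */
  }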

ecb_function_ uint8_t ecb_bitrev8 (uint8_t x) ecb_const;
ecb_function_ uint8_t ecb_bitrev8 (uint8_t x)
{
  return ( (x * 0x0802U & 0x22110U)
[... unchanged lines omitted ...]
ecb_inline uint32_t ecb_rotl32 (uint32_t x, unsigned int count) { return (x >> (32 - count)) | (x << count); }
ecb_inline uint32_t ecb_rotr32 (uint32_t x, unsigned int count) { return (x << (32 - count)) | (x >> count); }
ecb_inline uint64_t ecb_rotl64 (uint64_t x, unsigned int count) { return (x >> (64 - count)) | (x << count); }
ecb_inline uint64_t ecb_rotr64 (uint64_t x, unsigned int count) { return (x << (64 - count)) | (x >> count); }

-#if ECB_GCC_VERSION(4,3)
+#if ECB_GCC_VERSION(4,3) || (ECB_CLANG_BUILTIN(__builtin_bswap32) && ECB_CLANG_BUILTIN(__builtin_bswap64))
  #define ecb_bswap16(x) (__builtin_bswap32 (x) >> 16)
  #define ecb_bswap32(x) __builtin_bswap32 (x)
  #define ecb_bswap64(x) __builtin_bswap64 (x)
#else
  ecb_function_ uint16_t ecb_bswap16 (uint16_t x) ecb_const;
[... unchanged lines omitted ...]
  {
    return (((uint64_t)ecb_bswap32 (x)) << 32) | ecb_bswap32 (x >> 32);
  }
#endif
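A typical ecb_bswap32 use is converting a wire-format (big-endian) field on a little-endian host. The sketch below hard-codes that assumption; a portable caller would first consult ecb_big_endian ()/ecb_little_endian () from further down in this file (my_get_be32 is invented):

  #include <string.h> /* for memcpy */

  static uint32_t my_get_be32 (const unsigned char *p)
  {
    uint32_t v;

    memcpy (&v, p, 4);      /* read the raw big-endian bytes */
    return ecb_bswap32 (v); /* assumes a little-endian host */
  }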

-#if ECB_GCC_VERSION(4,5)
+#if ECB_GCC_VERSION(4,5) || ECB_CLANG_BUILTIN(__builtin_unreachable)
  #define ecb_unreachable() __builtin_unreachable ()
#else
  /* this seems to work fine, but gcc always emits a warning for it :/ */
  ecb_inline void ecb_unreachable (void) ecb_noreturn;
  ecb_inline void ecb_unreachable (void) { }
#endif

/* try to tell the compiler that some condition is definitely true */
-#define ecb_assume(cond) do { if (!(cond)) ecb_unreachable (); } while (0)
+#define ecb_assume(cond) if (!(cond)) ecb_unreachable (); else 0
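ecb_assume turns a condition the programmer knows to be true into dead-code information for the optimizer; if the condition is ever false, behaviour is undefined. A minimal, invented example:

  static unsigned int my_div (unsigned int a, unsigned int b)
  {
    ecb_assume (b != 0); /* promise: callers never pass 0, so no zero check is needed */
    return a / b;
  }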

ecb_inline unsigned char ecb_byteorder_helper (void) ecb_const;
ecb_inline unsigned char
ecb_byteorder_helper (void)
{
-  const uint32_t u = 0x11223344;
-  return *(unsigned char *)&u;
+  /* the union code still generates code under pressure in gcc, */
+  /* but less than using pointers, and always seems to */
+  /* successfully return a constant. */
+  /* the reason why we have this horrible preprocessor mess */
+  /* is to avoid it in all cases, at least on common architectures */
+  /* or when using a recent enough gcc version (>= 4.6) */
+#if __i386 || __i386__ || _M_X86 || __amd64 || __amd64__ || _M_X64
+  return 0x44;
+#elif __BYTE_ORDER__ && __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
+  return 0x44;
+#elif __BYTE_ORDER__ && __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
+  return 0x11;
+#else
+  union
+  {
+    uint32_t i;
+    uint8_t c;
+  } u = { 0x11223344 };
+  return u.c;
+#endif
}

ecb_inline ecb_bool ecb_big_endian (void) ecb_const;
ecb_inline ecb_bool ecb_big_endian (void) { return ecb_byteorder_helper () == 0x11; }
ecb_inline ecb_bool ecb_little_endian (void) ecb_const;
[... unchanged lines omitted ...]
  }
#else
  #define ecb_array_length(name) (sizeof (name) / sizeof (name [0]))
#endif
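ecb_array_length yields the number of elements of a real array (it must not be applied to a pointer), which makes it convenient for iterating over static tables. An invented example:

  static const unsigned int my_primes [] = { 2, 3, 5, 7, 11 };

  static unsigned int my_prime_sum (void)
  {
    unsigned int i, sum = 0;

    for (i = 0; i < ecb_array_length (my_primes); ++i)
      sum += my_primes [i];

    return sum;
  }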

+/*******************************************************************************/
+/* floating point stuff, can be disabled by defining ECB_NO_LIBM */
+
+/* basically, everything uses "ieee pure-endian" floating point numbers */
+/* the only noteworthy exception is ancient armle, which uses order 43218765 */
+#if 0 \
+    || __i386 || __i386__ \
+    || __amd64 || __amd64__ || __x86_64 || __x86_64__ \
+    || __powerpc__ || __ppc__ || __powerpc64__ || __ppc64__ \
+    || defined __s390__ || defined __s390x__ \
+    || defined __mips__ \
+    || defined __alpha__ \
+    || defined __hppa__ \
+    || defined __ia64__ \
+    || defined __m68k__ \
+    || defined __m88k__ \
+    || defined __sh__ \
+    || defined _M_IX86 || defined _M_AMD64 || defined _M_IA64 \
+    || (defined __arm__ && (defined __ARM_EABI__ || defined __EABI__ || defined __VFP_FP__ || defined _WIN32_WCE || defined __ANDROID__)) \
+    || defined __aarch64__
+  #define ECB_STDFP 1
+  #include <string.h> /* for memcpy */
+#else
+  #define ECB_STDFP 0
+#endif
+
+#ifndef ECB_NO_LIBM
+
+  #include <math.h> /* for frexp*, ldexp*, INFINITY, NAN */
+
+  /* only the oldest of old doesn't have this one. solaris. */
+  #ifdef INFINITY
+    #define ECB_INFINITY INFINITY
+  #else
+    #define ECB_INFINITY HUGE_VAL
-#endif
+  #endif

+  #ifdef NAN
+    #define ECB_NAN NAN
+  #else
+    #define ECB_NAN ECB_INFINITY
+  #endif
+
+  /* converts an ieee half/binary16 to a float */
+  ecb_function_ float ecb_binary16_to_float (uint16_t x) ecb_const;
+  ecb_function_ float
+  ecb_binary16_to_float (uint16_t x)
+  {
+    int e = (x >> 10) & 0x1f;
+    int m = x & 0x3ff;
+    float r;
+
+    if (!e ) r = ldexpf (m , -24);
+    else if (e != 31) r = ldexpf (m + 0x400, e - 25);
+    else if (m ) r = ECB_NAN;
+    else r = ECB_INFINITY;
+
+    return x & 0x8000 ? -r : r;
+  }
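Two easy reference points for ecb_binary16_to_float: the ieee binary16 pattern 0x3c00 encodes 1.0 and 0xc000 encodes -2.0. A throwaway demo (my_binary16_demo is invented):

  static void my_binary16_demo (void)
  {
    float one       = ecb_binary16_to_float (0x3c00); /* == 1.0f */
    float minus_two = ecb_binary16_to_float (0xc000); /* == -2.0f */

    (void)one; (void)minus_two;
  }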
+
+  /* convert a float to ieee single/binary32 */
+  ecb_function_ uint32_t ecb_float_to_binary32 (float x) ecb_const;
+  ecb_function_ uint32_t
+  ecb_float_to_binary32 (float x)
+  {
+    uint32_t r;
+
+    #if ECB_STDFP
+      memcpy (&r, &x, 4);
+    #else
+      /* slow emulation, works for anything but -0 */
+      uint32_t m;
+      int e;
+
+      if (x == 0e0f ) return 0x00000000U;
+      if (x > +3.40282346638528860e+38f) return 0x7f800000U;
+      if (x < -3.40282346638528860e+38f) return 0xff800000U;
+      if (x != x ) return 0x7fbfffffU;
+
+      m = frexpf (x, &e) * 0x1000000U;
+
+      r = m & 0x80000000U;
+
+      if (r)
+        m = -m;
+
+      if (e <= -126)
+        {
+          m &= 0xffffffU;
+          m >>= (-125 - e);
+          e = -126;
+        }
+
+      r |= (e + 126) << 23;
+      r |= m & 0x7fffffU;
+    #endif
+
+    return r;
+  }
+
+  /* converts an ieee single/binary32 to a float */
+  ecb_function_ float ecb_binary32_to_float (uint32_t x) ecb_const;
+  ecb_function_ float
+  ecb_binary32_to_float (uint32_t x)
+  {
+    float r;
+
+    #if ECB_STDFP
+      memcpy (&r, &x, 4);
+    #else
+      /* emulation, only works for normals and subnormals and +0 */
+      int neg = x >> 31;
+      int e = (x >> 23) & 0xffU;
+
+      x &= 0x7fffffU;
+
+      if (e)
+        x |= 0x800000U;
+      else
+        e = 1;
+
+      /* we distrust ldexpf a bit and do the 2**-24 scaling by an extra multiply */
+      r = ldexpf (x * (0.5f / 0x800000U), e - 126);
+
+      r = neg ? -r : r;
+    #endif
+
+    return r;
+  }
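Since ecb_float_to_binary32 and ecb_binary32_to_float are inverses of each other (except for -0 on the emulated, non-ECB_STDFP path), a value can be round-tripped through its bit pattern, e.g. when serializing floats portably. An invented sketch:

  static float my_roundtrip (float x)
  {
    return ecb_binary32_to_float (ecb_float_to_binary32 (x));
  }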
+
+  /* convert a double to ieee double/binary64 */
+  ecb_function_ uint64_t ecb_double_to_binary64 (double x) ecb_const;
+  ecb_function_ uint64_t
+  ecb_double_to_binary64 (double x)
+  {
+    uint64_t r;
+
+    #if ECB_STDFP
+      memcpy (&r, &x, 8);
+    #else
+      /* slow emulation, works for anything but -0 */
+      uint64_t m;
+      int e;
+
+      if (x == 0e0 ) return 0x0000000000000000U;
+      if (x > +1.79769313486231470e+308) return 0x7ff0000000000000U;
+      if (x < -1.79769313486231470e+308) return 0xfff0000000000000U;
+      if (x != x ) return 0X7ff7ffffffffffffU;
+
+      m = frexp (x, &e) * 0x20000000000000U;
+
+      r = m & 0x8000000000000000;;
+
+      if (r)
+        m = -m;
+
+      if (e <= -1022)
+        {
+          m &= 0x1fffffffffffffU;
+          m >>= (-1021 - e);
+          e = -1022;
+        }
+
+      r |= ((uint64_t)(e + 1022)) << 52;
+      r |= m & 0xfffffffffffffU;
+    #endif
+
+    return r;
+  }
+
+  /* converts an ieee double/binary64 to a double */
+  ecb_function_ double ecb_binary64_to_double (uint64_t x) ecb_const;
+  ecb_function_ double
+  ecb_binary64_to_double (uint64_t x)
+  {
+    double r;
+
+    #if ECB_STDFP
+      memcpy (&r, &x, 8);
+    #else
+      /* emulation, only works for normals and subnormals and +0 */
+      int neg = x >> 63;
+      int e = (x >> 52) & 0x7ffU;
+
+      x &= 0xfffffffffffffU;
+
+      if (e)
+        x |= 0x10000000000000U;
+      else
+        e = 1;
+
+      /* we distrust ldexp a bit and do the 2**-53 scaling by an extra multiply */
+      r = ldexp (x * (0.5 / 0x10000000000000U), e - 1022);
+
+      r = neg ? -r : r;
+    #endif
+
+    return r;
+  }
+
+#endif
+
+#endif
+

Diff Legend

-      line removed in 1.143 (present only in revision 1.81)
+      line added in 1.143 (not present in revision 1.81)
[... unchanged lines omitted ...]   context skipped by the diff
Changed lines appear as a removed line followed by its replacement.