
Comparing libecb/ecb.h (file contents):
Revision 1.90 by root, Tue May 29 14:09:49 2012 UTC vs.
Revision 1.139 by root, Tue Oct 14 14:39:06 2014 UTC

1/* 1/*
2 * libecb - http://software.schmorp.de/pkg/libecb 2 * libecb - http://software.schmorp.de/pkg/libecb
3 * 3 *
4 * Copyright (©) 2009-2012 Marc Alexander Lehmann <libecb@schmorp.de> 4 * Copyright (©) 2009-2014 Marc Alexander Lehmann <libecb@schmorp.de>
5 * Copyright (©) 2011 Emanuele Giaquinta 5 * Copyright (©) 2011 Emanuele Giaquinta
6 * All rights reserved. 6 * All rights reserved.
7 * 7 *
8 * Redistribution and use in source and binary forms, with or without modifica- 8 * Redistribution and use in source and binary forms, with or without modifica-
9 * tion, are permitted provided that the following conditions are met: 9 * tion, are permitted provided that the following conditions are met:
23 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; 23 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
24 * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, 24 * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
25 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTH- 25 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTH-
26 * ERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED 26 * ERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
27 * OF THE POSSIBILITY OF SUCH DAMAGE. 27 * OF THE POSSIBILITY OF SUCH DAMAGE.
28 *
29 * Alternatively, the contents of this file may be used under the terms of
30 * the GNU General Public License ("GPL") version 2 or any later version,
31 * in which case the provisions of the GPL are applicable instead of
32 * the above. If you wish to allow the use of your version of this file
33 * only under the terms of the GPL and not to allow others to use your
34 * version of this file under the BSD license, indicate your decision
35 * by deleting the provisions above and replace them with the notice
36 * and other provisions required by the GPL. If you do not delete the
37 * provisions above, a recipient may use your version of this file under
38 * either the BSD or the GPL.
28 */ 39 */
29 40
30#ifndef ECB_H 41#ifndef ECB_H
31#define ECB_H 42#define ECB_H
32 43
33/* 16 bits major, 16 bits minor */ 44/* 16 bits major, 16 bits minor */
34#define ECB_VERSION 0x00010001 45#define ECB_VERSION 0x00010003
35 46
36#ifdef _WIN32 47#ifdef _WIN32
37 typedef signed char int8_t; 48 typedef signed char int8_t;
38 typedef unsigned char uint8_t; 49 typedef unsigned char uint8_t;
39 typedef signed short int16_t; 50 typedef signed short int16_t;
54 #else 65 #else
55 #define ECB_PTRSIZE 4 66 #define ECB_PTRSIZE 4
56 typedef uint32_t uintptr_t; 67 typedef uint32_t uintptr_t;
57 typedef int32_t intptr_t; 68 typedef int32_t intptr_t;
58 #endif 69 #endif
59 typedef intptr_t ptrdiff_t;
60#else 70#else
61 #include <inttypes.h> 71 #include <inttypes.h>
62 #if UINTMAX_MAX > 0xffffffffU 72 #if UINTMAX_MAX > 0xffffffffU
63 #define ECB_PTRSIZE 8 73 #define ECB_PTRSIZE 8
64 #else 74 #else
65 #define ECB_PTRSIZE 4 75 #define ECB_PTRSIZE 4
76 #endif
77#endif
78
79/* work around x32 idiocy by defining proper macros */
80#if __amd64 || __x86_64 || _M_AMD64 || _M_X64
81 #if _ILP32
82 #define ECB_AMD64_X32 1
83 #else
84 #define ECB_AMD64 1
66 #endif 85 #endif
67#endif 86#endif
68 87
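For illustration only (not part of the diff): a minimal sketch of how ECB_PTRSIZE and the new ECB_AMD64/ECB_AMD64_X32 macros might be consulted, assuming ecb.h is on the include path.

#include <stdio.h>
#include "ecb.h"

int main (void)
{
  /* ECB_PTRSIZE is the pointer size in bytes - 4 on x32 even though
     the cpu is amd64 (ECB_AMD64_X32 is then set instead of ECB_AMD64) */
  printf ("pointer size: %d bytes\n", ECB_PTRSIZE);

#if ECB_AMD64
  printf ("plain amd64 ABI\n");
#elif ECB_AMD64_X32
  printf ("amd64 with 32 bit pointers (x32 ABI)\n");
#endif

  return 0;
}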
69/* many compilers define _GNUC_ to some versions but then only implement 88/* many compilers define _GNUC_ to some versions but then only implement
70 * what their idiot authors think are the "more important" extensions, 89 * what their idiot authors think are the "more important" extensions,
71 * causing enormous grief in return for some better fake benchmark numbers. 90 * causing enormous grief in return for some better fake benchmark numbers.
72 * or so. 91 * or so.
73 * we try to detect these and simply assume they are not gcc - if they have 92 * we try to detect these and simply assume they are not gcc - if they have
74 * an issue with that they should have done it right in the first place. 93 * an issue with that they should have done it right in the first place.
75 */ 94 */
76#ifndef ECB_GCC_VERSION
77 #if !defined __GNUC_MINOR__ || defined __INTEL_COMPILER || defined __SUNPRO_C || defined __SUNPRO_CC || defined __llvm__ || defined __clang__ 95#if !defined __GNUC_MINOR__ || defined __INTEL_COMPILER || defined __SUNPRO_C || defined __SUNPRO_CC || defined __llvm__ || defined __clang__
78 #define ECB_GCC_VERSION(major,minor) 0 96 #define ECB_GCC_VERSION(major,minor) 0
79 #else 97#else
80 #define ECB_GCC_VERSION(major,minor) (__GNUC__ > (major) || (__GNUC__ == (major) && __GNUC_MINOR__ >= (minor))) 98 #define ECB_GCC_VERSION(major,minor) (__GNUC__ > (major) || (__GNUC__ == (major) && __GNUC_MINOR__ >= (minor)))
81 #endif 99#endif
82#endif
83 100
101#define ECB_CLANG_VERSION(major,minor) (__clang_major__ > (major) || (__clang_major__ == (major) && __clang_minor__ >= (minor)))
102
103#if __clang__ && defined(__has_builtin)
104 #define ECB_CLANG_BUILTIN(x) __has_builtin(x)
105#else
106 #define ECB_CLANG_BUILTIN(x) 0
107#endif
108
109#define ECB_CPP (__cplusplus+0)
110#define ECB_CPP11 (__cplusplus >= 201103L)
111
112#if ECB_CPP
113 #define ECB_C 0
114 #define ECB_STDC_VERSION 0
115#else
116 #define ECB_C 1
117 #define ECB_STDC_VERSION __STDC_VERSION__
118#endif
119
84#define ECB_C99 (__STDC_VERSION__ >= 199901L) 120#define ECB_C99 (ECB_STDC_VERSION >= 199901L)
85#define ECB_C11 (__STDC_VERSION__ >= 201112L) 121#define ECB_C11 (ECB_STDC_VERSION >= 201112L)
122
123#if ECB_CPP
124 #define ECB_EXTERN_C extern "C"
125 #define ECB_EXTERN_C_BEG ECB_EXTERN_C {
126 #define ECB_EXTERN_C_END }
127#else
128 #define ECB_EXTERN_C extern
129 #define ECB_EXTERN_C_BEG
130 #define ECB_EXTERN_C_END
131#endif
86 132
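The ECB_EXTERN_C_BEG/ECB_EXTERN_C_END pair added above replaces the usual #ifdef __cplusplus linkage dance in headers. A minimal sketch of a header that might use it (the mylib_* names are made up; ecb.h is assumed to be on the include path):

/* mylib.h - hypothetical header built on the linkage macros */
#include "ecb.h"

ECB_EXTERN_C_BEG

/* gets extern "C" linkage when included from C++, plain extern in C */
int mylib_add (int a, int b);

ECB_EXTERN_C_END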
87/*****************************************************************************/ 133/*****************************************************************************/
88 134
89/* ECB_NO_THREADS - ecb is not used by multiple threads, ever */ 135/* ECB_NO_THREADS - ecb is not used by multiple threads, ever */
90/* ECB_NO_SMP - ecb might be used in multiple threads, but only on a single cpu */ 136/* ECB_NO_SMP - ecb might be used in multiple threads, but only on a single cpu */
91 137
92#if ECB_NO_THREADS 138#if ECB_NO_THREADS
93# define ECB_NO_SMP 1 139 #define ECB_NO_SMP 1
94#endif 140#endif
95 141
96#if ECB_NO_THREADS || ECB_NO_SMP 142#if ECB_NO_SMP
97 #define ECB_MEMORY_FENCE do { } while (0) 143 #define ECB_MEMORY_FENCE do { } while (0)
98#endif
99
100#ifndef ECB_MEMORY_FENCE
101 #if ECB_C11 && !defined __STDC_NO_ATOMICS__
102 /* we assume that these memory fences work on all variables/all memory accesses, */
103 /* not just C11 atomics and atomic accesses */
104 #include <stdatomic.h>
105 #define ECB_MEMORY_FENCE atomic_thread_fence (memory_order_acq_rel)
106 #define ECB_MEMORY_FENCE_ACQUIRE atomic_thread_fence (memory_order_acquire)
107 #define ECB_MEMORY_FENCE_RELEASE atomic_thread_fence (memory_order_release)
108 #endif
109#endif 144#endif
110 145
111#ifndef ECB_MEMORY_FENCE 146#ifndef ECB_MEMORY_FENCE
112 #if ECB_GCC_VERSION(2,5) || defined __INTEL_COMPILER || (__llvm__ && __GNUC__) || __SUNPRO_C >= 0x5110 || __SUNPRO_CC >= 0x5110 147 #if ECB_GCC_VERSION(2,5) || defined __INTEL_COMPILER || (__llvm__ && __GNUC__) || __SUNPRO_C >= 0x5110 || __SUNPRO_CC >= 0x5110
113 #if __i386 || __i386__ 148 #if __i386 || __i386__
114 #define ECB_MEMORY_FENCE __asm__ __volatile__ ("lock; orb $0, -1(%%esp)" : : : "memory") 149 #define ECB_MEMORY_FENCE __asm__ __volatile__ ("lock; orb $0, -1(%%esp)" : : : "memory")
115 #define ECB_MEMORY_FENCE_ACQUIRE ECB_MEMORY_FENCE /* non-lock xchg might be enough */ 150 #define ECB_MEMORY_FENCE_ACQUIRE __asm__ __volatile__ ("" : : : "memory")
116 #define ECB_MEMORY_FENCE_RELEASE do { } while (0) /* unlikely to change in future cpus */ 151 #define ECB_MEMORY_FENCE_RELEASE __asm__ __volatile__ ("")
117 #elif __amd64 || __amd64__ || __x86_64 || __x86_64__ 152 #elif __amd64 || __amd64__ || __x86_64 || __x86_64__
118 #define ECB_MEMORY_FENCE __asm__ __volatile__ ("mfence" : : : "memory") 153 #define ECB_MEMORY_FENCE __asm__ __volatile__ ("mfence" : : : "memory")
119 #define ECB_MEMORY_FENCE_ACQUIRE __asm__ __volatile__ ("lfence" : : : "memory") 154 #define ECB_MEMORY_FENCE_ACQUIRE __asm__ __volatile__ ("" : : : "memory")
120 #define ECB_MEMORY_FENCE_RELEASE __asm__ __volatile__ ("sfence") /* play safe - not needed in any current cpu */ 155 #define ECB_MEMORY_FENCE_RELEASE __asm__ __volatile__ ("")
121 #elif __powerpc__ || __ppc__ || __powerpc64__ || __ppc64__ 156 #elif __powerpc__ || __ppc__ || __powerpc64__ || __ppc64__
122 #define ECB_MEMORY_FENCE __asm__ __volatile__ ("sync" : : : "memory") 157 #define ECB_MEMORY_FENCE __asm__ __volatile__ ("sync" : : : "memory")
123 #elif defined __ARM_ARCH_6__ || defined __ARM_ARCH_6J__ \ 158 #elif defined __ARM_ARCH_6__ || defined __ARM_ARCH_6J__ \
124 || defined __ARM_ARCH_6K__ || defined __ARM_ARCH_6ZK__ 159 || defined __ARM_ARCH_6K__ || defined __ARM_ARCH_6ZK__
125 #define ECB_MEMORY_FENCE __asm__ __volatile__ ("mcr p15,0,%0,c7,c10,5" : : "r" (0) : "memory") 160 #define ECB_MEMORY_FENCE __asm__ __volatile__ ("mcr p15,0,%0,c7,c10,5" : : "r" (0) : "memory")
126 #elif defined __ARM_ARCH_7__ || defined __ARM_ARCH_7A__ \ 161 #elif defined __ARM_ARCH_7__ || defined __ARM_ARCH_7A__ \
127 || defined __ARM_ARCH_7M__ || defined __ARM_ARCH_7R__ 162 || defined __ARM_ARCH_7M__ || defined __ARM_ARCH_7R__
128 #define ECB_MEMORY_FENCE __asm__ __volatile__ ("dmb" : : : "memory") 163 #define ECB_MEMORY_FENCE __asm__ __volatile__ ("dmb" : : : "memory")
164 #elif __aarch64__
165 #define ECB_MEMORY_FENCE __asm__ __volatile__ ("dmb ish" : : : "memory")
129 #elif __sparc || __sparc__ 166 #elif (__sparc || __sparc__) && !__sparcv8
130 #define ECB_MEMORY_FENCE __asm__ __volatile__ ("membar #LoadStore | #LoadLoad | #StoreStore | #StoreLoad | " : : : "memory") 167 #define ECB_MEMORY_FENCE __asm__ __volatile__ ("membar #LoadStore | #LoadLoad | #StoreStore | #StoreLoad" : : : "memory")
131 #define ECB_MEMORY_FENCE_ACQUIRE __asm__ __volatile__ ("membar #LoadStore | #LoadLoad" : : : "memory") 168 #define ECB_MEMORY_FENCE_ACQUIRE __asm__ __volatile__ ("membar #LoadStore | #LoadLoad" : : : "memory")
132 #define ECB_MEMORY_FENCE_RELEASE __asm__ __volatile__ ("membar #LoadStore | #StoreStore") 169 #define ECB_MEMORY_FENCE_RELEASE __asm__ __volatile__ ("membar #LoadStore | #StoreStore")
133 #elif defined __s390__ || defined __s390x__ 170 #elif defined __s390__ || defined __s390x__
134 #define ECB_MEMORY_FENCE __asm__ __volatile__ ("bcr 15,0" : : : "memory") 171 #define ECB_MEMORY_FENCE __asm__ __volatile__ ("bcr 15,0" : : : "memory")
135 #elif defined __mips__ 172 #elif defined __mips__
173 /* GNU/Linux emulates sync on mips1 architectures, so we force its use */
174 /* anybody else who still uses mips1 is supposed to send in their version, with detection code. */
136 #define ECB_MEMORY_FENCE __asm__ __volatile__ ("sync" : : : "memory") 175 #define ECB_MEMORY_FENCE __asm__ __volatile__ (".set mips2; sync; .set mips0" : : : "memory")
137 #elif defined __alpha__ 176 #elif defined __alpha__
138 #define ECB_MEMORY_FENCE __asm__ __volatile__ ("mb" : : : "memory") 177 #define ECB_MEMORY_FENCE __asm__ __volatile__ ("mb" : : : "memory")
178 #elif defined __hppa__
179 #define ECB_MEMORY_FENCE __asm__ __volatile__ ("" : : : "memory")
180 #define ECB_MEMORY_FENCE_RELEASE __asm__ __volatile__ ("")
181 #elif defined __ia64__
182 #define ECB_MEMORY_FENCE __asm__ __volatile__ ("mf" : : : "memory")
183 #elif defined __m68k__
184 #define ECB_MEMORY_FENCE __asm__ __volatile__ ("" : : : "memory")
185 #elif defined __m88k__
186 #define ECB_MEMORY_FENCE __asm__ __volatile__ ("tb1 0,%%r0,128" : : : "memory")
187 #elif defined __sh__
188 #define ECB_MEMORY_FENCE __asm__ __volatile__ ("" : : : "memory")
139 #endif 189 #endif
140 #endif 190 #endif
141#endif 191#endif
142 192
143#ifndef ECB_MEMORY_FENCE 193#ifndef ECB_MEMORY_FENCE
194 #if ECB_GCC_VERSION(4,7)
195 /* see comment below (stdatomic.h) about the C11 memory model. */
196 #define ECB_MEMORY_FENCE __atomic_thread_fence (__ATOMIC_SEQ_CST)
197 #define ECB_MEMORY_FENCE_ACQUIRE __atomic_thread_fence (__ATOMIC_ACQUIRE)
198 #define ECB_MEMORY_FENCE_RELEASE __atomic_thread_fence (__ATOMIC_RELEASE)
199
200 /* The __has_feature syntax from clang is so misdesigned that we cannot use it
201 * without risking compile time errors with other compilers. We *could*
202 * define our own ecb_clang_has_feature, but I just can't be bothered to work
203 * around this shit time and again.
204 * #elif defined __clang && __has_feature (cxx_atomic)
205 * // see comment below (stdatomic.h) about the C11 memory model.
206 * #define ECB_MEMORY_FENCE __c11_atomic_thread_fence (__ATOMIC_SEQ_CST)
207 * #define ECB_MEMORY_FENCE_ACQUIRE __c11_atomic_thread_fence (__ATOMIC_ACQUIRE)
208 * #define ECB_MEMORY_FENCE_RELEASE __c11_atomic_thread_fence (__ATOMIC_RELEASE)
209 */
210
144 #if ECB_GCC_VERSION(4,4) || defined __INTEL_COMPILER || defined __clang__ 211 #elif ECB_GCC_VERSION(4,4) || defined __INTEL_COMPILER || defined __clang__
145 #define ECB_MEMORY_FENCE __sync_synchronize () 212 #define ECB_MEMORY_FENCE __sync_synchronize ()
146 /*#define ECB_MEMORY_FENCE_ACQUIRE ({ char dummy = 0; __sync_lock_test_and_set (&dummy, 1); }) */ 213 #elif _MSC_VER >= 1500 /* VC++ 2008 */
147 /*#define ECB_MEMORY_FENCE_RELEASE ({ char dummy = 1; __sync_lock_release (&dummy ); }) */ 214 /* apparently, microsoft broke all the memory barrier stuff in Visual Studio 2008... */
215 #pragma intrinsic(_ReadBarrier,_WriteBarrier,_ReadWriteBarrier)
216 #define ECB_MEMORY_FENCE _ReadWriteBarrier (); MemoryBarrier()
217 #define ECB_MEMORY_FENCE_ACQUIRE _ReadWriteBarrier (); MemoryBarrier() /* according to msdn, _ReadBarrier is not a load fence */
218 #define ECB_MEMORY_FENCE_RELEASE _WriteBarrier (); MemoryBarrier()
148 #elif _MSC_VER >= 1400 /* VC++ 2005 */ 219 #elif _MSC_VER >= 1400 /* VC++ 2005 */
149 #pragma intrinsic(_ReadBarrier,_WriteBarrier,_ReadWriteBarrier) 220 #pragma intrinsic(_ReadBarrier,_WriteBarrier,_ReadWriteBarrier)
150 #define ECB_MEMORY_FENCE _ReadWriteBarrier () 221 #define ECB_MEMORY_FENCE _ReadWriteBarrier ()
151 #define ECB_MEMORY_FENCE_ACQUIRE _ReadWriteBarrier () /* according to msdn, _ReadBarrier is not a load fence */ 222 #define ECB_MEMORY_FENCE_ACQUIRE _ReadWriteBarrier () /* according to msdn, _ReadBarrier is not a load fence */
152 #define ECB_MEMORY_FENCE_RELEASE _WriteBarrier () 223 #define ECB_MEMORY_FENCE_RELEASE _WriteBarrier ()
162 #define ECB_MEMORY_FENCE __sync () 233 #define ECB_MEMORY_FENCE __sync ()
163 #endif 234 #endif
164#endif 235#endif
165 236
166#ifndef ECB_MEMORY_FENCE 237#ifndef ECB_MEMORY_FENCE
238 #if ECB_C11 && !defined __STDC_NO_ATOMICS__
239 /* we assume that these memory fences work on all variables/all memory accesses, */
240 /* not just C11 atomics and atomic accesses */
241 #include <stdatomic.h>
242 /* Unfortunately, neither gcc 4.7 nor clang 3.1 generate any instructions for */
243 /* any fence other than seq_cst, which isn't very efficient for us. */
244 /* Why that is, we don't know - either the C11 memory model is quite useless */
245 /* for most usages, or gcc and clang have a bug */
246 /* I *currently* lean towards the latter, and inefficiently implement */
247 /* all three of ecb's fences as a seq_cst fence */
248 /* Update, gcc-4.8 generates mfence for all c++ fences, but nothing */
249 /* for all __atomic_thread_fence's except seq_cst */
250 #define ECB_MEMORY_FENCE atomic_thread_fence (memory_order_seq_cst)
251 #endif
252#endif
253
254#ifndef ECB_MEMORY_FENCE
167 #if !ECB_AVOID_PTHREADS 255 #if !ECB_AVOID_PTHREADS
168 /* 256 /*
169 * if you get undefined symbol references to pthread_mutex_lock, 257 * if you get undefined symbol references to pthread_mutex_lock,
170 * or failure to find pthread.h, then you should implement 258 * or failure to find pthread.h, then you should implement
171 * the ECB_MEMORY_FENCE operations for your cpu/compiler 259 * the ECB_MEMORY_FENCE operations for your cpu/compiler
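As a rough illustration of what these fences are for, a sketch of flag-based handover between two threads (the names are made up; a real design would likely use the acquire/release variants where the header provides them):

#include "ecb.h"

static int shared_data;
static volatile int data_ready;

void produce (int value)
{
  shared_data = value;
  ECB_MEMORY_FENCE;   /* make the store to shared_data visible first */
  data_ready = 1;
}

int consume (void)
{
  while (!data_ready)
    ;                 /* spin until the producer sets the flag */

  ECB_MEMORY_FENCE;   /* do not read shared_data before seeing the flag */
  return shared_data;
}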
216#define ECB_STRINGIFY_(a) # a 304#define ECB_STRINGIFY_(a) # a
217#define ECB_STRINGIFY(a) ECB_STRINGIFY_(a) 305#define ECB_STRINGIFY(a) ECB_STRINGIFY_(a)
218 306
219#define ecb_function_ ecb_inline 307#define ecb_function_ ecb_inline
220 308
221#if ECB_GCC_VERSION(3,1) 309#if ECB_GCC_VERSION(3,1) || ECB_CLANG_VERSION(2,8)
222 #define ecb_attribute(attrlist) __attribute__(attrlist) 310 #define ecb_attribute(attrlist) __attribute__(attrlist)
311#else
312 #define ecb_attribute(attrlist)
313#endif
314
315#if ECB_GCC_VERSION(3,1) || ECB_CLANG_BUILTIN(__builtin_constant_p)
223 #define ecb_is_constant(expr) __builtin_constant_p (expr) 316 #define ecb_is_constant(expr) __builtin_constant_p (expr)
317#else
318 /* possible C11 impl for integral types
319 typedef struct ecb_is_constant_struct ecb_is_constant_struct;
320 #define ecb_is_constant(expr) _Generic ((1 ? (struct ecb_is_constant_struct *)0 : (void *)((expr) - (expr)), ecb_is_constant_struct *: 0, default: 1)) */
321
322 #define ecb_is_constant(expr) 0
323#endif
324
325#if ECB_GCC_VERSION(3,1) || ECB_CLANG_BUILTIN(__builtin_expect)
224 #define ecb_expect(expr,value) __builtin_expect ((expr),(value)) 326 #define ecb_expect(expr,value) __builtin_expect ((expr),(value))
327#else
328 #define ecb_expect(expr,value) (expr)
329#endif
330
331#if ECB_GCC_VERSION(3,1) || ECB_CLANG_BUILTIN(__builtin_prefetch)
225 #define ecb_prefetch(addr,rw,locality) __builtin_prefetch (addr, rw, locality) 332 #define ecb_prefetch(addr,rw,locality) __builtin_prefetch (addr, rw, locality)
226#else 333#else
227 #define ecb_attribute(attrlist)
228 #define ecb_is_constant(expr) 0
229 #define ecb_expect(expr,value) (expr)
230 #define ecb_prefetch(addr,rw,locality) 334 #define ecb_prefetch(addr,rw,locality)
231#endif 335#endif
232 336
233/* no emulation for ecb_decltype */ 337/* no emulation for ecb_decltype */
234#if ECB_GCC_VERSION(4,5) 338#if ECB_GCC_VERSION(4,5)
235 #define ecb_decltype(x) __decltype(x) 339 #define ecb_decltype(x) __decltype(x)
236#elif ECB_GCC_VERSION(3,0) 340#elif ECB_GCC_VERSION(3,0)
237 #define ecb_decltype(x) __typeof(x) 341 #define ecb_decltype(x) __typeof(x)
238#endif 342#endif
239 343
344#if _MSC_VER >= 1300
345 #define ecb_deprecated __declspec(deprecated)
346#else
347 #define ecb_deprecated ecb_attribute ((__deprecated__))
348#endif
349
240#define ecb_noinline ecb_attribute ((__noinline__)) 350#define ecb_noinline ecb_attribute ((__noinline__))
241#define ecb_unused ecb_attribute ((__unused__)) 351#define ecb_unused ecb_attribute ((__unused__))
242#define ecb_const ecb_attribute ((__const__)) 352#define ecb_const ecb_attribute ((__const__))
243#define ecb_pure ecb_attribute ((__pure__)) 353#define ecb_pure ecb_attribute ((__pure__))
244 354
355/* http://msdn.microsoft.com/en-us/library/k6ktzx3s.aspx __declspec(noreturn) */
245#if ECB_C11 356#if ECB_C11
246 #define ecb_noreturn _Noreturn 357 #define ecb_noreturn _Noreturn
247#else 358#else
248 #define ecb_noreturn ecb_attribute ((__noreturn__)) 359 #define ecb_noreturn ecb_attribute ((__noreturn__))
249#endif 360#endif
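A short sketch of the attribute wrappers in use (the function and variable names are made up; ecb.h is assumed to be on the include path):

#include <stdio.h>
#include <stdlib.h>
#include "ecb.h"

/* ecb_noreturn maps to C11 _Noreturn or __attribute__((__noreturn__)),
   and expands to nothing where neither is available */
ecb_noreturn static void
fatal (const char *msg)
{
  fprintf (stderr, "fatal: %s\n", msg);
  exit (1);
}

/* ecb_unused suppresses "defined but not used" warnings where supported */
static int debug_level ecb_unused;

int main (int argc, char **argv)
{
  (void) argv;

  if (argc > 9)
    fatal ("too many arguments");

  return 0;
}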
266/* for compatibility to the rest of the world */ 377/* for compatibility to the rest of the world */
267#define ecb_likely(expr) ecb_expect_true (expr) 378#define ecb_likely(expr) ecb_expect_true (expr)
268#define ecb_unlikely(expr) ecb_expect_false (expr) 379#define ecb_unlikely(expr) ecb_expect_false (expr)
269 380
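A sketch of how the expectation and prefetch hints might be combined in a hot loop (illustrative only; the function and the prefetch distance are made up):

#include <stddef.h>
#include "ecb.h"

long
sum_positive (const long *values, size_t n)
{
  long sum = 0;
  size_t i;

  for (i = 0; i < n; ++i)
    {
      /* read ahead: rw = 0 (read), locality = 3 (keep in cache) */
      ecb_prefetch (values + i + 16, 0, 3);

      /* hint that most elements are expected to be positive */
      if (ecb_expect_true (values [i] > 0))
        sum += values [i];
    }

  return sum;
}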
270/* count trailing zero bits and count # of one bits */ 381/* count trailing zero bits and count # of one bits */
271#if ECB_GCC_VERSION(3,4) 382#if ECB_GCC_VERSION(3,4) \
383 || (ECB_CLANG_BUILTIN(__builtin_clz) && ECB_CLANG_BUILTIN(__builtin_clzll) \
384 && ECB_CLANG_BUILTIN(__builtin_ctz) && ECB_CLANG_BUILTIN(__builtin_ctzll) \
385 && ECB_CLANG_BUILTIN(__builtin_popcount))
272 /* we assume int == 32 bit, long == 32 or 64 bit and long long == 64 bit */ 386 /* we assume int == 32 bit, long == 32 or 64 bit and long long == 64 bit */
273 #define ecb_ld32(x) (__builtin_clz (x) ^ 31) 387 #define ecb_ld32(x) (__builtin_clz (x) ^ 31)
274 #define ecb_ld64(x) (__builtin_clzll (x) ^ 63) 388 #define ecb_ld64(x) (__builtin_clzll (x) ^ 63)
275 #define ecb_ctz32(x) __builtin_ctz (x) 389 #define ecb_ctz32(x) __builtin_ctz (x)
276 #define ecb_ctz64(x) __builtin_ctzll (x) 390 #define ecb_ctz64(x) __builtin_ctzll (x)
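A small sketch of the bit-scan helpers (illustrative only, assuming ecb.h is on the include path): ecb_ctz32 walks the set bits of a mask, ecb_ld32 doubles as an integer log2.

#include <stdio.h>
#include <stdint.h>
#include "ecb.h"

int main (void)
{
  uint32_t mask = 0x00008841;

  while (mask)
    {
      int bit = ecb_ctz32 (mask); /* index of the lowest set bit */
      printf ("bit %d is set\n", bit);
      mask &= mask - 1;           /* clear that bit */
    }

  printf ("floor (log2 (1000)) = %d\n", ecb_ld32 (1000)); /* prints 9 */
  return 0;
}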
407ecb_inline uint32_t ecb_rotl32 (uint32_t x, unsigned int count) { return (x >> (32 - count)) | (x << count); } 521ecb_inline uint32_t ecb_rotl32 (uint32_t x, unsigned int count) { return (x >> (32 - count)) | (x << count); }
408ecb_inline uint32_t ecb_rotr32 (uint32_t x, unsigned int count) { return (x << (32 - count)) | (x >> count); } 522ecb_inline uint32_t ecb_rotr32 (uint32_t x, unsigned int count) { return (x << (32 - count)) | (x >> count); }
409ecb_inline uint64_t ecb_rotl64 (uint64_t x, unsigned int count) { return (x >> (64 - count)) | (x << count); } 523ecb_inline uint64_t ecb_rotl64 (uint64_t x, unsigned int count) { return (x >> (64 - count)) | (x << count); }
410ecb_inline uint64_t ecb_rotr64 (uint64_t x, unsigned int count) { return (x << (64 - count)) | (x >> count); } 524ecb_inline uint64_t ecb_rotr64 (uint64_t x, unsigned int count) { return (x << (64 - count)) | (x >> count); }
411 525
412#if ECB_GCC_VERSION(4,3) 526#if ECB_GCC_VERSION(4,3) || (ECB_CLANG_BUILTIN(__builtin_bswap32) && ECB_CLANG_BUILTIN(__builtin_bswap64))
413 #define ecb_bswap16(x) (__builtin_bswap32 (x) >> 16) 527 #define ecb_bswap16(x) (__builtin_bswap32 (x) >> 16)
414 #define ecb_bswap32(x) __builtin_bswap32 (x) 528 #define ecb_bswap32(x) __builtin_bswap32 (x)
415 #define ecb_bswap64(x) __builtin_bswap64 (x) 529 #define ecb_bswap64(x) __builtin_bswap64 (x)
416#else 530#else
417 ecb_function_ uint16_t ecb_bswap16 (uint16_t x) ecb_const; 531 ecb_function_ uint16_t ecb_bswap16 (uint16_t x) ecb_const;
434 { 548 {
435 return (((uint64_t)ecb_bswap32 (x)) << 32) | ecb_bswap32 (x >> 32); 549 return (((uint64_t)ecb_bswap32 (x)) << 32) | ecb_bswap32 (x >> 32);
436 } 550 }
437#endif 551#endif
438 552
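A minimal sketch of the byte-swapping helpers, e.g. for converting host values to the opposite byte order (illustrative only; assumes ecb.h is on the include path):

#include <stdio.h>
#include <stdint.h>
#include "ecb.h"

int main (void)
{
  uint32_t u32 = 0x11223344;
  uint16_t u16 = 0x1234;

  printf ("%08x -> %08x\n", (unsigned) u32, (unsigned) ecb_bswap32 (u32)); /* 44332211 */
  printf ("%04x -> %04x\n", (unsigned) u16, (unsigned) ecb_bswap16 (u16)); /* 3412 */
  return 0;
}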
439#if ECB_GCC_VERSION(4,5) 553#if ECB_GCC_VERSION(4,5) || ECB_CLANG_BUILTIN(__builtin_unreachable)
440 #define ecb_unreachable() __builtin_unreachable () 554 #define ecb_unreachable() __builtin_unreachable ()
441#else 555#else
442 /* this seems to work fine, but gcc always emits a warning for it :/ */ 556 /* this seems to work fine, but gcc always emits a warning for it :/ */
443 ecb_inline void ecb_unreachable (void) ecb_noreturn; 557 ecb_inline void ecb_unreachable (void) ecb_noreturn;
444 ecb_inline void ecb_unreachable (void) { } 558 ecb_inline void ecb_unreachable (void) { }
445#endif 559#endif
446 560
447/* try to tell the compiler that some condition is definitely true */ 561/* try to tell the compiler that some condition is definitely true */
448#define ecb_assume(cond) do { if (!(cond)) ecb_unreachable (); } while (0) 562#define ecb_assume(cond) if (!(cond)) ecb_unreachable (); else 0
449 563
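Both are typically used to hand the optimizer facts it cannot prove on its own. A rough sketch (the enum and function are made up; assumes ecb.h is included):

#include "ecb.h"

enum color { RED, GREEN, BLUE };

const char *
color_name (enum color c)
{
  /* callers promise to pass a valid enumerator */
  ecb_assume (c == RED || c == GREEN || c == BLUE);

  switch (c)
    {
      case RED:   return "red";
      case GREEN: return "green";
      case BLUE:  return "blue";
    }

  ecb_unreachable (); /* no other value can reach this point */
  return "";          /* not reached; keeps pedantic compilers quiet */
}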
450ecb_inline unsigned char ecb_byteorder_helper (void) ecb_const; 564ecb_inline unsigned char ecb_byteorder_helper (void) ecb_const;
451ecb_inline unsigned char 565ecb_inline unsigned char
452ecb_byteorder_helper (void) 566ecb_byteorder_helper (void)
453{ 567{
454 const uint32_t u = 0x11223344; 568 /* the union code still generates code under pressure in gcc, */
455 return *(unsigned char *)&u; 569 /* but less than using pointers, and always seems to */
570 /* successfully return a constant. */
571 /* the reason why we have this horrible preprocessor mess */
572 /* is to avoid it in all cases, at least on common architectures */
573 /* or when using a recent enough gcc version (>= 4.6) */
574#if __i386 || __i386__ || _M_X86 || __amd64 || __amd64__ || _M_X64
575 return 0x44;
576#elif __BYTE_ORDER__ && __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
577 return 0x44;
578#elif __BYTE_ORDER__ && __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
579 return 0x11;
580#else
581 union
582 {
583 uint32_t i;
584 uint8_t c;
585 } u = { 0x11223344 };
586 return u.c;
587#endif
456} 588}
457 589
458ecb_inline ecb_bool ecb_big_endian (void) ecb_const; 590ecb_inline ecb_bool ecb_big_endian (void) ecb_const;
459ecb_inline ecb_bool ecb_big_endian (void) { return ecb_byteorder_helper () == 0x11; } 591ecb_inline ecb_bool ecb_big_endian (void) { return ecb_byteorder_helper () == 0x11; }
460ecb_inline ecb_bool ecb_little_endian (void) ecb_const; 592ecb_inline ecb_bool ecb_little_endian (void) ecb_const;
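A minimal sketch of the endianness predicates in use (illustrative only; assumes ecb.h is on the include path):

#include <stdio.h>
#include "ecb.h"

int main (void)
{
  if (ecb_big_endian ())
    printf ("big endian host\n");
  else if (ecb_little_endian ())
    printf ("little endian host\n");
  else
    printf ("mixed/unusual byte order\n");

  return 0;
}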
491 } 623 }
492#else 624#else
493 #define ecb_array_length(name) (sizeof (name) / sizeof (name [0])) 625 #define ecb_array_length(name) (sizeof (name) / sizeof (name [0]))
494#endif 626#endif
495 627
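A one-liner in use (illustrative only; assumes ecb.h is on the include path):

#include <stdio.h>
#include "ecb.h"

static const int primes [] = { 2, 3, 5, 7, 11, 13 };

int main (void)
{
  size_t i;

  for (i = 0; i < ecb_array_length (primes); ++i)
    printf ("%d\n", primes [i]);

  return 0;
}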
628/*******************************************************************************/
629/* floating point stuff, can be disabled by defining ECB_NO_LIBM */
630
631/* basically, everything uses "ieee pure-endian" floating point numbers */
632/* the only noteworthy exception is ancient armle, which uses order 43218765 */
633#if 0 \
634 || __i386 || __i386__ \
635 || __amd64 || __amd64__ || __x86_64 || __x86_64__ \
636 || __powerpc__ || __ppc__ || __powerpc64__ || __ppc64__ \
637 || defined __s390__ || defined __s390x__ \
638 || defined __mips__ \
639 || defined __alpha__ \
640 || defined __hppa__ \
641 || defined __ia64__ \
642 || defined __m68k__ \
643 || defined __m88k__ \
644 || defined __sh__ \
645 || defined _M_IX86 || defined _M_AMD64 || defined _M_IA64 \
646 || (defined __arm__ && (defined __ARM_EABI__ || defined __EABI__ || defined __VFP_FP__ || defined _WIN32_WCE || defined __ANDROID__)) \
647 || defined __aarch64__
648 #define ECB_STDFP 1
649 #include <string.h> /* for memcpy */
650#else
651 #define ECB_STDFP 0
652#endif
653
654#ifndef ECB_NO_LIBM
655
656 #include <math.h> /* for frexp*, ldexp*, INFINITY, NAN */
657
658 /* only the oldest of old doesn't have this one. solaris. */
659 #ifdef INFINITY
660 #define ECB_INFINITY INFINITY
661 #else
662 #define ECB_INFINITY HUGE_VAL
496#endif 663 #endif
497 664
665 #ifdef NAN
666 #define ECB_NAN NAN
667 #else
668 #define ECB_NAN ECB_INFINITY
669 #endif
670
671 /* converts an ieee half/binary16 to a float */
672 ecb_function_ float ecb_binary16_to_float (uint16_t x) ecb_const;
673 ecb_function_ float
674 ecb_binary16_to_float (uint16_t x)
675 {
676 int e = (x >> 10) & 0x1f;
677 int m = x & 0x3ff;
678 float r;
679
680 if (!e ) r = ldexpf (m , -24);
681 else if (e != 31) r = ldexpf (m + 0x400, e - 25);
682 else if (m ) r = ECB_NAN;
683 else r = ECB_INFINITY;
684
685 return x & 0x8000 ? -r : r;
686 }
687
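A few worked values for the half-precision decoder above (illustrative only; assumes ecb.h is included, ECB_NO_LIBM is not defined, and -lm is linked where needed):

#include <stdio.h>
#include <stdint.h>
#include "ecb.h"

int main (void)
{
  printf ("%g\n", ecb_binary16_to_float (0x3c00)); /* exponent 15, mantissa 0 ->  1.0    */
  printf ("%g\n", ecb_binary16_to_float (0xc000)); /* sign set, exponent 16   -> -2.0    */
  printf ("%g\n", ecb_binary16_to_float (0x0001)); /* subnormal               ->  2**-24 */
  printf ("%g\n", ecb_binary16_to_float (0x7c00)); /* exponent 31, mantissa 0 ->  inf    */
  return 0;
}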
688 /* convert a float to ieee single/binary32 */
689 ecb_function_ uint32_t ecb_float_to_binary32 (float x) ecb_const;
690 ecb_function_ uint32_t
691 ecb_float_to_binary32 (float x)
692 {
693 uint32_t r;
694
695 #if ECB_STDFP
696 memcpy (&r, &x, 4);
697 #else
698 /* slow emulation, works for anything but -0 */
699 uint32_t m;
700 int e;
701
702 if (x == 0e0f ) return 0x00000000U;
703 if (x > +3.40282346638528860e+38f) return 0x7f800000U;
704 if (x < -3.40282346638528860e+38f) return 0xff800000U;
705 if (x != x ) return 0x7fbfffffU;
706
707 m = frexpf (x, &e) * 0x1000000U;
708
709 r = m & 0x80000000U;
710
711 if (r)
712 m = -m;
713
714 if (e <= -126)
715 {
716 m &= 0xffffffU;
717 m >>= (-125 - e);
718 e = -126;
719 }
720
721 r |= (e + 126) << 23;
722 r |= m & 0x7fffffU;
723 #endif
724
725 return r;
726 }
727
728 /* converts an ieee single/binary32 to a float */
729 ecb_function_ float ecb_binary32_to_float (uint32_t x) ecb_const;
730 ecb_function_ float
731 ecb_binary32_to_float (uint32_t x)
732 {
733 float r;
734
735 #if ECB_STDFP
736 memcpy (&r, &x, 4);
737 #else
738 /* emulation, only works for normals and subnormals and +0 */
739 int neg = x >> 31;
740 int e = (x >> 23) & 0xffU;
741
742 x &= 0x7fffffU;
743
744 if (e)
745 x |= 0x800000U;
746 else
747 e = 1;
748
749 /* we distrust ldexpf a bit and do the 2**-24 scaling by an extra multiply */
750 r = ldexpf (x * (0.5f / 0x800000U), e - 126);
751
752 r = neg ? -r : r;
753 #endif
754
755 return r;
756 }
757
758 /* convert a double to ieee double/binary64 */
759 ecb_function_ uint64_t ecb_double_to_binary64 (double x) ecb_const;
760 ecb_function_ uint64_t
761 ecb_double_to_binary64 (double x)
762 {
763 uint64_t r;
764
765 #if ECB_STDFP
766 memcpy (&r, &x, 8);
767 #else
768 /* slow emulation, works for anything but -0 */
769 uint64_t m;
770 int e;
771
772 if (x == 0e0 ) return 0x0000000000000000U;
773 if (x > +1.79769313486231470e+308) return 0x7ff0000000000000U;
774 if (x < -1.79769313486231470e+308) return 0xfff0000000000000U;
775 if (x != x ) return 0X7ff7ffffffffffffU;
776
777 m = frexp (x, &e) * 0x20000000000000U;
778
779 r = m & 0x8000000000000000;;
780
781 if (r)
782 m = -m;
783
784 if (e <= -1022)
785 {
786 m &= 0x1fffffffffffffU;
787 m >>= (-1021 - e);
788 e = -1022;
789 }
790
791 r |= ((uint64_t)(e + 1022)) << 52;
792 r |= m & 0xfffffffffffffU;
793 #endif
794
795 return r;
796 }
797
798 /* converts an ieee double/binary64 to a double */
799 ecb_function_ double ecb_binary64_to_double (uint64_t x) ecb_const;
800 ecb_function_ double
801 ecb_binary64_to_double (uint64_t x)
802 {
803 double r;
804
805 #if ECB_STDFP
806 memcpy (&r, &x, 8);
807 #else
808 /* emulation, only works for normals and subnormals and +0 */
809 int neg = x >> 63;
810 int e = (x >> 52) & 0x7ffU;
811
812 x &= 0xfffffffffffffU;
813
814 if (e)
815 x |= 0x10000000000000U;
816 else
817 e = 1;
818
819 /* we distrust ldexp a bit and do the 2**-53 scaling by an extra multiply */
820 r = ldexp (x * (0.5 / 0x10000000000000U), e - 1022);
821
822 r = neg ? -r : r;
823 #endif
824
825 return r;
826 }
827
828#endif
829
830#endif
831
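A sketch of the single-precision helpers round-tripping a value, and of the ECB_STDFP switch that decides between memcpy and the portable emulation (illustrative only; assumes ecb.h is included, ECB_NO_LIBM is not defined, and -lm is linked where needed):

#include <stdio.h>
#include <stdint.h>
#include "ecb.h"

int main (void)
{
  float x = 1.5f;
  uint32_t bits = ecb_float_to_binary32 (x); /* 0x3fc00000 for 1.5 */
  float y = ecb_binary32_to_float (bits);

  printf ("%g -> 0x%08x -> %g\n", x, (unsigned) bits, y);
  printf ("ieee layout used directly: %s\n", ECB_STDFP ? "yes" : "no");
  return 0;
}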
