
Comparing libecb/ecb.h (file contents):
Revision 1.92 by root, Tue May 29 17:17:56 2012 UTC vs.
Revision 1.174 by root, Wed Nov 25 02:36:53 2015 UTC

1/* 1/*
2 * libecb - http://software.schmorp.de/pkg/libecb 2 * libecb - http://software.schmorp.de/pkg/libecb
3 * 3 *
4 * Copyright (©) 2009-2012 Marc Alexander Lehmann <libecb@schmorp.de> 4 * Copyright (©) 2009-2015 Marc Alexander Lehmann <libecb@schmorp.de>
5 * Copyright (©) 2011 Emanuele Giaquinta 5 * Copyright (©) 2011 Emanuele Giaquinta
6 * All rights reserved. 6 * All rights reserved.
7 * 7 *
8 * Redistribution and use in source and binary forms, with or without modifica- 8 * Redistribution and use in source and binary forms, with or without modifica-
9 * tion, are permitted provided that the following conditions are met: 9 * tion, are permitted provided that the following conditions are met:
23 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; 23 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
24 * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, 24 * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
25 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTH- 25 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTH-
26 * ERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED 26 * ERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
27 * OF THE POSSIBILITY OF SUCH DAMAGE. 27 * OF THE POSSIBILITY OF SUCH DAMAGE.
28 *
29 * Alternatively, the contents of this file may be used under the terms of
30 * the GNU General Public License ("GPL") version 2 or any later version,
31 * in which case the provisions of the GPL are applicable instead of
32 * the above. If you wish to allow the use of your version of this file
33 * only under the terms of the GPL and not to allow others to use your
34 * version of this file under the BSD license, indicate your decision
35 * by deleting the provisions above and replace them with the notice
36 * and other provisions required by the GPL. If you do not delete the
37 * provisions above, a recipient may use your version of this file under
38 * either the BSD or the GPL.
28 */ 39 */
29 40
30#ifndef ECB_H 41#ifndef ECB_H
31#define ECB_H 42#define ECB_H
32 43
33/* 16 bits major, 16 bits minor */ 44/* 16 bits major, 16 bits minor */
34#define ECB_VERSION 0x00010001 45#define ECB_VERSION 0x00010005
35 46
36#ifdef _WIN32 47#ifdef _WIN32
37 typedef signed char int8_t; 48 typedef signed char int8_t;
38 typedef unsigned char uint8_t; 49 typedef unsigned char uint8_t;
39 typedef signed short int16_t; 50 typedef signed short int16_t;
54 #else 65 #else
55 #define ECB_PTRSIZE 4 66 #define ECB_PTRSIZE 4
56 typedef uint32_t uintptr_t; 67 typedef uint32_t uintptr_t;
57 typedef int32_t intptr_t; 68 typedef int32_t intptr_t;
58 #endif 69 #endif
59 typedef intptr_t ptrdiff_t;
60#else 70#else
61 #include <inttypes.h> 71 #include <inttypes.h>
62 #if UINTMAX_MAX > 0xffffffffU 72 #if (defined INTPTR_MAX ? INTPTR_MAX : ULONG_MAX) > 0xffffffffU
63 #define ECB_PTRSIZE 8 73 #define ECB_PTRSIZE 8
64 #else 74 #else
65 #define ECB_PTRSIZE 4 75 #define ECB_PTRSIZE 4
76 #endif
77#endif
78
79#define ECB_GCC_AMD64 (__amd64 || __amd64__ || __x86_64 || __x86_64__)
80#define ECB_MSVC_AMD64 (_M_AMD64 || _M_X64)
81
82/* work around x32 idiocy by defining proper macros */
83#if ECB_GCC_AMD64 || ECB_MSVC_AMD64
84 #if _ILP32
85 #define ECB_AMD64_X32 1
86 #else
87 #define ECB_AMD64 1
66 #endif 88 #endif
67#endif 89#endif
68 90
69/* many compilers define _GNUC_ to some versions but then only implement 91/* many compilers define _GNUC_ to some versions but then only implement
70 * what their idiot authors think are the "more important" extensions, 92 * what their idiot authors think are the "more important" extensions,
71 * causing enormous grief in return for some better fake benchmark numbers. 93 * causing enormous grief in return for some better fake benchmark numbers.
72 * or so. 94 * or so.
73 * we try to detect these and simply assume they are not gcc - if they have 95 * we try to detect these and simply assume they are not gcc - if they have
74 * an issue with that they should have done it right in the first place. 96 * an issue with that they should have done it right in the first place.
75 */ 97 */
76#ifndef ECB_GCC_VERSION
77 #if !defined __GNUC_MINOR__ || defined __INTEL_COMPILER || defined __SUNPRO_C || defined __SUNPRO_CC || defined __llvm__ || defined __clang__ 98#if !defined __GNUC_MINOR__ || defined __INTEL_COMPILER || defined __SUNPRO_C || defined __SUNPRO_CC || defined __llvm__ || defined __clang__
78 #define ECB_GCC_VERSION(major,minor) 0 99 #define ECB_GCC_VERSION(major,minor) 0
79 #else 100#else
80 #define ECB_GCC_VERSION(major,minor) (__GNUC__ > (major) || (__GNUC__ == (major) && __GNUC_MINOR__ >= (minor))) 101 #define ECB_GCC_VERSION(major,minor) (__GNUC__ > (major) || (__GNUC__ == (major) && __GNUC_MINOR__ >= (minor)))
81 #endif 102#endif
82#endif
83 103
84#define ECB_C (__STDC__+0) /* this assumes that __STDC__ is either empty or a number */ 104#define ECB_CLANG_VERSION(major,minor) (__clang_major__ > (major) || (__clang_major__ == (major) && __clang_minor__ >= (minor)))
85#define ECB_C99 (__STDC_VERSION__ >= 199901L) 105
86#define ECB_C11 (__STDC_VERSION__ >= 201112L) 106#if __clang__ && defined __has_builtin
107 #define ECB_CLANG_BUILTIN(x) __has_builtin (x)
108#else
109 #define ECB_CLANG_BUILTIN(x) 0
110#endif
111
112#if __clang__ && defined __has_extension
113 #define ECB_CLANG_EXTENSION(x) __has_extension (x)
114#else
115 #define ECB_CLANG_EXTENSION(x) 0
116#endif
117
87#define ECB_CPP (__cplusplus+0) 118#define ECB_CPP (__cplusplus+0)
88#define ECB_CPP98 (__cplusplus >= 199711L)
89#define ECB_CPP11 (__cplusplus >= 201103L) 119#define ECB_CPP11 (__cplusplus >= 201103L)
120
121#if ECB_CPP
122 #define ECB_C 0
123 #define ECB_STDC_VERSION 0
124#else
125 #define ECB_C 1
126 #define ECB_STDC_VERSION __STDC_VERSION__
127#endif
128
129#define ECB_C99 (ECB_STDC_VERSION >= 199901L)
130#define ECB_C11 (ECB_STDC_VERSION >= 201112L)
131
132#if ECB_CPP
133 #define ECB_EXTERN_C extern "C"
134 #define ECB_EXTERN_C_BEG ECB_EXTERN_C {
135 #define ECB_EXTERN_C_END }
136#else
137 #define ECB_EXTERN_C extern
138 #define ECB_EXTERN_C_BEG
139 #define ECB_EXTERN_C_END
140#endif
90 141
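As a usage sketch (assuming ecb.h is included; the function name my_c_api is hypothetical), the extern-"C" helpers above let one header declare a C-linkage API for both C and C++ translation units:

  ECB_EXTERN_C_BEG        /* expands to extern "C" { under C++, to nothing under C */

  void my_c_api (int fd); /* hypothetical declaration, gets C linkage either way */

  ECB_EXTERN_C_END        /* closes the extern "C" block under C++ */
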
91/*****************************************************************************/ 142/*****************************************************************************/
92 143
93/* ECB_NO_THREADS - ecb is not used by multiple threads, ever */ 144/* ECB_NO_THREADS - ecb is not used by multiple threads, ever */
94/* ECB_NO_SMP - ecb might be used in multiple threads, but only on a single cpu */ 145/* ECB_NO_SMP - ecb might be used in multiple threads, but only on a single cpu */
95 146
96#if ECB_NO_THREADS 147#if ECB_NO_THREADS
97# define ECB_NO_SMP 1 148 #define ECB_NO_SMP 1
98#endif 149#endif
99 150
100#if ECB_NO_THREADS || ECB_NO_SMP 151#if ECB_NO_SMP
101 #define ECB_MEMORY_FENCE do { } while (0) 152 #define ECB_MEMORY_FENCE do { } while (0)
102#endif 153#endif
103 154
104#ifndef ECB_MEMORY_FENCE 155/* http://www-01.ibm.com/support/knowledgecenter/SSGH3R_13.1.0/com.ibm.xlcpp131.aix.doc/compiler_ref/compiler_builtins.html */
105 #if ECB_C11 && !defined __STDC_NO_ATOMICS__ 156#if __xlC__ && ECB_CPP
106 /* we assume that these memory fences work on all variables/all memory accesses, */ 157 #include <builtins.h>
107 /* not just C11 atomics and atomic accesses */
108 #include <stdatomic.h>
109 #define ECB_MEMORY_FENCE atomic_thread_fence (memory_order_acq_rel)
110 #define ECB_MEMORY_FENCE_ACQUIRE atomic_thread_fence (memory_order_acquire)
111 #define ECB_MEMORY_FENCE_RELEASE atomic_thread_fence (memory_order_release)
112 #endif 158#endif
113#endif
114 159
115#ifndef ECB_MEMORY_FENCE_RELEASE 160#if 1400 <= _MSC_VER
116 #if ECB_GCC_VERSION(4,7) 161 #include <intrin.h> /* fence functions _ReadBarrier, also bit search functions _BitScanReverse */
117 #define ECB_MEMORY_FENCE __atomic_thread_fence (__ATOMIC_ACQ_REL)
118 #define ECB_MEMORY_FENCE_ACQUIRE __atomic_thread_fence (__ATOMIC_ACQUIRE)
119 #define ECB_MEMORY_FENCE_RELEASE __atomic_thread_fence (__ATOMIC_RELEASE)
120 #endif
121#endif 162#endif
122 163
123#ifndef ECB_MEMORY_FENCE 164#ifndef ECB_MEMORY_FENCE
124 #if ECB_GCC_VERSION(2,5) || defined __INTEL_COMPILER || (__llvm__ && __GNUC__) || __SUNPRO_C >= 0x5110 || __SUNPRO_CC >= 0x5110 165 #if ECB_GCC_VERSION(2,5) || defined __INTEL_COMPILER || (__llvm__ && __GNUC__) || __SUNPRO_C >= 0x5110 || __SUNPRO_CC >= 0x5110
125 #if __i386 || __i386__ 166 #if __i386 || __i386__
126 #define ECB_MEMORY_FENCE __asm__ __volatile__ ("lock; orb $0, -1(%%esp)" : : : "memory") 167 #define ECB_MEMORY_FENCE __asm__ __volatile__ ("lock; orb $0, -1(%%esp)" : : : "memory")
127 #define ECB_MEMORY_FENCE_ACQUIRE ECB_MEMORY_FENCE /* non-lock xchg might be enough */ 168 #define ECB_MEMORY_FENCE_ACQUIRE __asm__ __volatile__ ("" : : : "memory")
128 #define ECB_MEMORY_FENCE_RELEASE do { } while (0) /* unlikely to change in future cpus */ 169 #define ECB_MEMORY_FENCE_RELEASE __asm__ __volatile__ ("")
129 #elif __amd64 || __amd64__ || __x86_64 || __x86_64__ 170 #elif ECB_GCC_AMD64
130 #define ECB_MEMORY_FENCE __asm__ __volatile__ ("mfence" : : : "memory") 171 #define ECB_MEMORY_FENCE __asm__ __volatile__ ("mfence" : : : "memory")
131 #define ECB_MEMORY_FENCE_ACQUIRE __asm__ __volatile__ ("lfence" : : : "memory") 172 #define ECB_MEMORY_FENCE_ACQUIRE __asm__ __volatile__ ("" : : : "memory")
132 #define ECB_MEMORY_FENCE_RELEASE __asm__ __volatile__ ("sfence") /* play safe - not needed in any current cpu */ 173 #define ECB_MEMORY_FENCE_RELEASE __asm__ __volatile__ ("")
133 #elif __powerpc__ || __ppc__ || __powerpc64__ || __ppc64__ 174 #elif __powerpc__ || __ppc__ || __powerpc64__ || __ppc64__
134 #define ECB_MEMORY_FENCE __asm__ __volatile__ ("sync" : : : "memory") 175 #define ECB_MEMORY_FENCE __asm__ __volatile__ ("sync" : : : "memory")
135 #elif defined __ARM_ARCH_6__ || defined __ARM_ARCH_6J__ \ 176 #elif defined __ARM_ARCH_6__ || defined __ARM_ARCH_6J__ \
136 || defined __ARM_ARCH_6K__ || defined __ARM_ARCH_6ZK__ 177 || defined __ARM_ARCH_6K__ || defined __ARM_ARCH_6ZK__
137 #define ECB_MEMORY_FENCE __asm__ __volatile__ ("mcr p15,0,%0,c7,c10,5" : : "r" (0) : "memory") 178 #define ECB_MEMORY_FENCE __asm__ __volatile__ ("mcr p15,0,%0,c7,c10,5" : : "r" (0) : "memory")
138 #elif defined __ARM_ARCH_7__ || defined __ARM_ARCH_7A__ \ 179 #elif defined __ARM_ARCH_7__ || defined __ARM_ARCH_7A__ \
139 || defined __ARM_ARCH_7M__ || defined __ARM_ARCH_7R__ 180 || defined __ARM_ARCH_7M__ || defined __ARM_ARCH_7R__
140 #define ECB_MEMORY_FENCE __asm__ __volatile__ ("dmb" : : : "memory") 181 #define ECB_MEMORY_FENCE __asm__ __volatile__ ("dmb" : : : "memory")
141 #elif __sparc || __sparc__ 182 #elif __aarch64__
183 #define ECB_MEMORY_FENCE __asm__ __volatile__ ("dmb ish" : : : "memory")
184 #elif (__sparc || __sparc__) && !(__sparc_v8__ || defined __sparcv8)
142 #define ECB_MEMORY_FENCE __asm__ __volatile__ ("membar #LoadStore | #LoadLoad | #StoreStore | #StoreLoad | " : : : "memory") 185 #define ECB_MEMORY_FENCE __asm__ __volatile__ ("membar #LoadStore | #LoadLoad | #StoreStore | #StoreLoad" : : : "memory")
143 #define ECB_MEMORY_FENCE_ACQUIRE __asm__ __volatile__ ("membar #LoadStore | #LoadLoad" : : : "memory") 186 #define ECB_MEMORY_FENCE_ACQUIRE __asm__ __volatile__ ("membar #LoadStore | #LoadLoad" : : : "memory")
144 #define ECB_MEMORY_FENCE_RELEASE __asm__ __volatile__ ("membar #LoadStore | #StoreStore") 187 #define ECB_MEMORY_FENCE_RELEASE __asm__ __volatile__ ("membar #LoadStore | #StoreStore")
145 #elif defined __s390__ || defined __s390x__ 188 #elif defined __s390__ || defined __s390x__
146 #define ECB_MEMORY_FENCE __asm__ __volatile__ ("bcr 15,0" : : : "memory") 189 #define ECB_MEMORY_FENCE __asm__ __volatile__ ("bcr 15,0" : : : "memory")
147 #elif defined __mips__ 190 #elif defined __mips__
191 /* GNU/Linux emulates sync on mips1 architectures, so we force its use */
192 /* anybody else who still uses mips1 is supposed to send in their version, with detection code. */
148 #define ECB_MEMORY_FENCE __asm__ __volatile__ ("sync" : : : "memory") 193 #define ECB_MEMORY_FENCE __asm__ __volatile__ (".set mips2; sync; .set mips0" : : : "memory")
149 #elif defined __alpha__ 194 #elif defined __alpha__
150 #define ECB_MEMORY_FENCE __asm__ __volatile__ ("mb" : : : "memory") 195 #define ECB_MEMORY_FENCE __asm__ __volatile__ ("mb" : : : "memory")
196 #elif defined __hppa__
197 #define ECB_MEMORY_FENCE __asm__ __volatile__ ("" : : : "memory")
198 #define ECB_MEMORY_FENCE_RELEASE __asm__ __volatile__ ("")
199 #elif defined __ia64__
200 #define ECB_MEMORY_FENCE __asm__ __volatile__ ("mf" : : : "memory")
201 #elif defined __m68k__
202 #define ECB_MEMORY_FENCE __asm__ __volatile__ ("" : : : "memory")
203 #elif defined __m88k__
204 #define ECB_MEMORY_FENCE __asm__ __volatile__ ("tb1 0,%%r0,128" : : : "memory")
205 #elif defined __sh__
206 #define ECB_MEMORY_FENCE __asm__ __volatile__ ("" : : : "memory")
151 #endif 207 #endif
152 #endif 208 #endif
153#endif 209#endif
154 210
155#ifndef ECB_MEMORY_FENCE 211#ifndef ECB_MEMORY_FENCE
212 #if ECB_GCC_VERSION(4,7)
213 /* see comment below (stdatomic.h) about the C11 memory model. */
214 #define ECB_MEMORY_FENCE __atomic_thread_fence (__ATOMIC_SEQ_CST)
215 #define ECB_MEMORY_FENCE_ACQUIRE __atomic_thread_fence (__ATOMIC_ACQUIRE)
216 #define ECB_MEMORY_FENCE_RELEASE __atomic_thread_fence (__ATOMIC_RELEASE)
217
218 #elif ECB_CLANG_EXTENSION(c_atomic)
219 /* see comment below (stdatomic.h) about the C11 memory model. */
220 #define ECB_MEMORY_FENCE __c11_atomic_thread_fence (__ATOMIC_SEQ_CST)
221 #define ECB_MEMORY_FENCE_ACQUIRE __c11_atomic_thread_fence (__ATOMIC_ACQUIRE)
222 #define ECB_MEMORY_FENCE_RELEASE __c11_atomic_thread_fence (__ATOMIC_RELEASE)
223
156 #if ECB_GCC_VERSION(4,4) || defined __INTEL_COMPILER || defined __clang__ 224 #elif ECB_GCC_VERSION(4,4) || defined __INTEL_COMPILER || defined __clang__
157 #define ECB_MEMORY_FENCE __sync_synchronize () 225 #define ECB_MEMORY_FENCE __sync_synchronize ()
158 /*#define ECB_MEMORY_FENCE_ACQUIRE ({ char dummy = 0; __sync_lock_test_and_set (&dummy, 1); }) */ 226 #elif _MSC_VER >= 1500 /* VC++ 2008 */
159 /*#define ECB_MEMORY_FENCE_RELEASE ({ char dummy = 1; __sync_lock_release (&dummy ); }) */ 227 /* apparently, microsoft broke all the memory barrier stuff in Visual Studio 2008... */
228 #pragma intrinsic(_ReadBarrier,_WriteBarrier,_ReadWriteBarrier)
229 #define ECB_MEMORY_FENCE _ReadWriteBarrier (); MemoryBarrier()
230 #define ECB_MEMORY_FENCE_ACQUIRE _ReadWriteBarrier (); MemoryBarrier() /* according to msdn, _ReadBarrier is not a load fence */
231 #define ECB_MEMORY_FENCE_RELEASE _WriteBarrier (); MemoryBarrier()
160 #elif _MSC_VER >= 1400 /* VC++ 2005 */ 232 #elif _MSC_VER >= 1400 /* VC++ 2005 */
161 #pragma intrinsic(_ReadBarrier,_WriteBarrier,_ReadWriteBarrier) 233 #pragma intrinsic(_ReadBarrier,_WriteBarrier,_ReadWriteBarrier)
162 #define ECB_MEMORY_FENCE _ReadWriteBarrier () 234 #define ECB_MEMORY_FENCE _ReadWriteBarrier ()
163 #define ECB_MEMORY_FENCE_ACQUIRE _ReadWriteBarrier () /* according to msdn, _ReadBarrier is not a load fence */ 235 #define ECB_MEMORY_FENCE_ACQUIRE _ReadWriteBarrier () /* according to msdn, _ReadBarrier is not a load fence */
164 #define ECB_MEMORY_FENCE_RELEASE _WriteBarrier () 236 #define ECB_MEMORY_FENCE_RELEASE _WriteBarrier ()
174 #define ECB_MEMORY_FENCE __sync () 246 #define ECB_MEMORY_FENCE __sync ()
175 #endif 247 #endif
176#endif 248#endif
177 249
178#ifndef ECB_MEMORY_FENCE 250#ifndef ECB_MEMORY_FENCE
251 #if ECB_C11 && !defined __STDC_NO_ATOMICS__
252 /* we assume that these memory fences work on all variables/all memory accesses, */
253 /* not just C11 atomics and atomic accesses */
254 #include <stdatomic.h>
255 /* Unfortunately, neither gcc 4.7 nor clang 3.1 generate any instructions for */
256 /* any fence other than seq_cst, which isn't very efficient for us. */
257 /* Why that is, we don't know - either the C11 memory model is quite useless */
258 /* for most usages, or gcc and clang have a bug */
259 /* I *currently* lean towards the latter, and inefficiently implement */
260 /* all three of ecb's fences as a seq_cst fence */
261 /* Update, gcc-4.8 generates mfence for all c++ fences, but nothing */
262 /* for all __atomic_thread_fence's except seq_cst */
263 #define ECB_MEMORY_FENCE atomic_thread_fence (memory_order_seq_cst)
264 #endif
265#endif
266
267#ifndef ECB_MEMORY_FENCE
179 #if !ECB_AVOID_PTHREADS 268 #if !ECB_AVOID_PTHREADS
180 /* 269 /*
181 * if you get undefined symbol references to pthread_mutex_lock, 270 * if you get undefined symbol references to pthread_mutex_lock,
182 * or failure to find pthread.h, then you should implement 271 * or failure to find pthread.h, then you should implement
183 * the ECB_MEMORY_FENCE operations for your cpu/compiler 272 * the ECB_MEMORY_FENCE operations for your cpu/compiler
201 #define ECB_MEMORY_FENCE_RELEASE ECB_MEMORY_FENCE 290 #define ECB_MEMORY_FENCE_RELEASE ECB_MEMORY_FENCE
202#endif 291#endif
203 292
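As a usage sketch of the release/acquire pair defined above (assuming ecb.h is included; producer/consumer and the variables are hypothetical), publishing data to another thread through a flag:

  static int shared_data;
  static volatile int data_ready;

  void
  producer (void)
  {
    shared_data = 42;          /* write the payload first */
    ECB_MEMORY_FENCE_RELEASE;  /* payload must be visible before the flag */
    data_ready = 1;
  }

  int
  consumer (void)
  {
    if (!data_ready)
      return -1;               /* hypothetical "not yet" value */

    ECB_MEMORY_FENCE_ACQUIRE;  /* order the flag read before the payload read */
    return shared_data;
  }
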
204/*****************************************************************************/ 293/*****************************************************************************/
205 294
206#if __cplusplus 295#if ECB_CPP
207 #define ecb_inline static inline 296 #define ecb_inline static inline
208#elif ECB_GCC_VERSION(2,5) 297#elif ECB_GCC_VERSION(2,5)
209 #define ecb_inline static __inline__ 298 #define ecb_inline static __inline__
210#elif ECB_C99 299#elif ECB_C99
211 #define ecb_inline static inline 300 #define ecb_inline static inline
225 314
226#define ECB_CONCAT_(a, b) a ## b 315#define ECB_CONCAT_(a, b) a ## b
227#define ECB_CONCAT(a, b) ECB_CONCAT_(a, b) 316#define ECB_CONCAT(a, b) ECB_CONCAT_(a, b)
228#define ECB_STRINGIFY_(a) # a 317#define ECB_STRINGIFY_(a) # a
229#define ECB_STRINGIFY(a) ECB_STRINGIFY_(a) 318#define ECB_STRINGIFY(a) ECB_STRINGIFY_(a)
319#define ECB_STRINGIFY_EXPR(expr) ((expr), ECB_STRINGIFY_ (expr))
230 320
231#define ecb_function_ ecb_inline 321#define ecb_function_ ecb_inline
232 322
233#if ECB_GCC_VERSION(3,1) 323#if ECB_GCC_VERSION(3,1) || ECB_CLANG_VERSION(2,8)
234 #define ecb_attribute(attrlist) __attribute__(attrlist) 324 #define ecb_attribute(attrlist) __attribute__ (attrlist)
325#else
326 #define ecb_attribute(attrlist)
327#endif
328
329#if ECB_GCC_VERSION(3,1) || ECB_CLANG_BUILTIN(__builtin_constant_p)
235 #define ecb_is_constant(expr) __builtin_constant_p (expr) 330 #define ecb_is_constant(expr) __builtin_constant_p (expr)
331#else
332 /* possible C11 impl for integral types
333 typedef struct ecb_is_constant_struct ecb_is_constant_struct;
334 #define ecb_is_constant(expr) _Generic ((1 ? (struct ecb_is_constant_struct *)0 : (void *)((expr) - (expr)), ecb_is_constant_struct *: 0, default: 1)) */
335
336 #define ecb_is_constant(expr) 0
337#endif
338
339#if ECB_GCC_VERSION(3,1) || ECB_CLANG_BUILTIN(__builtin_expect)
236 #define ecb_expect(expr,value) __builtin_expect ((expr),(value)) 340 #define ecb_expect(expr,value) __builtin_expect ((expr),(value))
341#else
342 #define ecb_expect(expr,value) (expr)
343#endif
344
345#if ECB_GCC_VERSION(3,1) || ECB_CLANG_BUILTIN(__builtin_prefetch)
237 #define ecb_prefetch(addr,rw,locality) __builtin_prefetch (addr, rw, locality) 346 #define ecb_prefetch(addr,rw,locality) __builtin_prefetch (addr, rw, locality)
238#else 347#else
239 #define ecb_attribute(attrlist)
240 #define ecb_is_constant(expr) 0
241 #define ecb_expect(expr,value) (expr)
242 #define ecb_prefetch(addr,rw,locality) 348 #define ecb_prefetch(addr,rw,locality)
243#endif 349#endif
244 350
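A minimal sketch of ecb_prefetch in a traversal loop (assuming ecb.h is included; sum_array and the prefetch distance of 16 are hypothetical); with compilers lacking the builtin the hint simply expands to nothing:

  int
  sum_array (const int *a, int n)
  {
    int i, sum = 0;

    for (i = 0; i < n; ++i)
      {
        ecb_prefetch (a + i + 16, 0, 1); /* read access, low temporal locality */
        sum += a[i];
      }

    return sum;
  }
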
245/* no emulation for ecb_decltype */ 351/* no emulation for ecb_decltype */
246#if ECB_GCC_VERSION(4,5) 352#if ECB_CPP11
353 // older implementations might have problems with decltype(x)::type, work around it
354 template<class T> struct ecb_decltype_t { typedef T type; };
247 #define ecb_decltype(x) __decltype(x) 355 #define ecb_decltype(x) ecb_decltype_t<decltype (x)>::type
248#elif ECB_GCC_VERSION(3,0) 356#elif ECB_GCC_VERSION(3,0) || ECB_CLANG_VERSION(2,8)
249 #define ecb_decltype(x) __typeof(x) 357 #define ecb_decltype(x) __typeof__ (x)
250#endif 358#endif
251 359
360#if _MSC_VER >= 1300
361 #define ecb_deprecated __declspec (deprecated)
362#else
363 #define ecb_deprecated ecb_attribute ((__deprecated__))
364#endif
365
366#if _MSC_VER >= 1500
367 #define ecb_deprecated_message(msg) __declspec (deprecated (msg))
368#elif ECB_GCC_VERSION(4,5)
 369 #define ecb_deprecated_message(msg) ecb_attribute ((__deprecated__ (msg)))
370#else
371 #define ecb_deprecated_message(msg) ecb_deprecated
372#endif
373
374#if _MSC_VER >= 1400
375 #define ecb_noinline __declspec (noinline)
376#else
252#define ecb_noinline ecb_attribute ((__noinline__)) 377 #define ecb_noinline ecb_attribute ((__noinline__))
378#endif
379
253#define ecb_unused ecb_attribute ((__unused__)) 380#define ecb_unused ecb_attribute ((__unused__))
254#define ecb_const ecb_attribute ((__const__)) 381#define ecb_const ecb_attribute ((__const__))
255#define ecb_pure ecb_attribute ((__pure__)) 382#define ecb_pure ecb_attribute ((__pure__))
256 383
257#if ECB_C11 384#if ECB_C11 || __IBMC_NORETURN
385 /* http://www-01.ibm.com/support/knowledgecenter/SSGH3R_13.1.0/com.ibm.xlcpp131.aix.doc/language_ref/noreturn.html */
258 #define ecb_noreturn _Noreturn 386 #define ecb_noreturn _Noreturn
387#elif ECB_CPP11
388 #define ecb_noreturn [[noreturn]]
389#elif _MSC_VER >= 1200
390 /* http://msdn.microsoft.com/en-us/library/k6ktzx3s.aspx */
391 #define ecb_noreturn __declspec (noreturn)
259#else 392#else
260 #define ecb_noreturn ecb_attribute ((__noreturn__)) 393 #define ecb_noreturn ecb_attribute ((__noreturn__))
261#endif 394#endif
262 395
263#if ECB_GCC_VERSION(4,3) 396#if ECB_GCC_VERSION(4,3)
278/* for compatibility to the rest of the world */ 411/* for compatibility to the rest of the world */
279#define ecb_likely(expr) ecb_expect_true (expr) 412#define ecb_likely(expr) ecb_expect_true (expr)
280#define ecb_unlikely(expr) ecb_expect_false (expr) 413#define ecb_unlikely(expr) ecb_expect_false (expr)
281 414
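A usage sketch for the branch hints (assuming ecb.h is included; do_work and handle_error are hypothetical), marking the error path as cold:

  extern int do_work (void);      /* hypothetical */
  extern int handle_error (int);  /* hypothetical */

  int
  step (void)
  {
    int r = do_work ();

    if (ecb_expect_false (r < 0))  /* tell the compiler this branch is unlikely */
      return handle_error (r);

    return r;
  }
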
282/* count trailing zero bits and count # of one bits */ 415/* count trailing zero bits and count # of one bits */
283#if ECB_GCC_VERSION(3,4) 416#if ECB_GCC_VERSION(3,4) \
417 || (ECB_CLANG_BUILTIN(__builtin_clz) && ECB_CLANG_BUILTIN(__builtin_clzll) \
418 && ECB_CLANG_BUILTIN(__builtin_ctz) && ECB_CLANG_BUILTIN(__builtin_ctzll) \
419 && ECB_CLANG_BUILTIN(__builtin_popcount))
284 /* we assume int == 32 bit, long == 32 or 64 bit and long long == 64 bit */ 420 /* we assume int == 32 bit, long == 32 or 64 bit and long long == 64 bit */
285 #define ecb_ld32(x) (__builtin_clz (x) ^ 31) 421 #define ecb_ld32(x) (__builtin_clz (x) ^ 31)
286 #define ecb_ld64(x) (__builtin_clzll (x) ^ 63) 422 #define ecb_ld64(x) (__builtin_clzll (x) ^ 63)
287 #define ecb_ctz32(x) __builtin_ctz (x) 423 #define ecb_ctz32(x) __builtin_ctz (x)
288 #define ecb_ctz64(x) __builtin_ctzll (x) 424 #define ecb_ctz64(x) __builtin_ctzll (x)
289 #define ecb_popcount32(x) __builtin_popcount (x) 425 #define ecb_popcount32(x) __builtin_popcount (x)
290 /* no popcountll */ 426 /* no popcountll */
291#else 427#else
292 ecb_function_ int ecb_ctz32 (uint32_t x) ecb_const; 428 ecb_function_ ecb_const int ecb_ctz32 (uint32_t x);
293 ecb_function_ int 429 ecb_function_ ecb_const int
294 ecb_ctz32 (uint32_t x) 430 ecb_ctz32 (uint32_t x)
295 { 431 {
432#if 1400 <= _MSC_VER && (_M_IX86 || _M_X64 || _M_IA64 || _M_ARM)
433 unsigned long r;
434 _BitScanForward (&r, x);
435 return (int)r;
436#else
296 int r = 0; 437 int r = 0;
297 438
298 x &= ~x + 1; /* this isolates the lowest bit */ 439 x &= ~x + 1; /* this isolates the lowest bit */
299 440
300#if ECB_branchless_on_i386 441#if ECB_branchless_on_i386
310 if (x & 0xff00ff00) r += 8; 451 if (x & 0xff00ff00) r += 8;
311 if (x & 0xffff0000) r += 16; 452 if (x & 0xffff0000) r += 16;
312#endif 453#endif
313 454
314 return r; 455 return r;
456#endif
315 } 457 }
316 458
317 ecb_function_ int ecb_ctz64 (uint64_t x) ecb_const; 459 ecb_function_ ecb_const int ecb_ctz64 (uint64_t x);
318 ecb_function_ int 460 ecb_function_ ecb_const int
319 ecb_ctz64 (uint64_t x) 461 ecb_ctz64 (uint64_t x)
320 { 462 {
463#if 1400 <= _MSC_VER && (_M_X64 || _M_IA64 || _M_ARM)
464 unsigned long r;
465 _BitScanForward64 (&r, x);
466 return (int)r;
467#else
321 int shift = x & 0xffffffffU ? 0 : 32; 468 int shift = x & 0xffffffff ? 0 : 32;
322 return ecb_ctz32 (x >> shift) + shift; 469 return ecb_ctz32 (x >> shift) + shift;
470#endif
323 } 471 }
324 472
325 ecb_function_ int ecb_popcount32 (uint32_t x) ecb_const; 473 ecb_function_ ecb_const int ecb_popcount32 (uint32_t x);
326 ecb_function_ int 474 ecb_function_ ecb_const int
327 ecb_popcount32 (uint32_t x) 475 ecb_popcount32 (uint32_t x)
328 { 476 {
329 x -= (x >> 1) & 0x55555555; 477 x -= (x >> 1) & 0x55555555;
330 x = ((x >> 2) & 0x33333333) + (x & 0x33333333); 478 x = ((x >> 2) & 0x33333333) + (x & 0x33333333);
331 x = ((x >> 4) + x) & 0x0f0f0f0f; 479 x = ((x >> 4) + x) & 0x0f0f0f0f;
332 x *= 0x01010101; 480 x *= 0x01010101;
333 481
334 return x >> 24; 482 return x >> 24;
335 } 483 }
336 484
337 ecb_function_ int ecb_ld32 (uint32_t x) ecb_const; 485 ecb_function_ ecb_const int ecb_ld32 (uint32_t x);
338 ecb_function_ int ecb_ld32 (uint32_t x) 486 ecb_function_ ecb_const int ecb_ld32 (uint32_t x)
339 { 487 {
488#if 1400 <= _MSC_VER && (_M_IX86 || _M_X64 || _M_IA64 || _M_ARM)
489 unsigned long r;
490 _BitScanReverse (&r, x);
491 return (int)r;
492#else
340 int r = 0; 493 int r = 0;
341 494
342 if (x >> 16) { x >>= 16; r += 16; } 495 if (x >> 16) { x >>= 16; r += 16; }
343 if (x >> 8) { x >>= 8; r += 8; } 496 if (x >> 8) { x >>= 8; r += 8; }
344 if (x >> 4) { x >>= 4; r += 4; } 497 if (x >> 4) { x >>= 4; r += 4; }
345 if (x >> 2) { x >>= 2; r += 2; } 498 if (x >> 2) { x >>= 2; r += 2; }
346 if (x >> 1) { r += 1; } 499 if (x >> 1) { r += 1; }
347 500
348 return r; 501 return r;
502#endif
349 } 503 }
350 504
351 ecb_function_ int ecb_ld64 (uint64_t x) ecb_const; 505 ecb_function_ ecb_const int ecb_ld64 (uint64_t x);
352 ecb_function_ int ecb_ld64 (uint64_t x) 506 ecb_function_ ecb_const int ecb_ld64 (uint64_t x)
353 { 507 {
508#if 1400 <= _MSC_VER && (_M_X64 || _M_IA64 || _M_ARM)
509 unsigned long r;
510 _BitScanReverse64 (&r, x);
511 return (int)r;
512#else
354 int r = 0; 513 int r = 0;
355 514
356 if (x >> 32) { x >>= 32; r += 32; } 515 if (x >> 32) { x >>= 32; r += 32; }
357 516
358 return r + ecb_ld32 (x); 517 return r + ecb_ld32 (x);
359 }
360#endif 518#endif
519 }
520#endif
361 521
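A few worked values for the bit-scan and popcount helpers above, as an illustrative smoke test (assuming ecb.h is included):

  #include <assert.h>

  void
  bit_helpers_examples (void)
  {
    assert (ecb_ctz32 (0x00000050) == 4);              /* lowest set bit is bit 4 */
    assert (ecb_ld32  (0x00000050) == 6);              /* highest set bit is bit 6 */
    assert (ecb_ctz64 (0x0000001000000000ULL) == 36);  /* bit 4 of the upper half */
    assert (ecb_popcount32 (0xf0f0) == 8);             /* eight bits are set */
  }
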
362ecb_function_ ecb_bool ecb_is_pot32 (uint32_t x) ecb_const; 522ecb_function_ ecb_const ecb_bool ecb_is_pot32 (uint32_t x);
363ecb_function_ ecb_bool ecb_is_pot32 (uint32_t x) { return !(x & (x - 1)); } 523ecb_function_ ecb_const ecb_bool ecb_is_pot32 (uint32_t x) { return !(x & (x - 1)); }
364ecb_function_ ecb_bool ecb_is_pot64 (uint64_t x) ecb_const; 524ecb_function_ ecb_const ecb_bool ecb_is_pot64 (uint64_t x);
365ecb_function_ ecb_bool ecb_is_pot64 (uint64_t x) { return !(x & (x - 1)); } 525ecb_function_ ecb_const ecb_bool ecb_is_pot64 (uint64_t x) { return !(x & (x - 1)); }
366 526
367ecb_function_ uint8_t ecb_bitrev8 (uint8_t x) ecb_const; 527ecb_function_ ecb_const uint8_t ecb_bitrev8 (uint8_t x);
368ecb_function_ uint8_t ecb_bitrev8 (uint8_t x) 528ecb_function_ ecb_const uint8_t ecb_bitrev8 (uint8_t x)
369{ 529{
370 return ( (x * 0x0802U & 0x22110U) 530 return ( (x * 0x0802U & 0x22110U)
371 | (x * 0x8020U & 0x88440U)) * 0x10101U >> 16; 531 | (x * 0x8020U & 0x88440U)) * 0x10101U >> 16;
372} 532}
373 533
374ecb_function_ uint16_t ecb_bitrev16 (uint16_t x) ecb_const; 534ecb_function_ ecb_const uint16_t ecb_bitrev16 (uint16_t x);
375ecb_function_ uint16_t ecb_bitrev16 (uint16_t x) 535ecb_function_ ecb_const uint16_t ecb_bitrev16 (uint16_t x)
376{ 536{
377 x = ((x >> 1) & 0x5555) | ((x & 0x5555) << 1); 537 x = ((x >> 1) & 0x5555) | ((x & 0x5555) << 1);
378 x = ((x >> 2) & 0x3333) | ((x & 0x3333) << 2); 538 x = ((x >> 2) & 0x3333) | ((x & 0x3333) << 2);
379 x = ((x >> 4) & 0x0f0f) | ((x & 0x0f0f) << 4); 539 x = ((x >> 4) & 0x0f0f) | ((x & 0x0f0f) << 4);
380 x = ( x >> 8 ) | ( x << 8); 540 x = ( x >> 8 ) | ( x << 8);
381 541
382 return x; 542 return x;
383} 543}
384 544
385ecb_function_ uint32_t ecb_bitrev32 (uint32_t x) ecb_const; 545ecb_function_ ecb_const uint32_t ecb_bitrev32 (uint32_t x);
386ecb_function_ uint32_t ecb_bitrev32 (uint32_t x) 546ecb_function_ ecb_const uint32_t ecb_bitrev32 (uint32_t x)
387{ 547{
388 x = ((x >> 1) & 0x55555555) | ((x & 0x55555555) << 1); 548 x = ((x >> 1) & 0x55555555) | ((x & 0x55555555) << 1);
389 x = ((x >> 2) & 0x33333333) | ((x & 0x33333333) << 2); 549 x = ((x >> 2) & 0x33333333) | ((x & 0x33333333) << 2);
390 x = ((x >> 4) & 0x0f0f0f0f) | ((x & 0x0f0f0f0f) << 4); 550 x = ((x >> 4) & 0x0f0f0f0f) | ((x & 0x0f0f0f0f) << 4);
391 x = ((x >> 8) & 0x00ff00ff) | ((x & 0x00ff00ff) << 8); 551 x = ((x >> 8) & 0x00ff00ff) | ((x & 0x00ff00ff) << 8);
394 return x; 554 return x;
395} 555}
396 556
397/* popcount64 is only available on 64 bit cpus as gcc builtin */ 557/* popcount64 is only available on 64 bit cpus as gcc builtin */
398/* so for this version we are lazy */ 558/* so for this version we are lazy */
399ecb_function_ int ecb_popcount64 (uint64_t x) ecb_const; 559ecb_function_ ecb_const int ecb_popcount64 (uint64_t x);
400ecb_function_ int 560ecb_function_ ecb_const int
401ecb_popcount64 (uint64_t x) 561ecb_popcount64 (uint64_t x)
402{ 562{
403 return ecb_popcount32 (x) + ecb_popcount32 (x >> 32); 563 return ecb_popcount32 (x) + ecb_popcount32 (x >> 32);
404} 564}
405 565
406ecb_inline uint8_t ecb_rotl8 (uint8_t x, unsigned int count) ecb_const; 566ecb_inline ecb_const uint8_t ecb_rotl8 (uint8_t x, unsigned int count);
407ecb_inline uint8_t ecb_rotr8 (uint8_t x, unsigned int count) ecb_const; 567ecb_inline ecb_const uint8_t ecb_rotr8 (uint8_t x, unsigned int count);
408ecb_inline uint16_t ecb_rotl16 (uint16_t x, unsigned int count) ecb_const; 568ecb_inline ecb_const uint16_t ecb_rotl16 (uint16_t x, unsigned int count);
409ecb_inline uint16_t ecb_rotr16 (uint16_t x, unsigned int count) ecb_const; 569ecb_inline ecb_const uint16_t ecb_rotr16 (uint16_t x, unsigned int count);
410ecb_inline uint32_t ecb_rotl32 (uint32_t x, unsigned int count) ecb_const; 570ecb_inline ecb_const uint32_t ecb_rotl32 (uint32_t x, unsigned int count);
411ecb_inline uint32_t ecb_rotr32 (uint32_t x, unsigned int count) ecb_const; 571ecb_inline ecb_const uint32_t ecb_rotr32 (uint32_t x, unsigned int count);
412ecb_inline uint64_t ecb_rotl64 (uint64_t x, unsigned int count) ecb_const; 572ecb_inline ecb_const uint64_t ecb_rotl64 (uint64_t x, unsigned int count);
413ecb_inline uint64_t ecb_rotr64 (uint64_t x, unsigned int count) ecb_const; 573ecb_inline ecb_const uint64_t ecb_rotr64 (uint64_t x, unsigned int count);
414 574
415ecb_inline uint8_t ecb_rotl8 (uint8_t x, unsigned int count) { return (x >> ( 8 - count)) | (x << count); } 575ecb_inline ecb_const uint8_t ecb_rotl8 (uint8_t x, unsigned int count) { return (x >> ( 8 - count)) | (x << count); }
416ecb_inline uint8_t ecb_rotr8 (uint8_t x, unsigned int count) { return (x << ( 8 - count)) | (x >> count); } 576ecb_inline ecb_const uint8_t ecb_rotr8 (uint8_t x, unsigned int count) { return (x << ( 8 - count)) | (x >> count); }
417ecb_inline uint16_t ecb_rotl16 (uint16_t x, unsigned int count) { return (x >> (16 - count)) | (x << count); } 577ecb_inline ecb_const uint16_t ecb_rotl16 (uint16_t x, unsigned int count) { return (x >> (16 - count)) | (x << count); }
418ecb_inline uint16_t ecb_rotr16 (uint16_t x, unsigned int count) { return (x << (16 - count)) | (x >> count); } 578ecb_inline ecb_const uint16_t ecb_rotr16 (uint16_t x, unsigned int count) { return (x << (16 - count)) | (x >> count); }
419ecb_inline uint32_t ecb_rotl32 (uint32_t x, unsigned int count) { return (x >> (32 - count)) | (x << count); } 579ecb_inline ecb_const uint32_t ecb_rotl32 (uint32_t x, unsigned int count) { return (x >> (32 - count)) | (x << count); }
420ecb_inline uint32_t ecb_rotr32 (uint32_t x, unsigned int count) { return (x << (32 - count)) | (x >> count); } 580ecb_inline ecb_const uint32_t ecb_rotr32 (uint32_t x, unsigned int count) { return (x << (32 - count)) | (x >> count); }
421ecb_inline uint64_t ecb_rotl64 (uint64_t x, unsigned int count) { return (x >> (64 - count)) | (x << count); } 581ecb_inline ecb_const uint64_t ecb_rotl64 (uint64_t x, unsigned int count) { return (x >> (64 - count)) | (x << count); }
422ecb_inline uint64_t ecb_rotr64 (uint64_t x, unsigned int count) { return (x << (64 - count)) | (x >> count); } 582ecb_inline ecb_const uint64_t ecb_rotr64 (uint64_t x, unsigned int count) { return (x << (64 - count)) | (x >> count); }
423 583
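Worked values for the rotate helpers (assuming ecb.h is included); note that the portable fallback above requires 1 <= count < width, since a count of 0 would shift by the full type width:

  #include <assert.h>

  void
  rotate_examples (void)
  {
    assert (ecb_rotl32 (0x80000001u, 1) == 0x00000003u);
    assert (ecb_rotr32 (0x00000003u, 1) == 0x80000001u);
    assert (ecb_rotl16 (0x1234, 4) == 0x2341);
  }
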
424#if ECB_GCC_VERSION(4,3) 584#if ECB_GCC_VERSION(4,3) || (ECB_CLANG_BUILTIN(__builtin_bswap32) && ECB_CLANG_BUILTIN(__builtin_bswap64))
585 #if ECB_GCC_VERSION(4,8) || ECB_CLANG_BUILTIN(__builtin_bswap16)
586 #define ecb_bswap16(x) __builtin_bswap16 (x)
587 #else
425 #define ecb_bswap16(x) (__builtin_bswap32 (x) >> 16) 588 #define ecb_bswap16(x) (__builtin_bswap32 (x) >> 16)
589 #endif
426 #define ecb_bswap32(x) __builtin_bswap32 (x) 590 #define ecb_bswap32(x) __builtin_bswap32 (x)
427 #define ecb_bswap64(x) __builtin_bswap64 (x) 591 #define ecb_bswap64(x) __builtin_bswap64 (x)
592#elif _MSC_VER
593 #include <stdlib.h>
594 #define ecb_bswap16(x) ((uint16_t)_byteswap_ushort ((uint16_t)(x)))
595 #define ecb_bswap32(x) ((uint32_t)_byteswap_ulong ((uint32_t)(x)))
596 #define ecb_bswap64(x) ((uint64_t)_byteswap_uint64 ((uint64_t)(x)))
428#else 597#else
429 ecb_function_ uint16_t ecb_bswap16 (uint16_t x) ecb_const; 598 ecb_function_ ecb_const uint16_t ecb_bswap16 (uint16_t x);
430 ecb_function_ uint16_t 599 ecb_function_ ecb_const uint16_t
431 ecb_bswap16 (uint16_t x) 600 ecb_bswap16 (uint16_t x)
432 { 601 {
433 return ecb_rotl16 (x, 8); 602 return ecb_rotl16 (x, 8);
434 } 603 }
435 604
436 ecb_function_ uint32_t ecb_bswap32 (uint32_t x) ecb_const; 605 ecb_function_ ecb_const uint32_t ecb_bswap32 (uint32_t x);
437 ecb_function_ uint32_t 606 ecb_function_ ecb_const uint32_t
438 ecb_bswap32 (uint32_t x) 607 ecb_bswap32 (uint32_t x)
439 { 608 {
440 return (((uint32_t)ecb_bswap16 (x)) << 16) | ecb_bswap16 (x >> 16); 609 return (((uint32_t)ecb_bswap16 (x)) << 16) | ecb_bswap16 (x >> 16);
441 } 610 }
442 611
443 ecb_function_ uint64_t ecb_bswap64 (uint64_t x) ecb_const; 612 ecb_function_ ecb_const uint64_t ecb_bswap64 (uint64_t x);
444 ecb_function_ uint64_t 613 ecb_function_ ecb_const uint64_t
445 ecb_bswap64 (uint64_t x) 614 ecb_bswap64 (uint64_t x)
446 { 615 {
447 return (((uint64_t)ecb_bswap32 (x)) << 32) | ecb_bswap32 (x >> 32); 616 return (((uint64_t)ecb_bswap32 (x)) << 32) | ecb_bswap32 (x >> 32);
448 } 617 }
449#endif 618#endif
450 619
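Worked values for the byte-swap helpers (assuming ecb.h is included):

  #include <assert.h>

  void
  bswap_examples (void)
  {
    assert (ecb_bswap16 (0x1122) == 0x2211);
    assert (ecb_bswap32 (0x11223344) == 0x44332211);
    assert (ecb_bswap64 (0x1122334455667788ULL) == 0x8877665544332211ULL);
  }
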
451#if ECB_GCC_VERSION(4,5) 620#if ECB_GCC_VERSION(4,5) || ECB_CLANG_BUILTIN(__builtin_unreachable)
452 #define ecb_unreachable() __builtin_unreachable () 621 #define ecb_unreachable() __builtin_unreachable ()
453#else 622#else
454 /* this seems to work fine, but gcc always emits a warning for it :/ */ 623 /* this seems to work fine, but gcc always emits a warning for it :/ */
455 ecb_inline void ecb_unreachable (void) ecb_noreturn; 624 ecb_inline ecb_noreturn void ecb_unreachable (void);
456 ecb_inline void ecb_unreachable (void) { } 625 ecb_inline ecb_noreturn void ecb_unreachable (void) { }
457#endif 626#endif
458 627
459/* try to tell the compiler that some condition is definitely true */ 628/* try to tell the compiler that some condition is definitely true */
460#define ecb_assume(cond) do { if (!(cond)) ecb_unreachable (); } while (0) 629#define ecb_assume(cond) if (!(cond)) ecb_unreachable (); else 0
461 630
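A usage sketch for ecb_assume/ecb_unreachable (assuming ecb.h is included; eval and the opcode values are hypothetical):

  int
  eval (int op, int a, int b)
  {
    ecb_assume (op == 0 || op == 1); /* lets the optimiser drop range checks */

    if (op == 0)
      return a + b;

    if (op == 1)
      return a - b;

    ecb_unreachable ();              /* never reached when the assumption holds */
    return 0;                        /* pacify compilers that ignore noreturn */
  }
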
462ecb_inline unsigned char ecb_byteorder_helper (void) ecb_const; 631ecb_inline ecb_const uint32_t ecb_byteorder_helper (void);
463ecb_inline unsigned char 632ecb_inline ecb_const uint32_t
464ecb_byteorder_helper (void) 633ecb_byteorder_helper (void)
465{ 634{
466 const uint32_t u = 0x11223344; 635 /* the union code still generates code under pressure in gcc, */
467 return *(unsigned char *)&u; 636 /* but less than using pointers, and always seems to */
637 /* successfully return a constant. */
638 /* the reason why we have this horrible preprocessor mess */
639 /* is to avoid it in all cases, at least on common architectures */
640 /* or when using a recent enough gcc version (>= 4.6) */
641#if (defined __BYTE_ORDER__ && __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__) \
642 || ((__i386 || __i386__ || _M_IX86 || ECB_GCC_AMD64 || ECB_MSVC_AMD64) && !__VOS__)
643 #define ECB_LITTLE_ENDIAN 1
644 return 0x44332211;
645#elif (defined __BYTE_ORDER__ && __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__) \
646 || ((__AARCH64EB__ || __MIPSEB__ || __ARMEB__) && !__VOS__)
647 #define ECB_BIG_ENDIAN 1
648 return 0x11223344;
649#else
650 union
651 {
652 uint8_t c[4];
653 uint32_t u;
654 } u = { 0x11, 0x22, 0x33, 0x44 };
655 return u.u;
656#endif
468} 657}
469 658
470ecb_inline ecb_bool ecb_big_endian (void) ecb_const; 659ecb_inline ecb_const ecb_bool ecb_big_endian (void);
471ecb_inline ecb_bool ecb_big_endian (void) { return ecb_byteorder_helper () == 0x11; } 660ecb_inline ecb_const ecb_bool ecb_big_endian (void) { return ecb_byteorder_helper () == 0x11223344; }
472ecb_inline ecb_bool ecb_little_endian (void) ecb_const; 661ecb_inline ecb_const ecb_bool ecb_little_endian (void);
473ecb_inline ecb_bool ecb_little_endian (void) { return ecb_byteorder_helper () == 0x44; } 662ecb_inline ecb_const ecb_bool ecb_little_endian (void) { return ecb_byteorder_helper () == 0x44332211; }
474 663
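A sketch combining the endianness test with ecb_bswap32 (assuming ecb.h is included; put_be32 is hypothetical), storing a value in big-endian byte order regardless of the host:

  #include <string.h>

  void
  put_be32 (unsigned char *buf, uint32_t v)
  {
    if (ecb_little_endian ())
      v = ecb_bswap32 (v);  /* swap on little-endian hosts */

    memcpy (buf, &v, 4);    /* v now holds big-endian bytes either way */
  }
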
475#if ECB_GCC_VERSION(3,0) || ECB_C99 664#if ECB_GCC_VERSION(3,0) || ECB_C99
476 #define ecb_mod(m,n) ((m) % (n) + ((m) % (n) < 0 ? (n) : 0)) 665 #define ecb_mod(m,n) ((m) % (n) + ((m) % (n) < 0 ? (n) : 0))
477#else 666#else
478 #define ecb_mod(m,n) ((m) < 0 ? ((n) - 1 - ((-1 - (m)) % (n))) : ((m) % (n))) 667 #define ecb_mod(m,n) ((m) < 0 ? ((n) - 1 - ((-1 - (m)) % (n))) : ((m) % (n)))
479#endif 668#endif
480 669
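Worked values for ecb_mod, which yields the mathematical (non-negative) remainder for a positive divisor, e.g. for ring-buffer indexing (assuming ecb.h is included):

  #include <assert.h>

  void
  mod_examples (void)
  {
    assert (ecb_mod (-1, 5) == 4);  /* plain C99 "%" would give -1 here */
    assert (ecb_mod ( 7, 5) == 2);
  }
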
481#if __cplusplus 670#if ECB_CPP
482 template<typename T> 671 template<typename T>
483 static inline T ecb_div_rd (T val, T div) 672 static inline T ecb_div_rd (T val, T div)
484 { 673 {
485 return val < 0 ? - ((-val + div - 1) / div) : (val ) / div; 674 return val < 0 ? - ((-val + div - 1) / div) : (val ) / div;
486 } 675 }
503 } 692 }
504#else 693#else
505 #define ecb_array_length(name) (sizeof (name) / sizeof (name [0])) 694 #define ecb_array_length(name) (sizeof (name) / sizeof (name [0]))
506#endif 695#endif
507 696
697ecb_function_ ecb_const uint32_t ecb_binary16_to_binary32 (uint32_t x);
698ecb_function_ ecb_const uint32_t
699ecb_binary16_to_binary32 (uint32_t x)
700{
701 unsigned int s = (x & 0x8000) << (31 - 15);
702 int e = (x >> 10) & 0x001f;
703 unsigned int m = x & 0x03ff;
704
705 if (ecb_expect_false (e == 31))
706 /* infinity or NaN */
707 e = 255 - (127 - 15);
708 else if (ecb_expect_false (!e))
709 {
710 if (ecb_expect_true (!m))
711 /* zero, handled by code below by forcing e to 0 */
712 e = 0 - (127 - 15);
713 else
714 {
715 /* subnormal, renormalise */
716 unsigned int s = 10 - ecb_ld32 (m);
717
718 m = (m << s) & 0x3ff; /* mask implicit bit */
719 e -= s - 1;
720 }
721 }
722
723 /* e and m now are normalised, or zero, (or inf or nan) */
724 e += 127 - 15;
725
726 return s | (e << 23) | (m << (23 - 10));
727}
728
729ecb_function_ ecb_const uint16_t ecb_binary32_to_binary16 (uint32_t x);
730ecb_function_ ecb_const uint16_t
731ecb_binary32_to_binary16 (uint32_t x)
732{
733 unsigned int s = (x >> 16) & 0x00008000; /* sign bit, the easy part */
734 unsigned int e = ((x >> 23) & 0x000000ff) - (127 - 15); /* the desired exponent */
735 unsigned int m = x & 0x007fffff;
736
737 x &= 0x7fffffff;
738
739 /* if it's within range of binary16 normals, use fast path */
740 if (ecb_expect_true (0x38800000 <= x && x <= 0x477fefff))
741 {
742 /* mantissa round-to-even */
743 m += 0x00000fff + ((m >> (23 - 10)) & 1);
744
745 /* handle overflow */
746 if (ecb_expect_false (m >= 0x00800000))
747 {
748 m >>= 1;
749 e += 1;
750 }
751
752 return s | (e << 10) | (m >> (23 - 10));
753 }
754
755 /* handle large numbers and infinity */
756 if (ecb_expect_true (0x477fefff < x && x <= 0x7f800000))
757 return s | 0x7c00;
758
759 /* handle zero, subnormals and small numbers */
760 if (ecb_expect_true (x < 0x38800000))
761 {
762 /* zero */
763 if (ecb_expect_true (!x))
764 return s;
765
766 /* handle subnormals */
767
768 /* too small, will be zero */
769 if (e < (14 - 24)) /* might not be sharp, but is good enough */
770 return s;
771
772 m |= 0x00800000; /* make implicit bit explicit */
773
774 /* very tricky - we need to round to the nearest e (+10) bit value */
775 {
776 unsigned int bits = 14 - e;
777 unsigned int half = (1 << (bits - 1)) - 1;
778 unsigned int even = (m >> bits) & 1;
779
780 /* if this overflows, we will end up with a normalised number */
781 m = (m + half + even) >> bits;
782 }
783
784 return s | m;
785 }
786
787 /* handle NaNs, preserve leftmost nan bits, but make sure we don't turn them into infinities */
788 m >>= 13;
789
790 return s | 0x7c00 | m | !m;
791}
792
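Worked values for the binary16 conversions above (assuming ecb.h is included); 0x3c00 is 1.0 and 0xc000 is -2.0 in IEEE binary16:

  #include <assert.h>

  void
  half_examples (void)
  {
    assert (ecb_binary16_to_binary32 (0x3c00) == 0x3f800000);  /*  1.0 */
    assert (ecb_binary32_to_binary16 (0x3f800000) == 0x3c00);
    assert (ecb_binary16_to_binary32 (0xc000) == 0xc0000000);  /* -2.0 */
  }
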
793/*******************************************************************************/
794/* floating point stuff, can be disabled by defining ECB_NO_LIBM */
795
796/* basically, everything uses "ieee pure-endian" floating point numbers */
797/* the only noteworthy exception is ancient armle, which uses order 43218765 */
798#if 0 \
799 || __i386 || __i386__ \
800 || ECB_GCC_AMD64 \
801 || __powerpc__ || __ppc__ || __powerpc64__ || __ppc64__ \
802 || defined __s390__ || defined __s390x__ \
803 || defined __mips__ \
804 || defined __alpha__ \
805 || defined __hppa__ \
806 || defined __ia64__ \
807 || defined __m68k__ \
808 || defined __m88k__ \
809 || defined __sh__ \
810 || defined _M_IX86 || defined ECB_MSVC_AMD64 || defined _M_IA64 \
811 || (defined __arm__ && (defined __ARM_EABI__ || defined __EABI__ || defined __VFP_FP__ || defined _WIN32_WCE || defined __ANDROID__)) \
812 || defined __aarch64__
813 #define ECB_STDFP 1
814 #include <string.h> /* for memcpy */
815#else
816 #define ECB_STDFP 0
817#endif
818
819#ifndef ECB_NO_LIBM
820
821 #include <math.h> /* for frexp*, ldexp*, INFINITY, NAN */
822
823 /* only the oldest of old doesn't have this one. solaris. */
824 #ifdef INFINITY
825 #define ECB_INFINITY INFINITY
826 #else
827 #define ECB_INFINITY HUGE_VAL
508#endif 828 #endif
509 829
830 #ifdef NAN
831 #define ECB_NAN NAN
832 #else
833 #define ECB_NAN ECB_INFINITY
834 #endif
835
836 #if ECB_C99 || _XOPEN_VERSION >= 600 || _POSIX_VERSION >= 200112L
837 #define ecb_ldexpf(x,e) ldexpf ((x), (e))
838 #define ecb_frexpf(x,e) frexpf ((x), (e))
839 #else
840 #define ecb_ldexpf(x,e) (float) ldexp ((double) (x), (e))
841 #define ecb_frexpf(x,e) (float) frexp ((double) (x), (e))
842 #endif
843
844 /* convert a float to ieee single/binary32 */
845 ecb_function_ ecb_const uint32_t ecb_float_to_binary32 (float x);
846 ecb_function_ ecb_const uint32_t
847 ecb_float_to_binary32 (float x)
848 {
849 uint32_t r;
850
851 #if ECB_STDFP
852 memcpy (&r, &x, 4);
853 #else
854 /* slow emulation, works for anything but -0 */
855 uint32_t m;
856 int e;
857
858 if (x == 0e0f ) return 0x00000000U;
859 if (x > +3.40282346638528860e+38f) return 0x7f800000U;
860 if (x < -3.40282346638528860e+38f) return 0xff800000U;
861 if (x != x ) return 0x7fbfffffU;
862
863 m = ecb_frexpf (x, &e) * 0x1000000U;
864
865 r = m & 0x80000000U;
866
867 if (r)
868 m = -m;
869
870 if (e <= -126)
871 {
872 m &= 0xffffffU;
873 m >>= (-125 - e);
874 e = -126;
875 }
876
877 r |= (e + 126) << 23;
878 r |= m & 0x7fffffU;
879 #endif
880
881 return r;
882 }
883
884 /* converts an ieee single/binary32 to a float */
885 ecb_function_ ecb_const float ecb_binary32_to_float (uint32_t x);
886 ecb_function_ ecb_const float
887 ecb_binary32_to_float (uint32_t x)
888 {
889 float r;
890
891 #if ECB_STDFP
892 memcpy (&r, &x, 4);
893 #else
894 /* emulation, only works for normals and subnormals and +0 */
895 int neg = x >> 31;
896 int e = (x >> 23) & 0xffU;
897
898 x &= 0x7fffffU;
899
900 if (e)
901 x |= 0x800000U;
902 else
903 e = 1;
904
905 /* we distrust ldexpf a bit and do the 2**-24 scaling by an extra multiply */
906 r = ecb_ldexpf (x * (0.5f / 0x800000U), e - 126);
907
908 r = neg ? -r : r;
909 #endif
910
911 return r;
912 }
913
914 /* convert a double to ieee double/binary64 */
915 ecb_function_ ecb_const uint64_t ecb_double_to_binary64 (double x);
916 ecb_function_ ecb_const uint64_t
917 ecb_double_to_binary64 (double x)
918 {
919 uint64_t r;
920
921 #if ECB_STDFP
922 memcpy (&r, &x, 8);
923 #else
924 /* slow emulation, works for anything but -0 */
925 uint64_t m;
926 int e;
927
928 if (x == 0e0 ) return 0x0000000000000000U;
929 if (x > +1.79769313486231470e+308) return 0x7ff0000000000000U;
930 if (x < -1.79769313486231470e+308) return 0xfff0000000000000U;
931 if (x != x ) return 0X7ff7ffffffffffffU;
932
933 m = frexp (x, &e) * 0x20000000000000U;
934
935 r = m & 0x8000000000000000;;
936
937 if (r)
938 m = -m;
939
940 if (e <= -1022)
941 {
942 m &= 0x1fffffffffffffU;
943 m >>= (-1021 - e);
944 e = -1022;
945 }
946
947 r |= ((uint64_t)(e + 1022)) << 52;
948 r |= m & 0xfffffffffffffU;
949 #endif
950
951 return r;
952 }
953
954 /* converts an ieee double/binary64 to a double */
955 ecb_function_ ecb_const double ecb_binary64_to_double (uint64_t x);
956 ecb_function_ ecb_const double
957 ecb_binary64_to_double (uint64_t x)
958 {
959 double r;
960
961 #if ECB_STDFP
962 memcpy (&r, &x, 8);
963 #else
964 /* emulation, only works for normals and subnormals and +0 */
965 int neg = x >> 63;
966 int e = (x >> 52) & 0x7ffU;
967
968 x &= 0xfffffffffffffU;
969
970 if (e)
971 x |= 0x10000000000000U;
972 else
973 e = 1;
974
975 /* we distrust ldexp a bit and do the 2**-53 scaling by an extra multiply */
976 r = ldexp (x * (0.5 / 0x10000000000000U), e - 1022);
977
978 r = neg ? -r : r;
979 #endif
980
981 return r;
982 }
983
984 /* convert a float to ieee half/binary16 */
985 ecb_function_ ecb_const uint16_t ecb_float_to_binary16 (float x);
986 ecb_function_ ecb_const uint16_t
987 ecb_float_to_binary16 (float x)
988 {
989 return ecb_binary32_to_binary16 (ecb_float_to_binary32 (x));
990 }
991
992 /* convert an ieee half/binary16 to float */
993 ecb_function_ ecb_const float ecb_binary16_to_float (uint16_t x);
994 ecb_function_ ecb_const float
995 ecb_binary16_to_float (uint16_t x)
996 {
997 return ecb_binary32_to_float (ecb_binary16_to_binary32 (x));
998 }
999
1000#endif
1001
1002#endif
1003
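A final usage sketch for the float/bit-pattern conversions (assuming ecb.h is included and ECB_NO_LIBM is not defined); 1.5f corresponds to 0x3fc00000 as IEEE binary32 and 0x3e00 as binary16:

  #include <assert.h>

  void
  float_roundtrip_examples (void)
  {
    assert (ecb_float_to_binary32 (1.5f) == 0x3fc00000);
    assert (ecb_binary32_to_float (0x3fc00000) == 1.5f);
    assert (ecb_float_to_binary16 (1.5f) == 0x3e00);
  }
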
