/cvs/libecb/ecb.h

Comparing libecb/ecb.h (file contents):
Revision 1.40 by root, Sat Jun 11 17:34:57 2011 UTC vs.
Revision 1.212 by root, Fri Mar 25 15:31:22 2022 UTC

1/* 1/*
2 * libecb - http://software.schmorp.de/pkg/libecb 2 * libecb - http://software.schmorp.de/pkg/libecb
3 * 3 *
4 * Copyright (©) 2009-2011 Marc Alexander Lehmann <libecb@schmorp.de> 4 * Copyright (©) 2009-2015,2018-2021 Marc Alexander Lehmann <libecb@schmorp.de>
5 * Copyright (©) 2011 Emanuele Giaquinta 5 * Copyright (©) 2011 Emanuele Giaquinta
6 * All rights reserved. 6 * All rights reserved.
7 * 7 *
8 * Redistribution and use in source and binary forms, with or without modifica- 8 * Redistribution and use in source and binary forms, with or without modifica-
9 * tion, are permitted provided that the following conditions are met: 9 * tion, are permitted provided that the following conditions are met:
23 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; 23 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
24 * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, 24 * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
25 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTH- 25 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTH-
26 * ERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED 26 * ERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
27 * OF THE POSSIBILITY OF SUCH DAMAGE. 27 * OF THE POSSIBILITY OF SUCH DAMAGE.
28 *
29 * Alternatively, the contents of this file may be used under the terms of
30 * the GNU General Public License ("GPL") version 2 or any later version,
31 * in which case the provisions of the GPL are applicable instead of
32 * the above. If you wish to allow the use of your version of this file
33 * only under the terms of the GPL and not to allow others to use your
34 * version of this file under the BSD license, indicate your decision
35 * by deleting the provisions above and replace them with the notice
36 * and other provisions required by the GPL. If you do not delete the
37 * provisions above, a recipient may use your version of this file under
38 * either the BSD or the GPL.
28 */ 39 */
29 40
30#ifndef ECB_H 41#ifndef ECB_H
31#define ECB_H 42#define ECB_H
32 43
44/* 16 bits major, 16 bits minor */
45#define ECB_VERSION 0x0001000c
46
47#include <string.h> /* for memcpy */
48
49#if defined (_WIN32) && !defined (__MINGW32__)
50 typedef signed char int8_t;
51 typedef unsigned char uint8_t;
52 typedef signed char int_fast8_t;
53 typedef unsigned char uint_fast8_t;
54 typedef signed short int16_t;
55 typedef unsigned short uint16_t;
56 typedef signed int int_fast16_t;
57 typedef unsigned int uint_fast16_t;
58 typedef signed int int32_t;
59 typedef unsigned int uint32_t;
60 typedef signed int int_fast32_t;
61 typedef unsigned int uint_fast32_t;
62 #if __GNUC__
63 typedef signed long long int64_t;
64 typedef unsigned long long uint64_t;
65 #else /* _MSC_VER || __BORLANDC__ */
66 typedef signed __int64 int64_t;
67 typedef unsigned __int64 uint64_t;
68 #endif
69 typedef int64_t int_fast64_t;
70 typedef uint64_t uint_fast64_t;
71 #ifdef _WIN64
72 #define ECB_PTRSIZE 8
73 typedef uint64_t uintptr_t;
74 typedef int64_t intptr_t;
75 #else
76 #define ECB_PTRSIZE 4
77 typedef uint32_t uintptr_t;
78 typedef int32_t intptr_t;
79 #endif
80#else
33#include <inttypes.h> 81 #include <inttypes.h>
82 #if (defined INTPTR_MAX ? INTPTR_MAX : ULONG_MAX) > 0xffffffffU
83 #define ECB_PTRSIZE 8
84 #else
85 #define ECB_PTRSIZE 4
86 #endif
87#endif
88
89#define ECB_GCC_AMD64 (__amd64 || __amd64__ || __x86_64 || __x86_64__)
90#define ECB_MSVC_AMD64 (_M_AMD64 || _M_X64)
91
92#ifndef ECB_OPTIMIZE_SIZE
93 #if __OPTIMIZE_SIZE__
94 #define ECB_OPTIMIZE_SIZE 1
95 #else
96 #define ECB_OPTIMIZE_SIZE 0
97 #endif
98#endif
99
100/* work around x32 idiocy by defining proper macros */
101#if ECB_GCC_AMD64 || ECB_MSVC_AMD64
102 #if _ILP32
103 #define ECB_AMD64_X32 1
104 #else
105 #define ECB_AMD64 1
106 #endif
107#endif
108
109#if ECB_PTRSIZE >= 8 || ECB_AMD64_X32
110 #define ECB_64BIT_NATIVE 1
111#else
112 #define ECB_64BIT_NATIVE 0
113#endif
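/* Illustrative sketch, not part of ecb.h: ECB_PTRSIZE and ECB_64BIT_NATIVE
 * are meant for compile-time dispatch. The typedef name below is
 * hypothetical and only used for this example. */
#if ECB_64BIT_NATIVE
typedef uint64_t ecb_example_word_t; /* 64 bit arithmetic is native (includes x32) */
#else
typedef uint32_t ecb_example_word_t; /* fall back to 32 bit words */
#endif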
34 114
35/* many compilers define _GNUC_ to some versions but then only implement 115/* many compilers define _GNUC_ to some versions but then only implement
36 * what their idiot authors think are the "more important" extensions, 116 * what their idiot authors think are the "more important" extensions,
37 * causing enourmous grief in return for some better fake benchmark numbers. 117 * causing enormous grief in return for some better fake benchmark numbers.
38 * or so. 118 * or so.
39 * we try to detect these and simply assume they are not gcc - if they have 119 * we try to detect these and simply assume they are not gcc - if they have
40 * an issue with that they should have done it right in the first place. 120 * an issue with that they should have done it right in the first place.
41 */ 121 */
42#ifndef ECB_GCC_VERSION 122#if !defined __GNUC_MINOR__ || defined __INTEL_COMPILER || defined __SUNPRO_C || defined __SUNPRO_CC || defined __llvm__ || defined __clang__
43 #if defined(__INTEL_COMPILER) || defined(__SUNPRO_C) || defined(__llvm__)
44 #define ECB_GCC_VERSION(major,minor) 0 123 #define ECB_GCC_VERSION(major,minor) 0
45 #else 124#else
46 #define ECB_GCC_VERSION(major,minor) (__GNUC__ > (major) || (__GNUC__ == (major) && __GNUC_MINOR__ >= (minor))) 125 #define ECB_GCC_VERSION(major,minor) (__GNUC__ > (major) || (__GNUC__ == (major) && __GNUC_MINOR__ >= (minor)))
47 #endif 126#endif
48#endif
49 127
128#define ECB_CLANG_VERSION(major,minor) (__clang_major__ > (major) || (__clang_major__ == (major) && __clang_minor__ >= (minor)))
129
130#if __clang__ && defined __has_builtin
131 #define ECB_CLANG_BUILTIN(x) __has_builtin (x)
132#else
133 #define ECB_CLANG_BUILTIN(x) 0
134#endif
135
136#if __clang__ && defined __has_extension
137 #define ECB_CLANG_EXTENSION(x) __has_extension (x)
138#else
139 #define ECB_CLANG_EXTENSION(x) 0
140#endif
141
142#define ECB_CPP (__cplusplus+0)
143#define ECB_CPP11 (__cplusplus >= 201103L)
144#define ECB_CPP14 (__cplusplus >= 201402L)
145#define ECB_CPP17 (__cplusplus >= 201703L)
146
147#if ECB_CPP
148 #define ECB_C 0
149 #define ECB_STDC_VERSION 0
150#else
151 #define ECB_C 1
152 #define ECB_STDC_VERSION __STDC_VERSION__
153#endif
154
50#define ECB_C99 (__STDC_VERSION__ >= 199901L) 155#define ECB_C99 (ECB_STDC_VERSION >= 199901L)
156#define ECB_C11 (ECB_STDC_VERSION >= 201112L)
157#define ECB_C17 (ECB_STDC_VERSION >= 201710L)
51 158
52#if __cplusplus 159#if ECB_CPP
53 #define ECB_INLINE static inline 160 #define ECB_EXTERN_C extern "C"
161 #define ECB_EXTERN_C_BEG ECB_EXTERN_C {
162 #define ECB_EXTERN_C_END }
163#else
164 #define ECB_EXTERN_C extern
165 #define ECB_EXTERN_C_BEG
166 #define ECB_EXTERN_C_END
167#endif
168
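/* Illustrative sketch, not part of ecb.h: ECB_EXTERN_C_BEG/_END bracket
 * declarations so a C header can be included from C++ without name
 * mangling. The function name below is hypothetical. */
ECB_EXTERN_C_BEG
void ecb_example_c_function (void);
ECB_EXTERN_C_END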
169/*****************************************************************************/
170
171/* ECB_NO_THREADS - ecb is not used by multiple threads, ever */
172/* ECB_NO_SMP - ecb might be used in multiple threads, but only on a single cpu */
173
174#if ECB_NO_THREADS
175 #define ECB_NO_SMP 1
176#endif
177
178#if ECB_NO_SMP
179 #define ECB_MEMORY_FENCE do { } while (0)
180#endif
181
182/* http://www-01.ibm.com/support/knowledgecenter/SSGH3R_13.1.0/com.ibm.xlcpp131.aix.doc/compiler_ref/compiler_builtins.html */
183#if __xlC__ && ECB_CPP
184 #include <builtins.h>
185#endif
186
187#if 1400 <= _MSC_VER
188 #include <intrin.h> /* fence functions _ReadBarrier, also bit search functions _BitScanReverse */
189#endif
190
191#ifndef ECB_MEMORY_FENCE
192 #if ECB_GCC_VERSION(2,5) || defined __INTEL_COMPILER || (__llvm__ && __GNUC__) || __SUNPRO_C >= 0x5110 || __SUNPRO_CC >= 0x5110
193 #define ECB_MEMORY_FENCE_RELAXED __asm__ __volatile__ ("" : : : "memory")
194 #if __i386 || __i386__
195 #define ECB_MEMORY_FENCE __asm__ __volatile__ ("lock; orb $0, -1(%%esp)" : : : "memory")
196 #define ECB_MEMORY_FENCE_ACQUIRE __asm__ __volatile__ ("" : : : "memory")
197 #define ECB_MEMORY_FENCE_RELEASE __asm__ __volatile__ ("" : : : "memory")
198 #elif ECB_GCC_AMD64
199 #define ECB_MEMORY_FENCE __asm__ __volatile__ ("mfence" : : : "memory")
200 #define ECB_MEMORY_FENCE_ACQUIRE __asm__ __volatile__ ("" : : : "memory")
201 #define ECB_MEMORY_FENCE_RELEASE __asm__ __volatile__ ("" : : : "memory")
202 #elif __powerpc__ || __ppc__ || __powerpc64__ || __ppc64__
203 #define ECB_MEMORY_FENCE __asm__ __volatile__ ("sync" : : : "memory")
204 #elif defined __ARM_ARCH_2__ \
205 || defined __ARM_ARCH_3__ || defined __ARM_ARCH_3M__ \
206 || defined __ARM_ARCH_4__ || defined __ARM_ARCH_4T__ \
207 || defined __ARM_ARCH_5__ || defined __ARM_ARCH_5E__ \
208 || defined __ARM_ARCH_5T__ || defined __ARM_ARCH_5TE__ \
209 || defined __ARM_ARCH_5TEJ__
210 /* should not need any, unless running old code on newer cpu - arm doesn't support that */
211 #elif defined __ARM_ARCH_6__ || defined __ARM_ARCH_6J__ \
212 || defined __ARM_ARCH_6K__ || defined __ARM_ARCH_6ZK__ \
213 || defined __ARM_ARCH_6T2__
214 #define ECB_MEMORY_FENCE __asm__ __volatile__ ("mcr p15,0,%0,c7,c10,5" : : "r" (0) : "memory")
215 #elif defined __ARM_ARCH_7__ || defined __ARM_ARCH_7A__ \
216 || defined __ARM_ARCH_7R__ || defined __ARM_ARCH_7M__
217 #define ECB_MEMORY_FENCE __asm__ __volatile__ ("dmb" : : : "memory")
218 #elif __aarch64__
219 #define ECB_MEMORY_FENCE __asm__ __volatile__ ("dmb ish" : : : "memory")
220 #elif (__sparc || __sparc__) && !(__sparc_v8__ || defined __sparcv8)
221 #define ECB_MEMORY_FENCE __asm__ __volatile__ ("membar #LoadStore | #LoadLoad | #StoreStore | #StoreLoad" : : : "memory")
222 #define ECB_MEMORY_FENCE_ACQUIRE __asm__ __volatile__ ("membar #LoadStore | #LoadLoad" : : : "memory")
223 #define ECB_MEMORY_FENCE_RELEASE __asm__ __volatile__ ("membar #LoadStore | #StoreStore")
224 #elif defined __s390__ || defined __s390x__
225 #define ECB_MEMORY_FENCE __asm__ __volatile__ ("bcr 15,0" : : : "memory")
226 #elif defined __mips__
227 /* GNU/Linux emulates sync on mips1 architectures, so we force its use */
228 /* anybody else who still uses mips1 is supposed to send in their version, with detection code. */
229 #define ECB_MEMORY_FENCE __asm__ __volatile__ (".set mips2; sync; .set mips0" : : : "memory")
230 #elif defined __alpha__
231 #define ECB_MEMORY_FENCE __asm__ __volatile__ ("mb" : : : "memory")
232 #elif defined __hppa__
233 #define ECB_MEMORY_FENCE __asm__ __volatile__ ("" : : : "memory")
234 #define ECB_MEMORY_FENCE_RELEASE __asm__ __volatile__ ("")
235 #elif defined __ia64__
236 #define ECB_MEMORY_FENCE __asm__ __volatile__ ("mf" : : : "memory")
237 #elif defined __m68k__
238 #define ECB_MEMORY_FENCE __asm__ __volatile__ ("" : : : "memory")
239 #elif defined __m88k__
240 #define ECB_MEMORY_FENCE __asm__ __volatile__ ("tb1 0,%%r0,128" : : : "memory")
241 #elif defined __sh__
242 #define ECB_MEMORY_FENCE __asm__ __volatile__ ("" : : : "memory")
243 #endif
244 #endif
245#endif
246
247#ifndef ECB_MEMORY_FENCE
248 #if ECB_GCC_VERSION(4,7)
249 /* see comment below (stdatomic.h) about the C11 memory model. */
250 #define ECB_MEMORY_FENCE __atomic_thread_fence (__ATOMIC_SEQ_CST)
251 #define ECB_MEMORY_FENCE_ACQUIRE __atomic_thread_fence (__ATOMIC_ACQUIRE)
252 #define ECB_MEMORY_FENCE_RELEASE __atomic_thread_fence (__ATOMIC_RELEASE)
253 #undef ECB_MEMORY_FENCE_RELAXED
254 #define ECB_MEMORY_FENCE_RELAXED __atomic_thread_fence (__ATOMIC_RELAXED)
255
256 #elif ECB_CLANG_EXTENSION(c_atomic)
257 /* see comment below (stdatomic.h) about the C11 memory model. */
258 #define ECB_MEMORY_FENCE __c11_atomic_thread_fence (__ATOMIC_SEQ_CST)
259 #define ECB_MEMORY_FENCE_ACQUIRE __c11_atomic_thread_fence (__ATOMIC_ACQUIRE)
260 #define ECB_MEMORY_FENCE_RELEASE __c11_atomic_thread_fence (__ATOMIC_RELEASE)
261 #undef ECB_MEMORY_FENCE_RELAXED
262 #define ECB_MEMORY_FENCE_RELAXED __c11_atomic_thread_fence (__ATOMIC_RELAXED)
263
264 #elif ECB_GCC_VERSION(4,4) || defined __INTEL_COMPILER || defined __clang__
265 #define ECB_MEMORY_FENCE __sync_synchronize ()
266 #elif _MSC_VER >= 1500 /* VC++ 2008 */
267 /* apparently, microsoft broke all the memory barrier stuff in Visual Studio 2008... */
268 #pragma intrinsic(_ReadBarrier,_WriteBarrier,_ReadWriteBarrier)
269 #define ECB_MEMORY_FENCE _ReadWriteBarrier (); MemoryBarrier()
270 #define ECB_MEMORY_FENCE_ACQUIRE _ReadWriteBarrier (); MemoryBarrier() /* according to msdn, _ReadBarrier is not a load fence */
271 #define ECB_MEMORY_FENCE_RELEASE _WriteBarrier (); MemoryBarrier()
272 #elif _MSC_VER >= 1400 /* VC++ 2005 */
273 #pragma intrinsic(_ReadBarrier,_WriteBarrier,_ReadWriteBarrier)
274 #define ECB_MEMORY_FENCE _ReadWriteBarrier ()
275 #define ECB_MEMORY_FENCE_ACQUIRE _ReadWriteBarrier () /* according to msdn, _ReadBarrier is not a load fence */
276 #define ECB_MEMORY_FENCE_RELEASE _WriteBarrier ()
277 #elif defined _WIN32
278 #include <WinNT.h>
279 #define ECB_MEMORY_FENCE MemoryBarrier () /* actually just xchg on x86... scary */
280 #elif __SUNPRO_C >= 0x5110 || __SUNPRO_CC >= 0x5110
281 #include <mbarrier.h>
282 #define ECB_MEMORY_FENCE __machine_rw_barrier ()
283 #define ECB_MEMORY_FENCE_ACQUIRE __machine_acq_barrier ()
284 #define ECB_MEMORY_FENCE_RELEASE __machine_rel_barrier ()
285 #define ECB_MEMORY_FENCE_RELAXED __compiler_barrier ()
286 #elif __xlC__
287 #define ECB_MEMORY_FENCE __sync ()
288 #endif
289#endif
290
291#ifndef ECB_MEMORY_FENCE
292 #if ECB_C11 && !defined __STDC_NO_ATOMICS__
293 /* we assume that these memory fences work on all variables/all memory accesses, */
294 /* not just C11 atomics and atomic accesses */
295 #include <stdatomic.h>
296 #define ECB_MEMORY_FENCE atomic_thread_fence (memory_order_seq_cst)
297 #define ECB_MEMORY_FENCE_ACQUIRE atomic_thread_fence (memory_order_acquire)
298 #define ECB_MEMORY_FENCE_RELEASE atomic_thread_fence (memory_order_release)
299 #endif
300#endif
301
302#ifndef ECB_MEMORY_FENCE
303 #if !ECB_AVOID_PTHREADS
304 /*
305 * if you get undefined symbol references to pthread_mutex_lock,
306 * or failure to find pthread.h, then you should implement
307 * the ECB_MEMORY_FENCE operations for your cpu/compiler
308 * OR provide pthread.h and link against the posix thread library
309 * of your system.
310 */
311 #include <pthread.h>
312 #define ECB_NEEDS_PTHREADS 1
313 #define ECB_MEMORY_FENCE_NEEDS_PTHREADS 1
314
315 static pthread_mutex_t ecb_mf_lock = PTHREAD_MUTEX_INITIALIZER;
316 #define ECB_MEMORY_FENCE do { pthread_mutex_lock (&ecb_mf_lock); pthread_mutex_unlock (&ecb_mf_lock); } while (0)
317 #endif
318#endif
319
320#if !defined ECB_MEMORY_FENCE_ACQUIRE && defined ECB_MEMORY_FENCE
321 #define ECB_MEMORY_FENCE_ACQUIRE ECB_MEMORY_FENCE
322#endif
323
324#if !defined ECB_MEMORY_FENCE_RELEASE && defined ECB_MEMORY_FENCE
325 #define ECB_MEMORY_FENCE_RELEASE ECB_MEMORY_FENCE
326#endif
327
328#if !defined ECB_MEMORY_FENCE_RELAXED && defined ECB_MEMORY_FENCE
329 #define ECB_MEMORY_FENCE_RELAXED ECB_MEMORY_FENCE /* very heavy-handed */
330#endif
331
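/* Illustrative sketch, not part of ecb.h: the classic release/acquire
 * pairing these fences are meant for. One thread publishes a payload and
 * sets a flag, the other waits for the flag before reading the payload.
 * All ecb_example_* names are hypothetical. */
static int ecb_example_payload;
static volatile int ecb_example_ready;

static void
ecb_example_publish (int value)
{
  ecb_example_payload = value;
  ECB_MEMORY_FENCE_RELEASE; /* payload must be visible before the flag */
  ecb_example_ready = 1;
}

static int
ecb_example_consume (void)
{
  while (!ecb_example_ready)
    ; /* spin (sketch only) */

  ECB_MEMORY_FENCE_ACQUIRE; /* flag must be read before the payload */
  return ecb_example_payload;
}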
332/*****************************************************************************/
333
334#if ECB_CPP
335 #define ecb_inline static inline
54#elif ECB_GCC_VERSION(2,5) 336#elif ECB_GCC_VERSION(2,5)
55 #define ECB_INLINE static __inline__ 337 #define ecb_inline static __inline__
56#elif ECB_C99 338#elif ECB_C99
57 #define ECB_INLINE static inline 339 #define ecb_inline static inline
58#else 340#else
59 #define ECB_INLINE static 341 #define ecb_inline static
60#endif 342#endif
61 343
62#if ECB_GCC_VERSION(3,3) 344#if ECB_GCC_VERSION(3,3)
63 #define ecb_restrict __restrict__ 345 #define ecb_restrict __restrict__
64#elif ECB_C99 346#elif ECB_C99
71 353
72#define ECB_CONCAT_(a, b) a ## b 354#define ECB_CONCAT_(a, b) a ## b
73#define ECB_CONCAT(a, b) ECB_CONCAT_(a, b) 355#define ECB_CONCAT(a, b) ECB_CONCAT_(a, b)
74#define ECB_STRINGIFY_(a) # a 356#define ECB_STRINGIFY_(a) # a
75#define ECB_STRINGIFY(a) ECB_STRINGIFY_(a) 357#define ECB_STRINGIFY(a) ECB_STRINGIFY_(a)
358#define ECB_STRINGIFY_EXPR(expr) ((expr), ECB_STRINGIFY_ (expr))
76 359
 77#define ecb_function_ ECB_INLINE 360/* This marks larger functions that do not necessarily need to be inlined */
361/* The idea is to possibly compile the header twice, */
362/* once exposing only the declarations, another time to define external functions */
363/* TODO: possibly static would be best for these at the moment? */
364#define ecb_function_ ecb_inline
78 365
79#if ECB_GCC_VERSION(3,1) 366#if ECB_GCC_VERSION(3,1) || ECB_CLANG_VERSION(2,8)
80 #define ecb_attribute(attrlist) __attribute__(attrlist) 367 #define ecb_attribute(attrlist) __attribute__ (attrlist)
368#else
369 #define ecb_attribute(attrlist)
370#endif
371
372#if ECB_GCC_VERSION(3,1) || ECB_CLANG_BUILTIN(__builtin_constant_p)
81 #define ecb_is_constant(expr) __builtin_constant_p (expr) 373 #define ecb_is_constant(expr) __builtin_constant_p (expr)
374#else
375 /* possible C11 impl for integral types
376 typedef struct ecb_is_constant_struct ecb_is_constant_struct;
377 #define ecb_is_constant(expr) _Generic ((1 ? (struct ecb_is_constant_struct *)0 : (void *)((expr) - (expr)), ecb_is_constant_struct *: 0, default: 1)) */
378
379 #define ecb_is_constant(expr) 0
380#endif
381
382#if ECB_GCC_VERSION(3,1) || ECB_CLANG_BUILTIN(__builtin_expect)
82 #define ecb_expect(expr,value) __builtin_expect ((expr),(value)) 383 #define ecb_expect(expr,value) __builtin_expect ((expr),(value))
384#else
385 #define ecb_expect(expr,value) (expr)
386#endif
387
388#if ECB_GCC_VERSION(3,1) || ECB_CLANG_BUILTIN(__builtin_prefetch)
83 #define ecb_prefetch(addr,rw,locality) __builtin_prefetch (addr, rw, locality) 389 #define ecb_prefetch(addr,rw,locality) __builtin_prefetch (addr, rw, locality)
84#else 390#else
85 #define ecb_attribute(attrlist)
86 #define ecb_is_constant(expr) 0
87 #define ecb_expect(expr,value) (expr)
88 #define ecb_prefetch(addr,rw,locality) 391 #define ecb_prefetch(addr,rw,locality)
89#endif 392#endif
90 393
91/* no emulation for ecb_decltype */ 394/* no emulation for ecb_decltype */
92#if ECB_GCC_VERSION(4,5) 395#if ECB_CPP11
396 // older implementations might have problems with decltype(x)::type, work around it
397 template<class T> struct ecb_decltype_t { typedef T type; };
93 #define ecb_decltype(x) __decltype(x) 398 #define ecb_decltype(x) ecb_decltype_t<decltype (x)>::type
94#elif ECB_GCC_VERSION(3,0) 399#elif ECB_GCC_VERSION(3,0) || ECB_CLANG_VERSION(2,8)
95 #define ecb_decltype(x) __typeof(x) 400 #define ecb_decltype(x) __typeof__ (x)
96#endif 401#endif
97 402
403#if _MSC_VER >= 1300
404 #define ecb_deprecated __declspec (deprecated)
405#else
406 #define ecb_deprecated ecb_attribute ((__deprecated__))
407#endif
408
409#if _MSC_VER >= 1500
410 #define ecb_deprecated_message(msg) __declspec (deprecated (msg))
411#elif ECB_GCC_VERSION(4,5)
412 #define ecb_deprecated_message(msg) ecb_attribute ((__deprecated__ (msg))
413#else
414 #define ecb_deprecated_message(msg) ecb_deprecated
415#endif
416
417#if _MSC_VER >= 1400
418 #define ecb_noinline __declspec (noinline)
419#else
98#define ecb_noinline ecb_attribute ((__noinline__)) 420 #define ecb_noinline ecb_attribute ((__noinline__))
99#define ecb_noreturn ecb_attribute ((__noreturn__)) 421#endif
422
100#define ecb_unused ecb_attribute ((__unused__)) 423#define ecb_unused ecb_attribute ((__unused__))
101#define ecb_const ecb_attribute ((__const__)) 424#define ecb_const ecb_attribute ((__const__))
102#define ecb_pure ecb_attribute ((__pure__)) 425#define ecb_pure ecb_attribute ((__pure__))
426
427#if ECB_C11 || __IBMC_NORETURN
428 /* http://www-01.ibm.com/support/knowledgecenter/SSGH3R_13.1.0/com.ibm.xlcpp131.aix.doc/language_ref/noreturn.html */
429 #define ecb_noreturn _Noreturn
430#elif ECB_CPP11
431 #define ecb_noreturn [[noreturn]]
432#elif _MSC_VER >= 1200
433 /* http://msdn.microsoft.com/en-us/library/k6ktzx3s.aspx */
434 #define ecb_noreturn __declspec (noreturn)
435#else
436 #define ecb_noreturn ecb_attribute ((__noreturn__))
437#endif
103 438
104#if ECB_GCC_VERSION(4,3) 439#if ECB_GCC_VERSION(4,3)
105 #define ecb_artificial ecb_attribute ((__artificial__)) 440 #define ecb_artificial ecb_attribute ((__artificial__))
106 #define ecb_hot ecb_attribute ((__hot__)) 441 #define ecb_hot ecb_attribute ((__hot__))
107 #define ecb_cold ecb_attribute ((__cold__)) 442 #define ecb_cold ecb_attribute ((__cold__))
118#define ecb_expect_true(expr) ecb_expect (!!(expr), 1) 453#define ecb_expect_true(expr) ecb_expect (!!(expr), 1)
119/* for compatibility to the rest of the world */ 454/* for compatibility to the rest of the world */
120#define ecb_likely(expr) ecb_expect_true (expr) 455#define ecb_likely(expr) ecb_expect_true (expr)
121#define ecb_unlikely(expr) ecb_expect_false (expr) 456#define ecb_unlikely(expr) ecb_expect_false (expr)
122 457
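/* Illustrative sketch, not part of ecb.h: ecb_expect_false marks a branch
 * as cold so the compiler keeps the common path straight. The function
 * below is hypothetical. */
ecb_inline int
ecb_example_parse_digit (const char *s, uint8_t *out)
{
  if (ecb_expect_false (!s || *s < '0' || *s > '9'))
    return -1; /* rarely taken error path */

  *out = (uint8_t)(*s - '0'); /* hot path */
  return 0;
}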
123/* try to tell the compiler that some condition is definitely true */
124#define ecb_assume(cond) do { if (!(cond)) ecb_unreachable (); } while (0)
125
126/* count trailing zero bits and count # of one bits */ 458/* count trailing zero bits and count # of one bits */
127#if ECB_GCC_VERSION(3,4) 459#if ECB_GCC_VERSION(3,4) \
460 || (ECB_CLANG_BUILTIN(__builtin_clz) && ECB_CLANG_BUILTIN(__builtin_clzll) \
461 && ECB_CLANG_BUILTIN(__builtin_ctz) && ECB_CLANG_BUILTIN(__builtin_ctzll) \
462 && ECB_CLANG_BUILTIN(__builtin_popcount))
128 #define ecb_ctz32(x) __builtin_ctz (x) 463 #define ecb_ctz32(x) __builtin_ctz (x)
464 #define ecb_ctz64(x) (__SIZEOF_LONG__ == 64 ? __builtin_ctzl (x) : __builtin_ctzll (x))
465 #define ecb_clz32(x) __builtin_clz (x)
466 #define ecb_clz64(x) (__SIZEOF_LONG__ == 64 ? __builtin_clzl (x) : __builtin_clzll (x))
467 #define ecb_ld32(x) (ecb_clz32 (x) ^ 31)
468 #define ecb_ld64(x) (ecb_clz64 (x) ^ 63)
129 #define ecb_popcount32(x) __builtin_popcount (x) 469 #define ecb_popcount32(x) __builtin_popcount (x)
470 /* ecb_popcount64 is more difficult, see below */
130#else 471#else
131 ecb_function_ int ecb_ctz32 (uint32_t x) ecb_const; 472 ecb_function_ ecb_const int ecb_ctz32 (uint32_t x);
132 ecb_function_ int 473 ecb_function_ ecb_const int
133 ecb_ctz32 (uint32_t x) 474 ecb_ctz32 (uint32_t x)
134 { 475 {
476#if 1400 <= _MSC_VER && (_M_IX86 || _M_X64 || _M_IA64 || _M_ARM)
477 unsigned long r;
478 _BitScanForward (&r, x);
479 return (int)r;
480#else
135 int r = 0; 481 int r;
136 482
137 x &= -x; /* this isolates the lowest bit */ 483 x &= ~x + 1; /* this isolates the lowest bit */
138 484
485 #if 1
486 /* David Seal's algorithm, Message-ID: <32975@armltd.uucp> from 1994 */
487 /* This happens to return 32 for x == 0, but the API does not support this */
488
489 /* -0 marks unused entries */
490 static unsigned char table[64] =
491 {
492 32, 0, 1, 12, 2, 6, -0, 13, 3, -0, 7, -0, -0, -0, -0, 14,
493 10, 4, -0, -0, 8, -0, -0, 25, -0, -0, -0, -0, -0, 21, 27, 15,
494 31, 11, 5, -0, -0, -0, -0, -0, 9, -0, -0, 24, -0, -0, 20, 26,
495 30, -0, -0, -0, -0, 23, -0, 19, 29, -0, 22, 18, 28, 17, 16, -0
496 };
497
498 /* magic constant results in 33 unique values in the upper 6 bits */
499 x *= 0x0450fbafU; /* == 17 * 65 * 65535 */
500
501 r = table [x >> 26];
502 #elif 0 /* branchless on i386, typically */
503 r = 0;
504 r += !!(x & 0xaaaaaaaa) << 0;
505 r += !!(x & 0xcccccccc) << 1;
506 r += !!(x & 0xf0f0f0f0) << 2;
507 r += !!(x & 0xff00ff00) << 3;
508 r += !!(x & 0xffff0000) << 4;
509 #else /* branchless on modern compilers, typically */
510 r = 0;
139 if (x & 0xaaaaaaaa) r += 1; 511 if (x & 0xaaaaaaaa) r += 1;
140 if (x & 0xcccccccc) r += 2; 512 if (x & 0xcccccccc) r += 2;
141 if (x & 0xf0f0f0f0) r += 4; 513 if (x & 0xf0f0f0f0) r += 4;
142 if (x & 0xff00ff00) r += 8; 514 if (x & 0xff00ff00) r += 8;
143 if (x & 0xffff0000) r += 16; 515 if (x & 0xffff0000) r += 16;
516#endif
144 517
145 return r; 518 return r;
519#endif
146 } 520 }
147 521
522 ecb_function_ ecb_const int ecb_ctz64 (uint64_t x);
523 ecb_function_ ecb_const int
524 ecb_ctz64 (uint64_t x)
525 {
526#if 1400 <= _MSC_VER && (_M_X64 || _M_IA64 || _M_ARM)
527 unsigned long r;
528 _BitScanForward64 (&r, x);
529 return (int)r;
530#else
531 int shift = x & 0xffffffff ? 0 : 32;
532 return ecb_ctz32 (x >> shift) + shift;
533#endif
534 }
535
536 ecb_function_ ecb_const int ecb_clz32 (uint32_t x);
537 ecb_function_ ecb_const int
538 ecb_clz32 (uint32_t x)
539 {
540#if 1400 <= _MSC_VER && (_M_IX86 || _M_X64 || _M_IA64 || _M_ARM)
541 unsigned long r;
542 _BitScanReverse (&r, x);
543 return (int)r;
544#else
545
546 /* Robert Harley's algorithm from comp.arch 1996-12-07 */
547 /* This happens to return 32 for x == 0, but the API does not support this */
548
549 /* -0 marks unused table elements */
550 static unsigned char table[64] =
551 {
552 32, 31, -0, 16, -0, 30, 3, -0, 15, -0, -0, -0, 29, 10, 2, -0,
553 -0, -0, 12, 14, 21, -0, 19, -0, -0, 28, -0, 25, -0, 9, 1, -0,
554 17, -0, 4, -0, -0, -0, 11, -0, 13, 22, 20, -0, 26, -0, -0, 18,
555 5, -0, -0, 23, -0, 27, -0, 6, -0, 24, 7, -0, 8, -0, 0, -0
556 };
557
558 /* propagate leftmost 1 bit to the right */
559 x |= x >> 1;
560 x |= x >> 2;
561 x |= x >> 4;
562 x |= x >> 8;
563 x |= x >> 16;
564
565 /* magic constant results in 33 unique values in the upper 6 bits */
566 x *= 0x06EB14F9U; /* == 7 * 255 * 255 * 255 */
567
568 return table [x >> 26];
569#endif
570 }
571
572 ecb_function_ ecb_const int ecb_clz64 (uint64_t x);
573 ecb_function_ ecb_const int
574 ecb_clz64 (uint64_t x)
575 {
576#if 1400 <= _MSC_VER && (_M_X64 || _M_IA64 || _M_ARM)
577 unsigned long r;
578 _BitScanReverse64 (&r, x);
579 return (int)r;
580#else
581 uint32_t l = x >> 32;
582 int shift = l ? 0 : 32;
583 return ecb_clz32 (l ? l : x) + shift;
584#endif
585 }
586
148 ecb_function_ int ecb_popcount32 (uint32_t x) ecb_const; 587 ecb_function_ ecb_const int ecb_popcount32 (uint32_t x);
149 ecb_function_ int 588 ecb_function_ ecb_const int
150 ecb_popcount32 (uint32_t x) 589 ecb_popcount32 (uint32_t x)
151 { 590 {
152 x -= (x >> 1) & 0x55555555; 591 x -= (x >> 1) & 0x55555555;
153 x = ((x >> 2) & 0x33333333) + (x & 0x33333333); 592 x = ((x >> 2) & 0x33333333) + (x & 0x33333333);
154 x = ((x >> 4) + x) & 0x0f0f0f0f; 593 x = ((x >> 4) + x) & 0x0f0f0f0f;
155 x *= 0x01010101; 594 x *= 0x01010101;
156 595
157 return x >> 24; 596 return x >> 24;
158 } 597 }
159#endif
160 598
161#if ECB_GCC_VERSION(4,3) 599 ecb_function_ ecb_const int ecb_ld32 (uint32_t x);
600 ecb_function_ ecb_const int ecb_ld32 (uint32_t x)
601 {
602#if 1400 <= _MSC_VER && (_M_IX86 || _M_X64 || _M_IA64 || _M_ARM)
603 unsigned long r;
604 _BitScanReverse (&r, x);
605 return (int)r;
606#else
607 int r = 0;
608
609 if (x >> 16) { x >>= 16; r += 16; }
610 if (x >> 8) { x >>= 8; r += 8; }
611 if (x >> 4) { x >>= 4; r += 4; }
612 if (x >> 2) { x >>= 2; r += 2; }
613 if (x >> 1) { r += 1; }
614
615 return r;
616#endif
617 }
618
619 ecb_function_ ecb_const int ecb_ld64 (uint64_t x);
620 ecb_function_ ecb_const int ecb_ld64 (uint64_t x)
621 {
622#if 1400 <= _MSC_VER && (_M_X64 || _M_IA64 || _M_ARM)
623 unsigned long r;
624 _BitScanReverse64 (&r, x);
625 return (int)r;
626#else
627 int r = 0;
628
629 if (x >> 32) { x >>= 32; r += 32; }
630
631 return r + ecb_ld32 (x);
632#endif
633 }
634#endif
635
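/* Illustrative sketch, not part of ecb.h: the bit-scan contracts above on
 * a concrete value (x == 0 is not supported by any of them). */
ecb_inline void
ecb_example_bit_scan (void)
{
  int tz = ecb_ctz32 (0x50); /* lowest set bit of 0b1010000 -> 4 */
  int ld = ecb_ld32 (0x50); /* highest set bit, i.e. floor (log2) -> 6 */
  int lz = ecb_clz32 (0x50); /* leading zero bits, 31 - ecb_ld32 -> 25 */

  (void)tz; (void)ld; (void)lz;
}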
636ecb_function_ ecb_const ecb_bool ecb_is_pot32 (uint32_t x);
637ecb_function_ ecb_const ecb_bool ecb_is_pot32 (uint32_t x) { return !(x & (x - 1)); }
638ecb_function_ ecb_const ecb_bool ecb_is_pot64 (uint64_t x);
639ecb_function_ ecb_const ecb_bool ecb_is_pot64 (uint64_t x) { return !(x & (x - 1)); }
640
641ecb_function_ ecb_const uint8_t ecb_bitrev8 (uint8_t x);
642ecb_function_ ecb_const uint8_t ecb_bitrev8 (uint8_t x)
643{
644 return ( (x * 0x0802U & 0x22110U)
645 | (x * 0x8020U & 0x88440U)) * 0x10101U >> 16;
646}
647
648ecb_function_ ecb_const uint16_t ecb_bitrev16 (uint16_t x);
649ecb_function_ ecb_const uint16_t ecb_bitrev16 (uint16_t x)
650{
651 x = ((x >> 1) & 0x5555) | ((x & 0x5555) << 1);
652 x = ((x >> 2) & 0x3333) | ((x & 0x3333) << 2);
653 x = ((x >> 4) & 0x0f0f) | ((x & 0x0f0f) << 4);
654 x = ( x >> 8 ) | ( x << 8);
655
656 return x;
657}
658
659ecb_function_ ecb_const uint32_t ecb_bitrev32 (uint32_t x);
660ecb_function_ ecb_const uint32_t ecb_bitrev32 (uint32_t x)
661{
662 x = ((x >> 1) & 0x55555555) | ((x & 0x55555555) << 1);
663 x = ((x >> 2) & 0x33333333) | ((x & 0x33333333) << 2);
664 x = ((x >> 4) & 0x0f0f0f0f) | ((x & 0x0f0f0f0f) << 4);
665 x = ((x >> 8) & 0x00ff00ff) | ((x & 0x00ff00ff) << 8);
666 x = ( x >> 16 ) | ( x << 16);
667
668 return x;
669}
670
671ecb_function_ ecb_const int ecb_popcount64 (uint64_t x);
672ecb_function_ ecb_const int
673ecb_popcount64 (uint64_t x)
674{
675 /* popcount64 is only available on 64 bit cpus as gcc builtin. */
676 /* also, gcc/clang make this surprisingly difficult to use */
677#if (__SIZEOF_LONG__ == 8) && (ECB_GCC_VERSION(3,4) || ECB_CLANG_BUILTIN (__builtin_popcountl))
678 return __builtin_popcountl (x);
679#else
680 return ecb_popcount32 (x) + ecb_popcount32 (x >> 32);
681#endif
682}
683
684ecb_inline ecb_const uint8_t ecb_rotl8 (uint8_t x, unsigned int count);
685ecb_inline ecb_const uint8_t ecb_rotr8 (uint8_t x, unsigned int count);
686ecb_inline ecb_const uint16_t ecb_rotl16 (uint16_t x, unsigned int count);
687ecb_inline ecb_const uint16_t ecb_rotr16 (uint16_t x, unsigned int count);
688ecb_inline ecb_const uint32_t ecb_rotl32 (uint32_t x, unsigned int count);
689ecb_inline ecb_const uint32_t ecb_rotr32 (uint32_t x, unsigned int count);
690ecb_inline ecb_const uint64_t ecb_rotl64 (uint64_t x, unsigned int count);
691ecb_inline ecb_const uint64_t ecb_rotr64 (uint64_t x, unsigned int count);
692
693ecb_inline ecb_const uint8_t ecb_rotl8 (uint8_t x, unsigned int count) { return (x >> (-count & 7)) | (x << (count & 7)); }
694ecb_inline ecb_const uint8_t ecb_rotr8 (uint8_t x, unsigned int count) { return (x << (-count & 7)) | (x >> (count & 7)); }
695ecb_inline ecb_const uint16_t ecb_rotl16 (uint16_t x, unsigned int count) { return (x >> (-count & 15)) | (x << (count & 15)); }
696ecb_inline ecb_const uint16_t ecb_rotr16 (uint16_t x, unsigned int count) { return (x << (-count & 15)) | (x >> (count & 15)); }
697ecb_inline ecb_const uint32_t ecb_rotl32 (uint32_t x, unsigned int count) { return (x >> (-count & 31)) | (x << (count & 31)); }
698ecb_inline ecb_const uint32_t ecb_rotr32 (uint32_t x, unsigned int count) { return (x << (-count & 31)) | (x >> (count & 31)); }
699ecb_inline ecb_const uint64_t ecb_rotl64 (uint64_t x, unsigned int count) { return (x >> (-count & 63)) | (x << (count & 63)); }
700ecb_inline ecb_const uint64_t ecb_rotr64 (uint64_t x, unsigned int count) { return (x << (-count & 63)) | (x >> (count & 63)); }
701
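/* Illustrative sketch, not part of ecb.h: a few of the helpers above on
 * concrete values. */
ecb_inline void
ecb_example_bit_tricks (void)
{
  uint8_t rev = ecb_bitrev8 (0x01); /* 0b00000001 -> 0b10000000 == 0x80 */
  int pc = ecb_popcount64 (0xf0f0f0f0f0f0f0f0ULL); /* == 32 */
  uint32_t rot = ecb_rotl32 (0x80000001U, 1); /* == 0x00000003 */

  (void)rev; (void)pc; (void)rot;
}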
702#if ECB_CPP
703
704inline uint8_t ecb_ctz (uint8_t v) { return ecb_ctz32 (v); }
705inline uint16_t ecb_ctz (uint16_t v) { return ecb_ctz32 (v); }
706inline uint32_t ecb_ctz (uint32_t v) { return ecb_ctz32 (v); }
707inline uint64_t ecb_ctz (uint64_t v) { return ecb_ctz64 (v); }
708
709inline bool ecb_is_pot (uint8_t v) { return ecb_is_pot32 (v); }
710inline bool ecb_is_pot (uint16_t v) { return ecb_is_pot32 (v); }
711inline bool ecb_is_pot (uint32_t v) { return ecb_is_pot32 (v); }
712inline bool ecb_is_pot (uint64_t v) { return ecb_is_pot64 (v); }
713
714inline int ecb_ld (uint8_t v) { return ecb_ld32 (v); }
715inline int ecb_ld (uint16_t v) { return ecb_ld32 (v); }
716inline int ecb_ld (uint32_t v) { return ecb_ld32 (v); }
717inline int ecb_ld (uint64_t v) { return ecb_ld64 (v); }
718
719inline int ecb_popcount (uint8_t v) { return ecb_popcount32 (v); }
720inline int ecb_popcount (uint16_t v) { return ecb_popcount32 (v); }
721inline int ecb_popcount (uint32_t v) { return ecb_popcount32 (v); }
722inline int ecb_popcount (uint64_t v) { return ecb_popcount64 (v); }
723
724inline uint8_t ecb_bitrev (uint8_t v) { return ecb_bitrev8 (v); }
725inline uint16_t ecb_bitrev (uint16_t v) { return ecb_bitrev16 (v); }
726inline uint32_t ecb_bitrev (uint32_t v) { return ecb_bitrev32 (v); }
727
728inline uint8_t ecb_rotl (uint8_t v, unsigned int count) { return ecb_rotl8 (v, count); }
729inline uint16_t ecb_rotl (uint16_t v, unsigned int count) { return ecb_rotl16 (v, count); }
730inline uint32_t ecb_rotl (uint32_t v, unsigned int count) { return ecb_rotl32 (v, count); }
731inline uint64_t ecb_rotl (uint64_t v, unsigned int count) { return ecb_rotl64 (v, count); }
732
733inline uint8_t ecb_rotr (uint8_t v, unsigned int count) { return ecb_rotr8 (v, count); }
734inline uint16_t ecb_rotr (uint16_t v, unsigned int count) { return ecb_rotr16 (v, count); }
735inline uint32_t ecb_rotr (uint32_t v, unsigned int count) { return ecb_rotr32 (v, count); }
736inline uint64_t ecb_rotr (uint64_t v, unsigned int count) { return ecb_rotr64 (v, count); }
737
738#endif
739
740#if ECB_GCC_VERSION(4,3) || (ECB_CLANG_BUILTIN(__builtin_bswap32) && ECB_CLANG_BUILTIN(__builtin_bswap64))
741 #if ECB_GCC_VERSION(4,8) || ECB_CLANG_BUILTIN(__builtin_bswap16)
162 #define ecb_bswap32(x) __builtin_bswap32 (x) 742 #define ecb_bswap16(x) __builtin_bswap16 (x)
743 #else
163 #define ecb_bswap16(x) (__builtin_bswap32(x) >> 16) 744 #define ecb_bswap16(x) (__builtin_bswap32 (x) >> 16)
745 #endif
746 #define ecb_bswap32(x) __builtin_bswap32 (x)
747 #define ecb_bswap64(x) __builtin_bswap64 (x)
748#elif _MSC_VER
749 #include <stdlib.h>
750 #define ecb_bswap16(x) ((uint16_t)_byteswap_ushort ((uint16_t)(x)))
751 #define ecb_bswap32(x) ((uint32_t)_byteswap_ulong ((uint32_t)(x)))
752 #define ecb_bswap64(x) ((uint64_t)_byteswap_uint64 ((uint64_t)(x)))
164#else 753#else
754 ecb_function_ ecb_const uint16_t ecb_bswap16 (uint16_t x);
755 ecb_function_ ecb_const uint16_t
756 ecb_bswap16 (uint16_t x)
757 {
758 return ecb_rotl16 (x, 8);
759 }
760
165 ecb_function_ uint32_t ecb_bswap32 (uint32_t x) ecb_const; 761 ecb_function_ ecb_const uint32_t ecb_bswap32 (uint32_t x);
166 ecb_function_ uint32_t 762 ecb_function_ ecb_const uint32_t
167 ecb_bswap32 (uint32_t x) 763 ecb_bswap32 (uint32_t x)
168 { 764 {
169 return (x >> 24) 765 return (((uint32_t)ecb_bswap16 (x)) << 16) | ecb_bswap16 (x >> 16);
170 | ((x >> 8) & 0x0000ff00)
171 | ((x << 8) & 0x00ff0000)
172 | (x << 24);
173 } 766 }
174 767
175 ecb_function_ uint32_t ecb_bswap16 (uint32_t x) ecb_const; 768 ecb_function_ ecb_const uint64_t ecb_bswap64 (uint64_t x);
176 ecb_function_ uint32_t 769 ecb_function_ ecb_const uint64_t
177 ecb_bswap16 (uint32_t x) 770 ecb_bswap64 (uint64_t x)
178 { 771 {
179 return ((x >> 8) & 0xff) 772 return (((uint64_t)ecb_bswap32 (x)) << 32) | ecb_bswap32 (x >> 32);
180 | ((x << 8) & 0x00ff0000)
181 | (x << 24);
182 } 773 }
183#endif 774#endif
184 775
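/* Illustrative sketch, not part of ecb.h: byte swapping on concrete values. */
ecb_inline void
ecb_example_bswap (void)
{
  uint32_t v = ecb_bswap32 (0x11223344U); /* == 0x44332211 */
  uint16_t w = ecb_bswap16 (0xabcdU); /* == 0xcdab */

  (void)v; (void)w;
}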
185#if ECB_GCC_VERSION(4,5) 776#if ECB_GCC_VERSION(4,5) || ECB_CLANG_BUILTIN(__builtin_unreachable)
186 #define ecb_unreachable() __builtin_unreachable () 777 #define ecb_unreachable() __builtin_unreachable ()
187#else 778#else
188 /* this seems to work fine, but gcc always emits a warning for it :/ */ 779 /* this seems to work fine, but gcc always emits a warning for it :/ */
189 ecb_function_ void ecb_unreachable (void) ecb_noreturn; 780 ecb_inline ecb_noreturn void ecb_unreachable (void);
190 ecb_function_ void ecb_unreachable (void) { } 781 ecb_inline ecb_noreturn void ecb_unreachable (void) { }
191#endif 782#endif
192 783
193ecb_function_ unsigned char ecb_byteorder_helper (void) ecb_const; 784/* try to tell the compiler that some condition is definitely true */
194ecb_function_ unsigned char 785#define ecb_assume(cond) if (!(cond)) ecb_unreachable (); else 0
786
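/* Illustrative sketch, not part of ecb.h: ecb_assume lets the optimiser
 * rely on a condition it cannot prove itself; if the condition is ever
 * false, behaviour is undefined. The function below is hypothetical. */
ecb_inline int
ecb_example_lowest_bit (uint32_t nonzero_mask)
{
  ecb_assume (nonzero_mask != 0); /* caller promises at least one set bit */
  return ecb_ctz32 (nonzero_mask); /* ecb_ctz32 does not support x == 0 */
}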
787ecb_inline ecb_const uint32_t ecb_byteorder_helper (void);
788ecb_inline ecb_const uint32_t
195ecb_byteorder_helper (void) 789ecb_byteorder_helper (void)
196{ 790{
197 const uint32_t u = 0x11223344; 791 /* the union code still generates code under pressure in gcc, */
198 return *(unsigned char *)&u; 792 /* but less than using pointers, and always seems to */
793 /* successfully return a constant. */
794 /* the reason why we have this horrible preprocessor mess */
795 /* is to avoid it in all cases, at least on common architectures */
796 /* or when using a recent enough gcc version (>= 4.6) */
797#if (defined __BYTE_ORDER__ && __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__) \
798 || ((__i386 || __i386__ || _M_IX86 || ECB_GCC_AMD64 || ECB_MSVC_AMD64) && !__VOS__)
799 #define ECB_LITTLE_ENDIAN 1
800 return 0x44332211;
801#elif (defined __BYTE_ORDER__ && __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__) \
802 || ((__AARCH64EB__ || __MIPSEB__ || __ARMEB__) && !__VOS__)
803 #define ECB_BIG_ENDIAN 1
804 return 0x11223344;
805#else
806 union
807 {
808 uint8_t c[4];
809 uint32_t u;
810 } u = { 0x11, 0x22, 0x33, 0x44 };
811 return u.u;
812#endif
199} 813}
200 814
201ecb_function_ ecb_bool ecb_big_endian (void) ecb_const; 815ecb_inline ecb_const ecb_bool ecb_big_endian (void);
202ecb_function_ ecb_bool ecb_big_endian (void) { return ecb_byteorder_helper () == 0x11; }; 816ecb_inline ecb_const ecb_bool ecb_big_endian (void) { return ecb_byteorder_helper () == 0x11223344; }
203ecb_function_ ecb_bool ecb_little_endian (void) ecb_const; 817ecb_inline ecb_const ecb_bool ecb_little_endian (void);
204ecb_function_ ecb_bool ecb_little_endian (void) { return ecb_byteorder_helper () == 0x44; }; 818ecb_inline ecb_const ecb_bool ecb_little_endian (void) { return ecb_byteorder_helper () == 0x44332211; }
819
820/*****************************************************************************/
821/* unaligned load/store */
822
823ecb_inline uint_fast16_t ecb_be_u16_to_host (uint_fast16_t v) { return ecb_little_endian () ? ecb_bswap16 (v) : v; }
824ecb_inline uint_fast32_t ecb_be_u32_to_host (uint_fast32_t v) { return ecb_little_endian () ? ecb_bswap32 (v) : v; }
825ecb_inline uint_fast64_t ecb_be_u64_to_host (uint_fast64_t v) { return ecb_little_endian () ? ecb_bswap64 (v) : v; }
826
827ecb_inline uint_fast16_t ecb_le_u16_to_host (uint_fast16_t v) { return ecb_big_endian () ? ecb_bswap16 (v) : v; }
828ecb_inline uint_fast32_t ecb_le_u32_to_host (uint_fast32_t v) { return ecb_big_endian () ? ecb_bswap32 (v) : v; }
829ecb_inline uint_fast64_t ecb_le_u64_to_host (uint_fast64_t v) { return ecb_big_endian () ? ecb_bswap64 (v) : v; }
830
831ecb_inline uint_fast16_t ecb_peek_u16_u (const void *ptr) { uint16_t v; memcpy (&v, ptr, sizeof (v)); return v; }
832ecb_inline uint_fast32_t ecb_peek_u32_u (const void *ptr) { uint32_t v; memcpy (&v, ptr, sizeof (v)); return v; }
833ecb_inline uint_fast64_t ecb_peek_u64_u (const void *ptr) { uint64_t v; memcpy (&v, ptr, sizeof (v)); return v; }
834
835ecb_inline uint_fast16_t ecb_peek_be_u16_u (const void *ptr) { return ecb_be_u16_to_host (ecb_peek_u16_u (ptr)); }
836ecb_inline uint_fast32_t ecb_peek_be_u32_u (const void *ptr) { return ecb_be_u32_to_host (ecb_peek_u32_u (ptr)); }
837ecb_inline uint_fast64_t ecb_peek_be_u64_u (const void *ptr) { return ecb_be_u64_to_host (ecb_peek_u64_u (ptr)); }
838
839ecb_inline uint_fast16_t ecb_peek_le_u16_u (const void *ptr) { return ecb_le_u16_to_host (ecb_peek_u16_u (ptr)); }
840ecb_inline uint_fast32_t ecb_peek_le_u32_u (const void *ptr) { return ecb_le_u32_to_host (ecb_peek_u32_u (ptr)); }
841ecb_inline uint_fast64_t ecb_peek_le_u64_u (const void *ptr) { return ecb_le_u64_to_host (ecb_peek_u64_u (ptr)); }
842
843ecb_inline uint_fast16_t ecb_host_to_be_u16 (uint_fast16_t v) { return ecb_little_endian () ? ecb_bswap16 (v) : v; }
844ecb_inline uint_fast32_t ecb_host_to_be_u32 (uint_fast32_t v) { return ecb_little_endian () ? ecb_bswap32 (v) : v; }
845ecb_inline uint_fast64_t ecb_host_to_be_u64 (uint_fast64_t v) { return ecb_little_endian () ? ecb_bswap64 (v) : v; }
846
847ecb_inline uint_fast16_t ecb_host_to_le_u16 (uint_fast16_t v) { return ecb_big_endian () ? ecb_bswap16 (v) : v; }
848ecb_inline uint_fast32_t ecb_host_to_le_u32 (uint_fast32_t v) { return ecb_big_endian () ? ecb_bswap32 (v) : v; }
849ecb_inline uint_fast64_t ecb_host_to_le_u64 (uint_fast64_t v) { return ecb_big_endian () ? ecb_bswap64 (v) : v; }
850
851ecb_inline void ecb_poke_u16_u (void *ptr, uint16_t v) { memcpy (ptr, &v, sizeof (v)); }
852ecb_inline void ecb_poke_u32_u (void *ptr, uint32_t v) { memcpy (ptr, &v, sizeof (v)); }
853ecb_inline void ecb_poke_u64_u (void *ptr, uint64_t v) { memcpy (ptr, &v, sizeof (v)); }
854
855ecb_inline void ecb_poke_be_u16_u (void *ptr, uint_fast16_t v) { ecb_poke_u16_u (ptr, ecb_host_to_be_u16 (v)); }
856ecb_inline void ecb_poke_be_u32_u (void *ptr, uint_fast32_t v) { ecb_poke_u32_u (ptr, ecb_host_to_be_u32 (v)); }
857ecb_inline void ecb_poke_be_u64_u (void *ptr, uint_fast64_t v) { ecb_poke_u64_u (ptr, ecb_host_to_be_u64 (v)); }
858
859ecb_inline void ecb_poke_le_u16_u (void *ptr, uint_fast16_t v) { ecb_poke_u16_u (ptr, ecb_host_to_le_u16 (v)); }
860ecb_inline void ecb_poke_le_u32_u (void *ptr, uint_fast32_t v) { ecb_poke_u32_u (ptr, ecb_host_to_le_u32 (v)); }
861ecb_inline void ecb_poke_le_u64_u (void *ptr, uint_fast64_t v) { ecb_poke_u64_u (ptr, ecb_host_to_le_u64 (v)); }
862
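/* Illustrative sketch, not part of ecb.h: reading a big-endian, possibly
 * unaligned 32 bit length field from a byte buffer and writing it back in
 * little-endian order. The helpers use memcpy, so alignment never matters.
 * The function name is hypothetical. */
ecb_inline uint32_t
ecb_example_reframe_length (const unsigned char *src, unsigned char *dst)
{
  uint32_t len = ecb_peek_be_u32_u (src);
  ecb_poke_le_u32_u (dst, len);
  return len;
}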
863#if ECB_CPP
864
865inline uint8_t ecb_bswap (uint8_t v) { return v; }
866inline uint16_t ecb_bswap (uint16_t v) { return ecb_bswap16 (v); }
867inline uint32_t ecb_bswap (uint32_t v) { return ecb_bswap32 (v); }
868inline uint64_t ecb_bswap (uint64_t v) { return ecb_bswap64 (v); }
869
870template<typename T> inline T ecb_be_to_host (T v) { return ecb_little_endian () ? ecb_bswap (v) : v; }
871template<typename T> inline T ecb_le_to_host (T v) { return ecb_big_endian () ? ecb_bswap (v) : v; }
872template<typename T> inline T ecb_peek (const void *ptr) { return *(const T *)ptr; }
873template<typename T> inline T ecb_peek_be (const void *ptr) { return ecb_be_to_host (ecb_peek <T> (ptr)); }
874template<typename T> inline T ecb_peek_le (const void *ptr) { return ecb_le_to_host (ecb_peek <T> (ptr)); }
875template<typename T> inline T ecb_peek_u (const void *ptr) { T v; memcpy (&v, ptr, sizeof (v)); return v; }
876template<typename T> inline T ecb_peek_be_u (const void *ptr) { return ecb_be_to_host (ecb_peek_u<T> (ptr)); }
877template<typename T> inline T ecb_peek_le_u (const void *ptr) { return ecb_le_to_host (ecb_peek_u<T> (ptr)); }
878
879template<typename T> inline T ecb_host_to_be (T v) { return ecb_little_endian () ? ecb_bswap (v) : v; }
880template<typename T> inline T ecb_host_to_le (T v) { return ecb_big_endian () ? ecb_bswap (v) : v; }
881template<typename T> inline void ecb_poke (void *ptr, T v) { *(T *)ptr = v; }
882template<typename T> inline void ecb_poke_be (void *ptr, T v) { return ecb_poke <T> (ptr, ecb_host_to_be (v)); }
883template<typename T> inline void ecb_poke_le (void *ptr, T v) { return ecb_poke <T> (ptr, ecb_host_to_le (v)); }
884template<typename T> inline void ecb_poke_u (void *ptr, T v) { memcpy (ptr, &v, sizeof (v)); }
885template<typename T> inline void ecb_poke_be_u (void *ptr, T v) { return ecb_poke_u<T> (ptr, ecb_host_to_be (v)); }
886template<typename T> inline void ecb_poke_le_u (void *ptr, T v) { return ecb_poke_u<T> (ptr, ecb_host_to_le (v)); }
887
888#endif
889
890/*****************************************************************************/
891/* pointer/integer hashing */
892
893/* based on hash by Chris Wellons, https://nullprogram.com/blog/2018/07/31/ */
894ecb_function_ uint32_t ecb_mix32 (uint32_t v);
895ecb_function_ uint32_t ecb_mix32 (uint32_t v)
896{
897 v ^= v >> 16; v *= 0x7feb352dU;
898 v ^= v >> 15; v *= 0x846ca68bU;
899 v ^= v >> 16;
900 return v;
901}
902
903ecb_function_ uint32_t ecb_unmix32 (uint32_t v);
904ecb_function_ uint32_t ecb_unmix32 (uint32_t v)
905{
906 v ^= v >> 16 ; v *= 0x43021123U;
907 v ^= v >> 15 ^ v >> 30; v *= 0x1d69e2a5U;
908 v ^= v >> 16 ;
909 return v;
910}
911
912/* based on splitmix64, by Sebastiano Vigna, https://prng.di.unimi.it/splitmix64.c */
913ecb_function_ uint64_t ecb_mix64 (uint64_t v);
914ecb_function_ uint64_t ecb_mix64 (uint64_t v)
915{
916 v ^= v >> 30; v *= 0xbf58476d1ce4e5b9U;
917 v ^= v >> 27; v *= 0x94d049bb133111ebU;
918 v ^= v >> 31;
919 return v;
920}
921
922ecb_function_ uint64_t ecb_unmix64 (uint64_t v);
923ecb_function_ uint64_t ecb_unmix64 (uint64_t v)
924{
925 v ^= v >> 31 ^ v >> 62; v *= 0x319642b2d24d8ec3U;
926 v ^= v >> 27 ^ v >> 54; v *= 0x96de1b173f119089U;
927 v ^= v >> 30 ^ v >> 60;
928 return v;
929}
930
931ecb_function_ uintptr_t ecb_ptrmix (void *p);
932ecb_function_ uintptr_t ecb_ptrmix (void *p)
933{
934 #if ECB_PTRSIZE <= 4
935 return ecb_mix32 ((uint32_t)p);
936 #else
937 return ecb_mix64 ((uint64_t)p);
938 #endif
939}
940
941ecb_function_ void *ecb_ptrunmix (uintptr_t v);
942ecb_function_ void *ecb_ptrunmix (uintptr_t v)
943{
944 #if ECB_PTRSIZE <= 4
945 return (void *)ecb_unmix32 (v);
946 #else
947 return (void *)ecb_unmix64 (v);
948 #endif
949}
950
951#if ECB_CPP
952
953template<typename T>
954inline uintptr_t ecb_ptrmix (T *p)
955{
956 return ecb_ptrmix (static_cast<void *>(p));
957}
958
959template<typename T>
960inline T *ecb_ptrunmix (uintptr_t v)
961{
962 return static_cast<T *>(ecb_ptrunmix (v));
963}
964
965#endif
966
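/* Illustrative sketch, not part of ecb.h: ecb_ptrmix as a cheap hash for a
 * power-of-two sized table; ecb_mix32/ecb_unmix32 (and the 64 bit pair)
 * are exact inverses, so mixing loses no information. The function name
 * is hypothetical. */
ecb_inline size_t
ecb_example_hash_bucket (void *p, size_t pot_table_size)
{
  /* pot_table_size must be a power of two, see ecb_is_pot32/64 above */
  return ecb_ptrmix (p) & (pot_table_size - 1);
}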
967/*****************************************************************************/
968/* gray code */
969
970ecb_inline uint_fast8_t ecb_gray_encode8 (uint_fast8_t b) { return b ^ (b >> 1); }
971ecb_inline uint_fast16_t ecb_gray_encode16 (uint_fast16_t b) { return b ^ (b >> 1); }
972ecb_inline uint_fast32_t ecb_gray_encode32 (uint_fast32_t b) { return b ^ (b >> 1); }
973ecb_inline uint_fast64_t ecb_gray_encode64 (uint_fast64_t b) { return b ^ (b >> 1); }
974
975ecb_function_ uint8_t ecb_gray_decode8 (uint8_t g);
976ecb_function_ uint8_t ecb_gray_decode8 (uint8_t g)
977{
978 g ^= g >> 1;
979 g ^= g >> 2;
980 g ^= g >> 4;
981
982 return g;
983}
984
985ecb_function_ uint16_t ecb_gray_decode16 (uint16_t g);
986ecb_function_ uint16_t ecb_gray_decode16 (uint16_t g)
987{
988 g ^= g >> 1;
989 g ^= g >> 2;
990 g ^= g >> 4;
991 g ^= g >> 8;
992
993 return g;
994}
995
996ecb_function_ uint32_t ecb_gray_decode32 (uint32_t g);
997ecb_function_ uint32_t ecb_gray_decode32 (uint32_t g)
998{
999 g ^= g >> 1;
1000 g ^= g >> 2;
1001 g ^= g >> 4;
1002 g ^= g >> 8;
1003 g ^= g >> 16;
1004
1005 return g;
1006}
1007
1008ecb_function_ uint64_t ecb_gray_decode64 (uint64_t g);
1009ecb_function_ uint64_t ecb_gray_decode64 (uint64_t g)
1010{
1011 g ^= g >> 1;
1012 g ^= g >> 2;
1013 g ^= g >> 4;
1014 g ^= g >> 8;
1015 g ^= g >> 16;
1016 g ^= g >> 32;
1017
1018 return g;
1019}
1020
1021#if ECB_CPP
1022
1023ecb_inline uint8_t ecb_gray_encode (uint8_t b) { return ecb_gray_encode8 (b); }
1024ecb_inline uint16_t ecb_gray_encode (uint16_t b) { return ecb_gray_encode16 (b); }
1025ecb_inline uint32_t ecb_gray_encode (uint32_t b) { return ecb_gray_encode32 (b); }
1026ecb_inline uint64_t ecb_gray_encode (uint64_t b) { return ecb_gray_encode64 (b); }
1027
1028ecb_inline uint8_t ecb_gray_decode (uint8_t g) { return ecb_gray_decode8 (g); }
1029ecb_inline uint16_t ecb_gray_decode (uint16_t g) { return ecb_gray_decode16 (g); }
1030ecb_inline uint32_t ecb_gray_decode (uint32_t g) { return ecb_gray_decode32 (g); }
1031ecb_inline uint64_t ecb_gray_decode (uint64_t g) { return ecb_gray_decode64 (g); }
1032
1033#endif
1034
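/* Illustrative sketch, not part of ecb.h: gray codes of consecutive
 * integers differ in exactly one bit, and decode inverts encode. */
ecb_inline void
ecb_example_gray (void)
{
  uint8_t a = ecb_gray_encode8 (5); /* 0b101 -> 0b111 */
  uint8_t b = ecb_gray_encode8 (6); /* 0b110 -> 0b101 */

  int one_bit = ecb_popcount32 (a ^ b); /* == 1 */
  uint8_t back = ecb_gray_decode8 (a); /* == 5 again */

  (void)one_bit; (void)back;
}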
1035/*****************************************************************************/
1036/* 2d hilbert curves */
1037
1038/* algorithm from the book Hacker's Delight, modified to not */
1039/* run into undefined behaviour for n==16 */
1040static uint32_t ecb_hilbert2d_index_to_coord32 (int n, uint32_t s);
1041static uint32_t ecb_hilbert2d_index_to_coord32 (int n, uint32_t s)
1042{
1043 uint32_t comp, swap, cs, t, sr;
1044
1045 /* pad s on the left (unused) bits with 01 (no change groups) */
1046 s |= 0x55555555U << n << n;
1047 /* "s shift right" */
1048 sr = (s >> 1) & 0x55555555U;
1049 /* compute complement and swap info in two-bit groups */
1050 cs = ((s & 0x55555555U) + sr) ^ 0x55555555U;
1051
1052 /* parallel prefix xor op to propagate both complement
1053 * and swap info together from left to right (there is
1054 * no step "cs ^= cs >> 1", so in effect it computes
1055 * two independent parallel prefix operations on two
1056 * interleaved sets of sixteen bits).
1057 */
1058 cs ^= cs >> 2;
1059 cs ^= cs >> 4;
1060 cs ^= cs >> 8;
1061 cs ^= cs >> 16;
1062
1063 /* separate swap and complement bits */
1064 swap = cs & 0x55555555U;
1065 comp = (cs >> 1) & 0x55555555U;
1066
1067 /* calculate coordinates in odd and even bit positions */
1068 t = (s & swap) ^ comp;
1069 s = s ^ sr ^ t ^ (t << 1);
1070
1071 /* unpad/clear out any junk on the left */
1072 s = s & ((1 << n << n) - 1);
1073
1074 /* Now "unshuffle" to separate the x and y bits. */
1075 t = (s ^ (s >> 1)) & 0x22222222U; s ^= t ^ (t << 1);
1076 t = (s ^ (s >> 2)) & 0x0c0c0c0cU; s ^= t ^ (t << 2);
1077 t = (s ^ (s >> 4)) & 0x00f000f0U; s ^= t ^ (t << 4);
1078 t = (s ^ (s >> 8)) & 0x0000ff00U; s ^= t ^ (t << 8);
1079
1080 /* now s contains two 16-bit coordinates */
1081 return s;
1082}
1083
1084/* 64 bit, a straightforward extension to the 32 bit case */
1085static uint64_t ecb_hilbert2d_index_to_coord64 (int n, uint64_t s);
1086static uint64_t ecb_hilbert2d_index_to_coord64 (int n, uint64_t s)
1087{
1088 uint64_t comp, swap, cs, t, sr;
1089
1090 /* pad s on the left (unused) bits with 01 (no change groups) */
1091 s |= 0x5555555555555555U << n << n;
1092 /* "s shift right" */
1093 sr = (s >> 1) & 0x5555555555555555U;
1094 /* compute complement and swap info in two-bit groups */
1095 cs = ((s & 0x5555555555555555U) + sr) ^ 0x5555555555555555U;
1096
1097 /* parallel prefix xor op to propagate both complement
1098 * and swap info together from left to right (there is
1099 * no step "cs ^= cs >> 1", so in effect it computes
1100 * two independent parallel prefix operations on two
1101 * interleaved sets of thirty-two bits).
1102 */
1103 cs ^= cs >> 2;
1104 cs ^= cs >> 4;
1105 cs ^= cs >> 8;
1106 cs ^= cs >> 16;
1107 cs ^= cs >> 32;
1108
1109 /* separate swap and complement bits */
1110 swap = cs & 0x5555555555555555U;
1111 comp = (cs >> 1) & 0x5555555555555555U;
1112
1113 /* calculate coordinates in odd and even bit positions */
1114 t = (s & swap) ^ comp;
1115 s = s ^ sr ^ t ^ (t << 1);
1116
1117 /* unpad/clear out any junk on the left */
1118 s = s & ((1 << n << n) - 1);
1119
1120 /* Now "unshuffle" to separate the x and y bits. */
1121 t = (s ^ (s >> 1)) & 0x2222222222222222U; s ^= t ^ (t << 1);
1122 t = (s ^ (s >> 2)) & 0x0c0c0c0c0c0c0c0cU; s ^= t ^ (t << 2);
1123 t = (s ^ (s >> 4)) & 0x00f000f000f000f0U; s ^= t ^ (t << 4);
1124 t = (s ^ (s >> 8)) & 0x0000ff000000ff00U; s ^= t ^ (t << 8);
1125 t = (s ^ (s >> 16)) & 0x00000000ffff0000U; s ^= t ^ (t << 16);
1126
1127 /* now s contains two 32-bit coordinates */
1128 return s;
1129}
1130
1131/* algorithm from the book Hacker's Delight, but a similar algorithm */
1132/* is given in https://doi.org/10.1002/spe.4380160103 */
1133/* this has been slightly improved over the original version */
1134ecb_function_ uint32_t ecb_hilbert2d_coord_to_index32 (int n, uint32_t xy);
1135ecb_function_ uint32_t ecb_hilbert2d_coord_to_index32 (int n, uint32_t xy)
1136{
1137 uint32_t row;
1138 uint32_t state = 0;
1139 uint32_t s = 0;
1140
1141 do
1142 {
1143 --n;
1144
1145 row = 4 * state
1146 | (2 & (xy >> n >> 15))
1147 | (1 & (xy >> n ));
1148
1149 /* these funky constants are lookup tables for two-bit values */
1150 s = (s << 2) | (0x361e9cb4U >> 2 * row) & 3;
1151 state = (0x8fe65831U >> 2 * row) & 3;
1152 }
1153 while (n > 0);
1154
1155 return s;
1156}
1157
1158/* 64 bit, essentially the same as 32 bit */
1159ecb_function_ uint64_t ecb_hilbert2d_coord_to_index64 (int n, uint64_t xy);
1160ecb_function_ uint64_t ecb_hilbert2d_coord_to_index64 (int n, uint64_t xy)
1161{
1162 uint32_t row;
1163 uint32_t state = 0;
1164 uint64_t s = 0;
1165
1166 do
1167 {
1168 --n;
1169
1170 row = 4 * state
1171 | (2 & (xy >> n >> 31))
1172 | (1 & (xy >> n ));
1173
1174 /* these funky constants are lookup tables for two-bit values */
1175 s = (s << 2) | (0x361e9cb4U >> 2 * row) & 3;
1176 state = (0x8fe65831U >> 2 * row) & 3;
1177 }
1178 while (n > 0);
1179
1180 return s;
1181}
1182
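/* Illustrative sketch, not part of ecb.h: for the same order n the two
 * conversions above are intended to be inverses of each other; the two
 * 16 bit coordinates travel packed into one 32 bit value. */
ecb_inline void
ecb_example_hilbert (void)
{
  int n = 8; /* an order-8 curve covers 256x256 cells */
  uint32_t s = 12345; /* any index below 2^(2*n) */
  uint32_t xy = ecb_hilbert2d_index_to_coord32 (n, s);
  uint32_t s2 = ecb_hilbert2d_coord_to_index32 (n, xy); /* == s again */

  (void)s2;
}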
1183/*****************************************************************************/
1184/* division */
205 1185
206#if ECB_GCC_VERSION(3,0) || ECB_C99 1186#if ECB_GCC_VERSION(3,0) || ECB_C99
1187 /* C99 tightened the definition of %, so we can use a more efficient version */
207 #define ecb_mod(m,n) ((m) % (n) + ((m) % (n) < 0 ? (n) : 0)) 1188 #define ecb_mod(m,n) ((m) % (n) + ((m) % (n) < 0 ? (n) : 0))
208#else 1189#else
209 #define ecb_mod(m,n) ((m) < 0 ? ((n) - 1 - ((-1 - (m)) % (n))) : ((m) % (n))) 1190 #define ecb_mod(m,n) ((m) < 0 ? ((n) - 1 - ((-1 - (m)) % (n))) : ((m) % (n)))
210#endif 1191#endif
1192
1193#if ECB_CPP
1194 template<typename T>
1195 static inline T ecb_div_rd (T val, T div)
1196 {
1197 return val < 0 ? - ((-val + div - 1) / div) : (val ) / div;
1198 }
1199 template<typename T>
1200 static inline T ecb_div_ru (T val, T div)
1201 {
1202 return val < 0 ? - ((-val ) / div) : (val + div - 1) / div;
1203 }
1204#else
1205 #define ecb_div_rd(val,div) ((val) < 0 ? - ((-(val) + (div) - 1) / (div)) : ((val) ) / (div))
1206 #define ecb_div_ru(val,div) ((val) < 0 ? - ((-(val) ) / (div)) : ((val) + (div) - 1) / (div))
1207#endif
1208
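/* Illustrative sketch, not part of ecb.h: unlike C's "%" and "/", these
 * round consistently for negative operands. */
ecb_inline void
ecb_example_division (void)
{
  int m = ecb_mod (-7, 5); /* == 3, whereas -7 % 5 is -2 in C99 */
  int d = ecb_div_rd (-7, 5); /* == -2 (round down), whereas -7 / 5 is -1 */
  int u = ecb_div_ru (7, 5); /* == 2 (round up) */

  (void)m; (void)d; (void)u;
}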
1209/*****************************************************************************/
1210/* array length */
211 1211
212#if ecb_cplusplus_does_not_suck 1212#if ecb_cplusplus_does_not_suck
213 /* does not work for local types (http://www.open-std.org/jtc1/sc22/wg21/docs/papers/2008/n2657.htm) */ 1213 /* does not work for local types (http://www.open-std.org/jtc1/sc22/wg21/docs/papers/2008/n2657.htm) */
214 template<typename T, int N> 1214 template<typename T, int N>
215 static inline int ecb_array_length (const T (&arr)[N]) 1215 static inline int ecb_array_length (const T (&arr)[N])
218 } 1218 }
219#else 1219#else
220 #define ecb_array_length(name) (sizeof (name) / sizeof (name [0])) 1220 #define ecb_array_length(name) (sizeof (name) / sizeof (name [0]))
221#endif 1221#endif
222 1222
223ECB_INLINE uint32_t ecb_rotr32 (uint32_t x, unsigned int count) ecb_const; 1223/*****************************************************************************/
224ECB_INLINE uint32_t 1224/* IEEE 754-2008 half float conversions */
225ecb_rotr32 (uint32_t x, unsigned int count)
226{
227 return (x << (32 - count)) | (x >> count);
228}
229 1225
230ECB_INLINE uint32_t ecb_rotl32 (uint32_t x, unsigned int count) ecb_const; 1226ecb_function_ ecb_const uint32_t ecb_binary16_to_binary32 (uint32_t x);
231ECB_INLINE uint32_t 1227ecb_function_ ecb_const uint32_t
232ecb_rotl32 (uint32_t x, unsigned int count) 1228ecb_binary16_to_binary32 (uint32_t x)
233{ 1229{
234 return (x >> (32 - count)) | (x << count); 1230 unsigned int s = (x & 0x8000) << (31 - 15);
235} 1231 int e = (x >> 10) & 0x001f;
1232 unsigned int m = x & 0x03ff;
236 1233
1234 if (ecb_expect_false (e == 31))
1235 /* infinity or NaN */
1236 e = 255 - (127 - 15);
1237 else if (ecb_expect_false (!e))
1238 {
1239 if (ecb_expect_true (!m))
1240 /* zero, handled by code below by forcing e to 0 */
1241 e = 0 - (127 - 15);
1242 else
1243 {
1244 /* subnormal, renormalise */
1245 unsigned int s = 10 - ecb_ld32 (m);
1246
1247 m = (m << s) & 0x3ff; /* mask implicit bit */
1248 e -= s - 1;
1249 }
1250 }
1251
1252 /* e and m now are normalised, or zero, (or inf or nan) */
1253 e += 127 - 15;
1254
1255 return s | (e << 23) | (m << (23 - 10));
1256}
1257
1258ecb_function_ ecb_const uint16_t ecb_binary32_to_binary16 (uint32_t x);
1259ecb_function_ ecb_const uint16_t
1260ecb_binary32_to_binary16 (uint32_t x)
1261{
1262 unsigned int s = (x >> 16) & 0x00008000; /* sign bit, the easy part */
1263 int e = ((x >> 23) & 0x000000ff) - (127 - 15); /* the desired exponent */
1264 unsigned int m = x & 0x007fffff;
1265
1266 x &= 0x7fffffff;
1267
1268 /* if it's within range of binary16 normals, use fast path */
1269 if (ecb_expect_true (0x38800000 <= x && x <= 0x477fefff))
1270 {
1271 /* mantissa round-to-even */
1272 m += 0x00000fff + ((m >> (23 - 10)) & 1);
1273
1274 /* handle overflow */
1275 if (ecb_expect_false (m >= 0x00800000))
1276 {
1277 m >>= 1;
1278 e += 1;
1279 }
1280
1281 return s | (e << 10) | (m >> (23 - 10));
1282 }
1283
1284 /* handle large numbers and infinity */
1285 if (ecb_expect_true (0x477fefff < x && x <= 0x7f800000))
1286 return s | 0x7c00;
1287
1288 /* handle zero, subnormals and small numbers */
1289 if (ecb_expect_true (x < 0x38800000))
1290 {
1291 /* zero */
1292 if (ecb_expect_true (!x))
1293 return s;
1294
1295 /* handle subnormals */
1296
1297 /* too small, will be zero */
1298 if (e < (14 - 24)) /* might not be sharp, but is good enough */
1299 return s;
1300
1301 m |= 0x00800000; /* make implicit bit explicit */
1302
1303 /* very tricky - we need to round to the nearest e (+10) bit value */
1304 {
1305 unsigned int bits = 14 - e;
1306 unsigned int half = (1 << (bits - 1)) - 1;
1307 unsigned int even = (m >> bits) & 1;
1308
1309 /* if this overflows, we will end up with a normalised number */
1310 m = (m + half + even) >> bits;
1311 }
1312
1313 return s | m;
1314 }
1315
1316 /* handle NaNs, preserve leftmost nan bits, but make sure we don't turn them into infinities */
1317 m >>= 13;
1318
1319 return s | 0x7c00 | m | !m;
1320}
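
/* A minimal sketch of how the two converters above fit together (the
 * function name is hypothetical, not part of libecb): binary16 0x3c00 is
 * +1.0 (s = 0, e = 15, m = 0); widening it yields binary32 0x3f800000, and
 * narrowing that again returns the original 0x3c00. */
ecb_inline int
example_binary16_roundtrip_ok (void)
{
  uint32_t f32 = ecb_binary16_to_binary32 (0x3c00); /* expect 0x3f800000 */
  uint16_t f16 = ecb_binary32_to_binary16 (f32);    /* expect 0x3c00 */

  return f32 == 0x3f800000 && f16 == 0x3c00;
}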
1321
1322/*******************************************************************************/
1323/* fast integer to ascii */
1324
1325/*
1326 * This code is pretty complicated because it is general. The idea behind it,
1327 * however, is pretty simple: first, the number is multiplied by a scaling
1328 * factor (2**bits / 10**(digits-1)) to convert the integer into a fixed-point
1329 * number with the first digit in the upper bits.
1330 * Then this digit is converted to text and masked out. The resulting number
1331 * is then multiplied by 10, by multiplying the fixed-point representation
1332 * by 5 and shifting the (binary) decimal point one to the right, so a 4.28
1333 * format becomes 5.27, 6.26 and so on.
1334 * The rest is just advancing the output pointer only when a non-zero digit
1335 * has been generated already, so leading zeroes are overwritten.
1336 */
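
/* A minimal sketch of the idea (illustrative only, not used by the macros
 * below): convert a value < 10000 to exactly four digits. The scale factor
 * is roughly 2**26 / 1000, so u becomes a 4.26 fixed-point number whose
 * integer part is the leading decimal digit; masking that digit off and
 * multiplying by 5 while moving the binary point right by one bit is the
 * same as multiplying by 10. */
ecb_inline char *
example_i2a_4digits (char *ptr, uint32_t u) /* hypothetical, u < 10000 */
{
  uint32_t x = u * (((((uint32_t)1 << 26) - 1) + 1000) / 1000); /* 4.26 fixed point */
  int i;

  for (i = 0; i < 4; ++i)
    {
      *ptr++ = (char)('0' + (x >> (26 - i)));        /* topmost decimal digit */
      x = (x & (((uint32_t)1 << (26 - i)) - 1)) * 5; /* *10, binary point moves right */
    }

  return ptr;
}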
1337
1338/* simply return a mask with "bits" bits set */
1339#define ecb_i2a_mask(type,bits) ((((type)1) << (bits)) - 1)
1340
1341/* output a single digit. maskvalue is 10**digitidx */
1342#define ecb_i2a_digit(type,bits,digitmask,maskvalue,digitidx) \
1343 if (digitmask >= maskvalue) /* constant, used to decide how many digits to generate */ \
1344 { \
1345 char digit = x >> (bits - digitidx); /* calculate the topmost digit */ \
1346 *ptr = digit + '0'; /* output it */ \
1347 nz = (digitmask == maskvalue) || nz || digit; /* first term == always output last digit */ \
1348 ptr += nz; /* output digit only if non-zero digit seen */ \
1349 x = (x & ecb_i2a_mask (type, bits - digitidx)) * 5; /* *10, but shift decimal point right */ \
1350 }
1351
1352/* convert integer to fixed point format and multiply out digits, highest first */
1353/* requires magic constants: max. digits and number of bits after the decimal point */
1354#define ecb_i2a_def(suffix,ptr,v,type,bits,digitmask,lz) \
1355ecb_inline char *ecb_i2a_ ## suffix (char *ptr, uint32_t u) \
1356{ \
1357 char nz = lz; /* non-zero digit seen? */ \
1358 /* convert to x.bits fixed-point */ \
1359 type x = u * ((ecb_i2a_mask (type, bits) + digitmask) / digitmask); \
1360 /* output up to 10 digits */ \
1361 ecb_i2a_digit (type,bits,digitmask, 1, 0); \
1362 ecb_i2a_digit (type,bits,digitmask, 10, 1); \
1363 ecb_i2a_digit (type,bits,digitmask, 100, 2); \
1364 ecb_i2a_digit (type,bits,digitmask, 1000, 3); \
1365 ecb_i2a_digit (type,bits,digitmask, 10000, 4); \
1366 ecb_i2a_digit (type,bits,digitmask, 100000, 5); \
1367 ecb_i2a_digit (type,bits,digitmask, 1000000, 6); \
1368 ecb_i2a_digit (type,bits,digitmask, 10000000, 7); \
1369 ecb_i2a_digit (type,bits,digitmask, 100000000, 8); \
1370 ecb_i2a_digit (type,bits,digitmask, 1000000000, 9); \
1371 return ptr; \
1372}
1373
1374/* predefined versions of the above, for various digits */
1375/* ecb_i2a_xN = almost N digits, limit defined by macro */
1376/* ecb_i2a_N = up to N digits, leading zeroes suppressed */
1377/* ecb_i2a_0N = exactly N digits, including leading zeroes */
1378
1379/* non-leading-zero versions, limited range */
1380#define ECB_I2A_MAX_X5 59074 /* limit for ecb_i2a_x5 */
1381#define ECB_I2A_MAX_X10 2932500665 /* limit for ecb_i2a_x10 */
1382ecb_i2a_def ( x5, ptr, v, uint32_t, 26, 10000, 0)
1383ecb_i2a_def (x10, ptr, v, uint64_t, 60, 1000000000, 0)
1384
1385/* non-leading zero versions, all digits, 4 and 9 are optimal for 32/64 bit */
1386ecb_i2a_def ( 2, ptr, v, uint32_t, 10, 10, 0)
1387ecb_i2a_def ( 3, ptr, v, uint32_t, 12, 100, 0)
1388ecb_i2a_def ( 4, ptr, v, uint32_t, 26, 1000, 0)
1389ecb_i2a_def ( 5, ptr, v, uint64_t, 30, 10000, 0)
1390ecb_i2a_def ( 6, ptr, v, uint64_t, 36, 100000, 0)
1391ecb_i2a_def ( 7, ptr, v, uint64_t, 44, 1000000, 0)
1392ecb_i2a_def ( 8, ptr, v, uint64_t, 50, 10000000, 0)
1393ecb_i2a_def ( 9, ptr, v, uint64_t, 56, 100000000, 0)
1394
1395/* leading-zero versions, all digits, 04 and 09 are optimal for 32/64 bit */
1396ecb_i2a_def (02, ptr, v, uint32_t, 10, 10, 1)
1397ecb_i2a_def (03, ptr, v, uint32_t, 12, 100, 1)
1398ecb_i2a_def (04, ptr, v, uint32_t, 26, 1000, 1)
1399ecb_i2a_def (05, ptr, v, uint64_t, 30, 10000, 1)
1400ecb_i2a_def (06, ptr, v, uint64_t, 36, 100000, 1)
1401ecb_i2a_def (07, ptr, v, uint64_t, 44, 1000000, 1)
1402ecb_i2a_def (08, ptr, v, uint64_t, 50, 10000000, 1)
1403ecb_i2a_def (09, ptr, v, uint64_t, 56, 100000000, 1)
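
/* A small usage sketch of the naming convention above (the helper name is
 * made up): the exact-width variants are handy for timestamps and other
 * fixed-format output. */
ecb_inline char *
example_put_minutes (char *ptr, uint32_t minutes) /* 0..59 */
{
  return ecb_i2a_02 (ptr, minutes); /* always two digits, e.g. 7 becomes "07" */
}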
1404
1405#define ECB_I2A_I32_DIGITS 11
1406#define ECB_I2A_U32_DIGITS 10
1407#define ECB_I2A_I64_DIGITS 20
1408#define ECB_I2A_U64_DIGITS 21
1409#define ECB_I2A_MAX_DIGITS 21
1410
1411ecb_inline char *
1412ecb_i2a_u32 (char *ptr, uint32_t u)
1413{
1414 #if ECB_64BIT_NATIVE
1415 if (ecb_expect_true (u <= ECB_I2A_MAX_X10))
1416 ptr = ecb_i2a_x10 (ptr, u);
1417 else /* x10 almost, but not fully, covers 32 bit */
1418 {
1419 uint32_t u1 = u % 1000000000;
1420 uint32_t u2 = u / 1000000000;
1421
1422 *ptr++ = u2 + '0';
1423 ptr = ecb_i2a_09 (ptr, u1);
1424 }
1425 #else
1426 if (ecb_expect_true (u <= ECB_I2A_MAX_X5))
1427    ptr = ecb_i2a_x5 (ptr, u);
1428 else if (ecb_expect_true (u <= ECB_I2A_MAX_X5 * 10000))
1429 {
1430 uint32_t u1 = u % 10000;
1431 uint32_t u2 = u / 10000;
1432
1433 ptr = ecb_i2a_x5 (ptr, u2);
1434 ptr = ecb_i2a_04 (ptr, u1);
1435 }
1436 else
1437 {
1438 uint32_t u1 = u % 10000;
1439 uint32_t ua = u / 10000;
1440 uint32_t u2 = ua % 10000;
1441 uint32_t u3 = ua / 10000;
1442
1443 ptr = ecb_i2a_2 (ptr, u3);
1444 ptr = ecb_i2a_04 (ptr, u2);
1445 ptr = ecb_i2a_04 (ptr, u1);
1446 }
237#endif 1447 #endif
238 1448
1449 return ptr;
1450}
1451
1452ecb_inline char *
1453ecb_i2a_i32 (char *ptr, int32_t v)
1454{
1455 *ptr = '-'; ptr += v < 0;
1456 uint32_t u = v < 0 ? -(uint32_t)v : v;
1457
1458 #if ECB_64BIT_NATIVE
1459 ptr = ecb_i2a_x10 (ptr, u); /* x10 fully covers 31 bit */
1460 #else
1461 ptr = ecb_i2a_u32 (ptr, u);
1462 #endif
1463
1464 return ptr;
1465}
1466
1467ecb_inline char *
1468ecb_i2a_u64 (char *ptr, uint64_t u)
1469{
1470 #if ECB_64BIT_NATIVE
1471 if (ecb_expect_true (u <= ECB_I2A_MAX_X10))
1472 ptr = ecb_i2a_x10 (ptr, u);
1473 else if (ecb_expect_false (u <= ECB_I2A_MAX_X10 * 1000000000))
1474 {
1475 uint64_t u1 = u % 1000000000;
1476 uint64_t u2 = u / 1000000000;
1477
1478 ptr = ecb_i2a_x10 (ptr, u2);
1479 ptr = ecb_i2a_09 (ptr, u1);
1480 }
1481 else
1482 {
1483 uint64_t u1 = u % 1000000000;
1484 uint64_t ua = u / 1000000000;
1485 uint64_t u2 = ua % 1000000000;
1486 uint64_t u3 = ua / 1000000000;
1487
1488 ptr = ecb_i2a_2 (ptr, u3);
1489 ptr = ecb_i2a_09 (ptr, u2);
1490 ptr = ecb_i2a_09 (ptr, u1);
1491 }
1492 #else
1493 if (ecb_expect_true (u <= ECB_I2A_MAX_X5))
1494 ptr = ecb_i2a_x5 (ptr, u);
1495 else
1496 {
1497 uint64_t u1 = u % 10000;
1498 uint64_t u2 = u / 10000;
1499
1500 ptr = ecb_i2a_u64 (ptr, u2);
1501 ptr = ecb_i2a_04 (ptr, u1);
1502 }
1503 #endif
1504
1505 return ptr;
1506}
1507
1508ecb_inline char *
1509ecb_i2a_i64 (char *ptr, int64_t v)
1510{
1511 *ptr = '-'; ptr += v < 0;
1512 uint64_t u = v < 0 ? -(uint64_t)v : v;
1513
1514 #if ECB_64BIT_NATIVE
1515 if (ecb_expect_true (u <= ECB_I2A_MAX_X10))
1516 ptr = ecb_i2a_x10 (ptr, u);
1517 else if (ecb_expect_false (u <= ECB_I2A_MAX_X10 * 1000000000))
1518 {
1519 uint64_t u1 = u % 1000000000;
1520 uint64_t u2 = u / 1000000000;
1521
1522 ptr = ecb_i2a_x10 (ptr, u2);
1523 ptr = ecb_i2a_09 (ptr, u1);
1524 }
1525 else
1526 {
1527 uint64_t u1 = u % 1000000000;
1528 uint64_t ua = u / 1000000000;
1529 uint64_t u2 = ua % 1000000000;
1530 uint64_t u3 = ua / 1000000000;
1531
1532      /* 2**63 is 19 digits, so the top is exactly one digit */
1533 *ptr++ = u3 + '0';
1534 ptr = ecb_i2a_09 (ptr, u2);
1535 ptr = ecb_i2a_09 (ptr, u1);
1536 }
1537 #else
1538 ptr = ecb_i2a_u64 (ptr, u);
1539 #endif
1540
1541 return ptr;
1542}
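
/* A minimal usage sketch (the helper is hypothetical): the converters above
 * return the end of the generated text and do not write a terminating NUL,
 * so the caller terminates the buffer itself. ECB_I2A_MAX_DIGITS bytes plus
 * one for the NUL are always sufficient. */
ecb_inline char *
example_i64_to_string (char *buf, int64_t v) /* buf: >= ECB_I2A_MAX_DIGITS + 1 bytes */
{
  char *end = ecb_i2a_i64 (buf, v);

  *end = 0; /* NUL-terminate manually */
  return buf;
}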
1543
1544/*******************************************************************************/
1545/* floating point stuff, can be disabled by defining ECB_NO_LIBM */
1546
1547/* basically, everything uses "ieee pure-endian" floating point numbers */
1548/* the only noteworthy exception is ancient armle, which uses order 43218765 */
1549#if 0 \
1550 || __i386 || __i386__ \
1551 || ECB_GCC_AMD64 \
1552 || __powerpc__ || __ppc__ || __powerpc64__ || __ppc64__ \
1553 || defined __s390__ || defined __s390x__ \
1554 || defined __mips__ \
1555 || defined __alpha__ \
1556 || defined __hppa__ \
1557 || defined __ia64__ \
1558 || defined __m68k__ \
1559 || defined __m88k__ \
1560 || defined __sh__ \
1561 || defined _M_IX86 || defined ECB_MSVC_AMD64 || defined _M_IA64 \
1562 || (defined __arm__ && (defined __ARM_EABI__ || defined __EABI__ || defined __VFP_FP__ || defined _WIN32_WCE || defined __ANDROID__)) \
1563 || defined __aarch64__
1564 #define ECB_STDFP 1
1565#else
1566 #define ECB_STDFP 0
1567#endif
1568
1569#ifndef ECB_NO_LIBM
1570
1571 #include <math.h> /* for frexp*, ldexp*, INFINITY, NAN */
1572
1573  /* only the oldest of the old doesn't have this one. solaris. */
1574 #ifdef INFINITY
1575 #define ECB_INFINITY INFINITY
1576 #else
1577 #define ECB_INFINITY HUGE_VAL
1578 #endif
1579
1580 #ifdef NAN
1581 #define ECB_NAN NAN
1582 #else
1583 #define ECB_NAN ECB_INFINITY
1584 #endif
1585
1586 #if ECB_C99 || _XOPEN_VERSION >= 600 || _POSIX_VERSION >= 200112L
1587 #define ecb_ldexpf(x,e) ldexpf ((x), (e))
1588 #define ecb_frexpf(x,e) frexpf ((x), (e))
1589 #else
1590 #define ecb_ldexpf(x,e) (float) ldexp ((double) (x), (e))
1591 #define ecb_frexpf(x,e) (float) frexp ((double) (x), (e))
1592 #endif
1593
1594 /* convert a float to ieee single/binary32 */
1595 ecb_function_ ecb_const uint32_t ecb_float_to_binary32 (float x);
1596 ecb_function_ ecb_const uint32_t
1597 ecb_float_to_binary32 (float x)
1598 {
1599 uint32_t r;
1600
1601 #if ECB_STDFP
1602 memcpy (&r, &x, 4);
1603 #else
1604 /* slow emulation, works for anything but -0 */
1605 uint32_t m;
1606 int e;
1607
1608 if (x == 0e0f ) return 0x00000000U;
1609 if (x > +3.40282346638528860e+38f) return 0x7f800000U;
1610 if (x < -3.40282346638528860e+38f) return 0xff800000U;
1611 if (x != x ) return 0x7fbfffffU;
1612
1613 m = ecb_frexpf (x, &e) * 0x1000000U;
1614
1615 r = m & 0x80000000U;
1616
1617 if (r)
1618 m = -m;
1619
1620 if (e <= -126)
1621 {
1622 m &= 0xffffffU;
1623 m >>= (-125 - e);
1624 e = -126;
1625 }
1626
1627 r |= (e + 126) << 23;
1628 r |= m & 0x7fffffU;
1629 #endif
1630
1631 return r;
1632 }
1633
1634 /* converts an ieee single/binary32 to a float */
1635 ecb_function_ ecb_const float ecb_binary32_to_float (uint32_t x);
1636 ecb_function_ ecb_const float
1637 ecb_binary32_to_float (uint32_t x)
1638 {
1639 float r;
1640
1641 #if ECB_STDFP
1642 memcpy (&r, &x, 4);
1643 #else
1644 /* emulation, only works for normals and subnormals and +0 */
1645 int neg = x >> 31;
1646 int e = (x >> 23) & 0xffU;
1647
1648 x &= 0x7fffffU;
1649
1650 if (e)
1651 x |= 0x800000U;
1652 else
1653 e = 1;
1654
1655 /* we distrust ldexpf a bit and do the 2**-24 scaling by an extra multiply */
1656 r = ecb_ldexpf (x * (0.5f / 0x800000U), e - 126);
1657
1658 r = neg ? -r : r;
1659 #endif
1660
1661 return r;
1662 }
1663
1664 /* convert a double to ieee double/binary64 */
1665 ecb_function_ ecb_const uint64_t ecb_double_to_binary64 (double x);
1666 ecb_function_ ecb_const uint64_t
1667 ecb_double_to_binary64 (double x)
1668 {
1669 uint64_t r;
1670
1671 #if ECB_STDFP
1672 memcpy (&r, &x, 8);
1673 #else
1674 /* slow emulation, works for anything but -0 */
1675 uint64_t m;
1676 int e;
1677
1678 if (x == 0e0 ) return 0x0000000000000000U;
1679 if (x > +1.79769313486231470e+308) return 0x7ff0000000000000U;
1680 if (x < -1.79769313486231470e+308) return 0xfff0000000000000U;
1681 if (x != x ) return 0X7ff7ffffffffffffU;
1682
1683 m = frexp (x, &e) * 0x20000000000000U;
1684
1685      r = m & 0x8000000000000000;
1686
1687 if (r)
1688 m = -m;
1689
1690 if (e <= -1022)
1691 {
1692 m &= 0x1fffffffffffffU;
1693 m >>= (-1021 - e);
1694 e = -1022;
1695 }
1696
1697 r |= ((uint64_t)(e + 1022)) << 52;
1698 r |= m & 0xfffffffffffffU;
1699 #endif
1700
1701 return r;
1702 }
1703
1704 /* converts an ieee double/binary64 to a double */
1705 ecb_function_ ecb_const double ecb_binary64_to_double (uint64_t x);
1706 ecb_function_ ecb_const double
1707 ecb_binary64_to_double (uint64_t x)
1708 {
1709 double r;
1710
1711 #if ECB_STDFP
1712 memcpy (&r, &x, 8);
1713 #else
1714 /* emulation, only works for normals and subnormals and +0 */
1715 int neg = x >> 63;
1716 int e = (x >> 52) & 0x7ffU;
1717
1718 x &= 0xfffffffffffffU;
1719
1720 if (e)
1721 x |= 0x10000000000000U;
1722 else
1723 e = 1;
1724
1725 /* we distrust ldexp a bit and do the 2**-53 scaling by an extra multiply */
1726 r = ldexp (x * (0.5 / 0x10000000000000U), e - 1022);
1727
1728 r = neg ? -r : r;
1729 #endif
1730
1731 return r;
1732 }
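
  /* A minimal sketch of a typical use (the helper name is made up): store a
   * double as 8 bytes in a fixed byte order, independent of the host's
   * native floating point representation. */
  ecb_inline void
  example_store_double_be (unsigned char *dst, double d)
  {
    uint64_t b = ecb_double_to_binary64 (d);
    int i;

    for (i = 0; i < 8; ++i)
      dst[i] = (unsigned char)(b >> (56 - 8 * i)); /* most significant byte first */
  }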
1733
1734 /* convert a float to ieee half/binary16 */
1735 ecb_function_ ecb_const uint16_t ecb_float_to_binary16 (float x);
1736 ecb_function_ ecb_const uint16_t
1737 ecb_float_to_binary16 (float x)
1738 {
1739 return ecb_binary32_to_binary16 (ecb_float_to_binary32 (x));
1740 }
1741
1742 /* convert an ieee half/binary16 to float */
1743 ecb_function_ ecb_const float ecb_binary16_to_float (uint16_t x);
1744 ecb_function_ ecb_const float
1745 ecb_binary16_to_float (uint16_t x)
1746 {
1747 return ecb_binary32_to_float (ecb_binary16_to_binary32 (x));
1748 }
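
  /* A minimal usage sketch (hypothetical helper names): store a float as a
   * two-byte half precision value and read it back, trading precision for
   * space. */
  ecb_inline void
  example_store_half (unsigned char *dst, float f)
  {
    uint16_t h = ecb_float_to_binary16 (f);

    dst[0] = (unsigned char)(h >> 8); /* most significant byte first */
    dst[1] = (unsigned char) h;
  }

  ecb_inline float
  example_load_half (const unsigned char *src)
  {
    return ecb_binary16_to_float ((uint16_t)((src[0] << 8) | src[1]));
  }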
1749
1750#endif
1751
1752#endif
1753
