/cvs/libecb/ecb.h

Comparing libecb/ecb.h (file contents):
Revision 1.90 by root, Tue May 29 14:09:49 2012 UTC vs.
Revision 1.213 by root, Fri Mar 25 15:34:12 2022 UTC

1/* 1/*
2 * libecb - http://software.schmorp.de/pkg/libecb 2 * libecb - http://software.schmorp.de/pkg/libecb
3 * 3 *
4 * Copyright (©) 2009-2012 Marc Alexander Lehmann <libecb@schmorp.de> 4 * Copyright (©) 2009-2015,2018-2021 Marc Alexander Lehmann <libecb@schmorp.de>
5 * Copyright (©) 2011 Emanuele Giaquinta 5 * Copyright (©) 2011 Emanuele Giaquinta
6 * All rights reserved. 6 * All rights reserved.
7 * 7 *
8 * Redistribution and use in source and binary forms, with or without modifica- 8 * Redistribution and use in source and binary forms, with or without modifica-
9 * tion, are permitted provided that the following conditions are met: 9 * tion, are permitted provided that the following conditions are met:
23 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; 23 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
24 * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, 24 * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
25 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTH- 25 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTH-
26 * ERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED 26 * ERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
27 * OF THE POSSIBILITY OF SUCH DAMAGE. 27 * OF THE POSSIBILITY OF SUCH DAMAGE.
28 *
29 * Alternatively, the contents of this file may be used under the terms of
30 * the GNU General Public License ("GPL") version 2 or any later version,
31 * in which case the provisions of the GPL are applicable instead of
32 * the above. If you wish to allow the use of your version of this file
33 * only under the terms of the GPL and not to allow others to use your
34 * version of this file under the BSD license, indicate your decision
35 * by deleting the provisions above and replace them with the notice
36 * and other provisions required by the GPL. If you do not delete the
37 * provisions above, a recipient may use your version of this file under
38 * either the BSD or the GPL.
28 */ 39 */
29 40
30#ifndef ECB_H 41#ifndef ECB_H
31#define ECB_H 42#define ECB_H
32 43
33/* 16 bits major, 16 bits minor */ 44/* 16 bits major, 16 bits minor */
34#define ECB_VERSION 0x00010001 45#define ECB_VERSION 0x0001000c
35 46
36#ifdef _WIN32 47#include <string.h> /* for memcpy */
48
49#if defined (_WIN32) && !defined (__MINGW32__)
37 typedef signed char int8_t; 50 typedef signed char int8_t;
38 typedef unsigned char uint8_t; 51 typedef unsigned char uint8_t;
52 typedef signed char int_fast8_t;
53 typedef unsigned char uint_fast8_t;
39 typedef signed short int16_t; 54 typedef signed short int16_t;
40 typedef unsigned short uint16_t; 55 typedef unsigned short uint16_t;
56 typedef signed int int_fast16_t;
57 typedef unsigned int uint_fast16_t;
41 typedef signed int int32_t; 58 typedef signed int int32_t;
42 typedef unsigned int uint32_t; 59 typedef unsigned int uint32_t;
60 typedef signed int int_fast32_t;
61 typedef unsigned int uint_fast32_t;
43 #if __GNUC__ 62 #if __GNUC__
44 typedef signed long long int64_t; 63 typedef signed long long int64_t;
45 typedef unsigned long long uint64_t; 64 typedef unsigned long long uint64_t;
46 #else /* _MSC_VER || __BORLANDC__ */ 65 #else /* _MSC_VER || __BORLANDC__ */
47 typedef signed __int64 int64_t; 66 typedef signed __int64 int64_t;
48 typedef unsigned __int64 uint64_t; 67 typedef unsigned __int64 uint64_t;
49 #endif 68 #endif
69 typedef int64_t int_fast64_t;
70 typedef uint64_t uint_fast64_t;
50 #ifdef _WIN64 71 #ifdef _WIN64
51 #define ECB_PTRSIZE 8 72 #define ECB_PTRSIZE 8
52 typedef uint64_t uintptr_t; 73 typedef uint64_t uintptr_t;
53 typedef int64_t intptr_t; 74 typedef int64_t intptr_t;
54 #else 75 #else
55 #define ECB_PTRSIZE 4 76 #define ECB_PTRSIZE 4
56 typedef uint32_t uintptr_t; 77 typedef uint32_t uintptr_t;
57 typedef int32_t intptr_t; 78 typedef int32_t intptr_t;
58 #endif 79 #endif
59 typedef intptr_t ptrdiff_t;
60#else 80#else
61 #include <inttypes.h> 81 #include <inttypes.h>
62 #if UINTMAX_MAX > 0xffffffffU 82 #if (defined INTPTR_MAX ? INTPTR_MAX : ULONG_MAX) > 0xffffffffU
63 #define ECB_PTRSIZE 8 83 #define ECB_PTRSIZE 8
64 #else 84 #else
65 #define ECB_PTRSIZE 4 85 #define ECB_PTRSIZE 4
66 #endif 86 #endif
87#endif
88
89#define ECB_GCC_AMD64 (__amd64 || __amd64__ || __x86_64 || __x86_64__)
90#define ECB_MSVC_AMD64 (_M_AMD64 || _M_X64)
91
92#ifndef ECB_OPTIMIZE_SIZE
93 #if __OPTIMIZE_SIZE__
94 #define ECB_OPTIMIZE_SIZE 1
95 #else
96 #define ECB_OPTIMIZE_SIZE 0
97 #endif
98#endif
99
100/* work around x32 idiocy by defining proper macros */
101#if ECB_GCC_AMD64 || ECB_MSVC_AMD64
102 #if _ILP32
103 #define ECB_AMD64_X32 1
104 #else
105 #define ECB_AMD64 1
106 #endif
107#endif
108
109#if ECB_PTRSIZE >= 8 || ECB_AMD64_X32
110 #define ECB_64BIT_NATIVE 1
111#else
112 #define ECB_64BIT_NATIVE 0
67#endif 113#endif
68 114
69/* many compilers define _GNUC_ to some versions but then only implement 115/* many compilers define _GNUC_ to some versions but then only implement
70 * what their idiot authors think are the "more important" extensions, 116 * what their idiot authors think are the "more important" extensions,
71 * causing enormous grief in return for some better fake benchmark numbers. 117 * causing enormous grief in return for some better fake benchmark numbers.
72 * or so. 118 * or so.
73 * we try to detect these and simply assume they are not gcc - if they have 119 * we try to detect these and simply assume they are not gcc - if they have
74 * an issue with that they should have done it right in the first place. 120 * an issue with that they should have done it right in the first place.
75 */ 121 */
76#ifndef ECB_GCC_VERSION
77 #if !defined __GNUC_MINOR__ || defined __INTEL_COMPILER || defined __SUNPRO_C || defined __SUNPRO_CC || defined __llvm__ || defined __clang__ 122#if !defined __GNUC_MINOR__ || defined __INTEL_COMPILER || defined __SUNPRO_C || defined __SUNPRO_CC || defined __llvm__ || defined __clang__
78 #define ECB_GCC_VERSION(major,minor) 0 123 #define ECB_GCC_VERSION(major,minor) 0
79 #else 124#else
80 #define ECB_GCC_VERSION(major,minor) (__GNUC__ > (major) || (__GNUC__ == (major) && __GNUC_MINOR__ >= (minor))) 125 #define ECB_GCC_VERSION(major,minor) (__GNUC__ > (major) || (__GNUC__ == (major) && __GNUC_MINOR__ >= (minor)))
81 #endif 126#endif
82#endif
83 127
128#define ECB_CLANG_VERSION(major,minor) (__clang_major__ > (major) || (__clang_major__ == (major) && __clang_minor__ >= (minor)))
129
130#if __clang__ && defined __has_builtin
131 #define ECB_CLANG_BUILTIN(x) __has_builtin (x)
132#else
133 #define ECB_CLANG_BUILTIN(x) 0
134#endif
135
136#if __clang__ && defined __has_extension
137 #define ECB_CLANG_EXTENSION(x) __has_extension (x)
138#else
139 #define ECB_CLANG_EXTENSION(x) 0
140#endif
141
142#define ECB_CPP (__cplusplus+0)
143#define ECB_CPP11 (__cplusplus >= 201103L)
144#define ECB_CPP14 (__cplusplus >= 201402L)
145#define ECB_CPP17 (__cplusplus >= 201703L)
146
147#if ECB_CPP
148 #define ECB_C 0
149 #define ECB_STDC_VERSION 0
150#else
151 #define ECB_C 1
152 #define ECB_STDC_VERSION __STDC_VERSION__
153#endif
154
84#define ECB_C99 (__STDC_VERSION__ >= 199901L) 155#define ECB_C99 (ECB_STDC_VERSION >= 199901L)
85#define ECB_C11 (__STDC_VERSION__ >= 201112L) 156#define ECB_C11 (ECB_STDC_VERSION >= 201112L)
157#define ECB_C17 (ECB_STDC_VERSION >= 201710L)
158
159#if ECB_CPP
160 #define ECB_EXTERN_C extern "C"
161 #define ECB_EXTERN_C_BEG ECB_EXTERN_C {
162 #define ECB_EXTERN_C_END }
163#else
164 #define ECB_EXTERN_C extern
165 #define ECB_EXTERN_C_BEG
166 #define ECB_EXTERN_C_END
167#endif
86 168
87/*****************************************************************************/ 169/*****************************************************************************/
88 170
89/* ECB_NO_THREADS - ecb is not used by multiple threads, ever */ 171/* ECB_NO_THREADS - ecb is not used by multiple threads, ever */
90/* ECB_NO_SMP - ecb might be used in multiple threads, but only on a single cpu */ 172/* ECB_NO_SMP - ecb might be used in multiple threads, but only on a single cpu */
91 173
92#if ECB_NO_THREADS 174#if ECB_NO_THREADS
93# define ECB_NO_SMP 1 175 #define ECB_NO_SMP 1
94#endif 176#endif
95 177
96#if ECB_NO_THREADS || ECB_NO_SMP 178#if ECB_NO_SMP
97 #define ECB_MEMORY_FENCE do { } while (0) 179 #define ECB_MEMORY_FENCE do { } while (0)
98#endif 180#endif
99 181
100#ifndef ECB_MEMORY_FENCE 182/* http://www-01.ibm.com/support/knowledgecenter/SSGH3R_13.1.0/com.ibm.xlcpp131.aix.doc/compiler_ref/compiler_builtins.html */
101 #if ECB_C11 && !defined __STDC_NO_ATOMICS__ 183#if __xlC__ && ECB_CPP
102 /* we assume that these memory fences work on all variables/all memory accesses, */ 184 #include <builtins.h>
103 /* not just C11 atomics and atomic accesses */
104 #include <stdatomic.h>
105 #define ECB_MEMORY_FENCE atomic_thread_fence (memory_order_acq_rel)
106 #define ECB_MEMORY_FENCE_ACQUIRE atomic_thread_fence (memory_order_acquire)
107 #define ECB_MEMORY_FENCE_RELEASE atomic_thread_fence (memory_order_release)
108 #endif 185#endif
186
187#if 1400 <= _MSC_VER
188 #include <intrin.h> /* fence functions _ReadBarrier, also bit search functions _BitScanReverse */
109#endif 189#endif
110 190
111#ifndef ECB_MEMORY_FENCE 191#ifndef ECB_MEMORY_FENCE
112 #if ECB_GCC_VERSION(2,5) || defined __INTEL_COMPILER || (__llvm__ && __GNUC__) || __SUNPRO_C >= 0x5110 || __SUNPRO_CC >= 0x5110 192 #if ECB_GCC_VERSION(2,5) || defined __INTEL_COMPILER || (__llvm__ && __GNUC__) || __SUNPRO_C >= 0x5110 || __SUNPRO_CC >= 0x5110
193 #define ECB_MEMORY_FENCE_RELAXED __asm__ __volatile__ ("" : : : "memory")
113 #if __i386 || __i386__ 194 #if __i386 || __i386__
114 #define ECB_MEMORY_FENCE __asm__ __volatile__ ("lock; orb $0, -1(%%esp)" : : : "memory") 195 #define ECB_MEMORY_FENCE __asm__ __volatile__ ("lock; orb $0, -1(%%esp)" : : : "memory")
115 #define ECB_MEMORY_FENCE_ACQUIRE ECB_MEMORY_FENCE /* non-lock xchg might be enough */ 196 #define ECB_MEMORY_FENCE_ACQUIRE __asm__ __volatile__ ("" : : : "memory")
116 #define ECB_MEMORY_FENCE_RELEASE do { } while (0) /* unlikely to change in future cpus */ 197 #define ECB_MEMORY_FENCE_RELEASE __asm__ __volatile__ ("" : : : "memory")
117 #elif __amd64 || __amd64__ || __x86_64 || __x86_64__ 198 #elif ECB_GCC_AMD64
118 #define ECB_MEMORY_FENCE __asm__ __volatile__ ("mfence" : : : "memory") 199 #define ECB_MEMORY_FENCE __asm__ __volatile__ ("mfence" : : : "memory")
119 #define ECB_MEMORY_FENCE_ACQUIRE __asm__ __volatile__ ("lfence" : : : "memory") 200 #define ECB_MEMORY_FENCE_ACQUIRE __asm__ __volatile__ ("" : : : "memory")
120 #define ECB_MEMORY_FENCE_RELEASE __asm__ __volatile__ ("sfence") /* play safe - not needed in any current cpu */ 201 #define ECB_MEMORY_FENCE_RELEASE __asm__ __volatile__ ("" : : : "memory")
121 #elif __powerpc__ || __ppc__ || __powerpc64__ || __ppc64__ 202 #elif __powerpc__ || __ppc__ || __powerpc64__ || __ppc64__
122 #define ECB_MEMORY_FENCE __asm__ __volatile__ ("sync" : : : "memory") 203 #define ECB_MEMORY_FENCE __asm__ __volatile__ ("sync" : : : "memory")
204 #elif defined __ARM_ARCH_2__ \
205 || defined __ARM_ARCH_3__ || defined __ARM_ARCH_3M__ \
206 || defined __ARM_ARCH_4__ || defined __ARM_ARCH_4T__ \
207 || defined __ARM_ARCH_5__ || defined __ARM_ARCH_5E__ \
208 || defined __ARM_ARCH_5T__ || defined __ARM_ARCH_5TE__ \
209 || defined __ARM_ARCH_5TEJ__
210 /* should not need any, unless running old code on newer cpu - arm doesn't support that */
123 #elif defined __ARM_ARCH_6__ || defined __ARM_ARCH_6J__ \ 211 #elif defined __ARM_ARCH_6__ || defined __ARM_ARCH_6J__ \
124 || defined __ARM_ARCH_6K__ || defined __ARM_ARCH_6ZK__ 212 || defined __ARM_ARCH_6K__ || defined __ARM_ARCH_6ZK__ \
213 || defined __ARM_ARCH_6T2__
125 #define ECB_MEMORY_FENCE __asm__ __volatile__ ("mcr p15,0,%0,c7,c10,5" : : "r" (0) : "memory") 214 #define ECB_MEMORY_FENCE __asm__ __volatile__ ("mcr p15,0,%0,c7,c10,5" : : "r" (0) : "memory")
126 #elif defined __ARM_ARCH_7__ || defined __ARM_ARCH_7A__ \ 215 #elif defined __ARM_ARCH_7__ || defined __ARM_ARCH_7A__ \
127 || defined __ARM_ARCH_7M__ || defined __ARM_ARCH_7R__ 216 || defined __ARM_ARCH_7R__ || defined __ARM_ARCH_7M__
128 #define ECB_MEMORY_FENCE __asm__ __volatile__ ("dmb" : : : "memory") 217 #define ECB_MEMORY_FENCE __asm__ __volatile__ ("dmb" : : : "memory")
129 #elif __sparc || __sparc__ 218 #elif __aarch64__
219 #define ECB_MEMORY_FENCE __asm__ __volatile__ ("dmb ish" : : : "memory")
220 #elif (__sparc || __sparc__) && !(__sparc_v8__ || defined __sparcv8)
130 #define ECB_MEMORY_FENCE __asm__ __volatile__ ("membar #LoadStore | #LoadLoad | #StoreStore | #StoreLoad | " : : : "memory") 221 #define ECB_MEMORY_FENCE __asm__ __volatile__ ("membar #LoadStore | #LoadLoad | #StoreStore | #StoreLoad" : : : "memory")
131 #define ECB_MEMORY_FENCE_ACQUIRE __asm__ __volatile__ ("membar #LoadStore | #LoadLoad" : : : "memory") 222 #define ECB_MEMORY_FENCE_ACQUIRE __asm__ __volatile__ ("membar #LoadStore | #LoadLoad" : : : "memory")
132 #define ECB_MEMORY_FENCE_RELEASE __asm__ __volatile__ ("membar #LoadStore | #StoreStore") 223 #define ECB_MEMORY_FENCE_RELEASE __asm__ __volatile__ ("membar #LoadStore | #StoreStore")
133 #elif defined __s390__ || defined __s390x__ 224 #elif defined __s390__ || defined __s390x__
134 #define ECB_MEMORY_FENCE __asm__ __volatile__ ("bcr 15,0" : : : "memory") 225 #define ECB_MEMORY_FENCE __asm__ __volatile__ ("bcr 15,0" : : : "memory")
135 #elif defined __mips__ 226 #elif defined __mips__
227 /* GNU/Linux emulates sync on mips1 architectures, so we force its use */
228 /* anybody else who still uses mips1 is supposed to send in their version, with detection code. */
136 #define ECB_MEMORY_FENCE __asm__ __volatile__ ("sync" : : : "memory") 229 #define ECB_MEMORY_FENCE __asm__ __volatile__ (".set mips2; sync; .set mips0" : : : "memory")
137 #elif defined __alpha__ 230 #elif defined __alpha__
138 #define ECB_MEMORY_FENCE __asm__ __volatile__ ("mb" : : : "memory") 231 #define ECB_MEMORY_FENCE __asm__ __volatile__ ("mb" : : : "memory")
232 #elif defined __hppa__
233 #define ECB_MEMORY_FENCE __asm__ __volatile__ ("" : : : "memory")
234 #define ECB_MEMORY_FENCE_RELEASE __asm__ __volatile__ ("")
235 #elif defined __ia64__
236 #define ECB_MEMORY_FENCE __asm__ __volatile__ ("mf" : : : "memory")
237 #elif defined __m68k__
238 #define ECB_MEMORY_FENCE __asm__ __volatile__ ("" : : : "memory")
239 #elif defined __m88k__
240 #define ECB_MEMORY_FENCE __asm__ __volatile__ ("tb1 0,%%r0,128" : : : "memory")
241 #elif defined __sh__
242 #define ECB_MEMORY_FENCE __asm__ __volatile__ ("" : : : "memory")
139 #endif 243 #endif
140 #endif 244 #endif
141#endif 245#endif
142 246
143#ifndef ECB_MEMORY_FENCE 247#ifndef ECB_MEMORY_FENCE
248 #if ECB_GCC_VERSION(4,7)
249 /* see comment below (stdatomic.h) about the C11 memory model. */
250 #define ECB_MEMORY_FENCE __atomic_thread_fence (__ATOMIC_SEQ_CST)
251 #define ECB_MEMORY_FENCE_ACQUIRE __atomic_thread_fence (__ATOMIC_ACQUIRE)
252 #define ECB_MEMORY_FENCE_RELEASE __atomic_thread_fence (__ATOMIC_RELEASE)
253 #undef ECB_MEMORY_FENCE_RELAXED
254 #define ECB_MEMORY_FENCE_RELAXED __atomic_thread_fence (__ATOMIC_RELAXED)
255
256 #elif ECB_CLANG_EXTENSION(c_atomic)
257 /* see comment below (stdatomic.h) about the C11 memory model. */
258 #define ECB_MEMORY_FENCE __c11_atomic_thread_fence (__ATOMIC_SEQ_CST)
259 #define ECB_MEMORY_FENCE_ACQUIRE __c11_atomic_thread_fence (__ATOMIC_ACQUIRE)
260 #define ECB_MEMORY_FENCE_RELEASE __c11_atomic_thread_fence (__ATOMIC_RELEASE)
261 #undef ECB_MEMORY_FENCE_RELAXED
262 #define ECB_MEMORY_FENCE_RELAXED __c11_atomic_thread_fence (__ATOMIC_RELAXED)
263
144 #if ECB_GCC_VERSION(4,4) || defined __INTEL_COMPILER || defined __clang__ 264 #elif ECB_GCC_VERSION(4,4) || defined __INTEL_COMPILER || defined __clang__
145 #define ECB_MEMORY_FENCE __sync_synchronize () 265 #define ECB_MEMORY_FENCE __sync_synchronize ()
146 /*#define ECB_MEMORY_FENCE_ACQUIRE ({ char dummy = 0; __sync_lock_test_and_set (&dummy, 1); }) */ 266 #elif _MSC_VER >= 1500 /* VC++ 2008 */
147 /*#define ECB_MEMORY_FENCE_RELEASE ({ char dummy = 1; __sync_lock_release (&dummy ); }) */ 267 /* apparently, microsoft broke all the memory barrier stuff in Visual Studio 2008... */
268 #pragma intrinsic(_ReadBarrier,_WriteBarrier,_ReadWriteBarrier)
269 #define ECB_MEMORY_FENCE _ReadWriteBarrier (); MemoryBarrier()
270 #define ECB_MEMORY_FENCE_ACQUIRE _ReadWriteBarrier (); MemoryBarrier() /* according to msdn, _ReadBarrier is not a load fence */
271 #define ECB_MEMORY_FENCE_RELEASE _WriteBarrier (); MemoryBarrier()
148 #elif _MSC_VER >= 1400 /* VC++ 2005 */ 272 #elif _MSC_VER >= 1400 /* VC++ 2005 */
149 #pragma intrinsic(_ReadBarrier,_WriteBarrier,_ReadWriteBarrier) 273 #pragma intrinsic(_ReadBarrier,_WriteBarrier,_ReadWriteBarrier)
150 #define ECB_MEMORY_FENCE _ReadWriteBarrier () 274 #define ECB_MEMORY_FENCE _ReadWriteBarrier ()
151 #define ECB_MEMORY_FENCE_ACQUIRE _ReadWriteBarrier () /* according to msdn, _ReadBarrier is not a load fence */ 275 #define ECB_MEMORY_FENCE_ACQUIRE _ReadWriteBarrier () /* according to msdn, _ReadBarrier is not a load fence */
152 #define ECB_MEMORY_FENCE_RELEASE _WriteBarrier () 276 #define ECB_MEMORY_FENCE_RELEASE _WriteBarrier ()
153 #elif defined _WIN32 277 #elif defined _WIN32
154 #include <WinNT.h> 278 #include <WinNT.h>
155 #define ECB_MEMORY_FENCE MemoryBarrier () /* actually just xchg on x86... scary */ 279 #define ECB_MEMORY_FENCE MemoryBarrier () /* actually just xchg on x86... scary */
156 #elif __SUNPRO_C >= 0x5110 || __SUNPRO_CC >= 0x5110 280 #elif __SUNPRO_C >= 0x5110 || __SUNPRO_CC >= 0x5110
157 #include <mbarrier.h> 281 #include <mbarrier.h>
158 #define ECB_MEMORY_FENCE __machine_rw_barrier () 282 #define ECB_MEMORY_FENCE __machine_rw_barrier ()
159 #define ECB_MEMORY_FENCE_ACQUIRE __machine_r_barrier () 283 #define ECB_MEMORY_FENCE_ACQUIRE __machine_acq_barrier ()
160 #define ECB_MEMORY_FENCE_RELEASE __machine_w_barrier () 284 #define ECB_MEMORY_FENCE_RELEASE __machine_rel_barrier ()
285 #define ECB_MEMORY_FENCE_RELAXED __compiler_barrier ()
161 #elif __xlC__ 286 #elif __xlC__
162 #define ECB_MEMORY_FENCE __sync () 287 #define ECB_MEMORY_FENCE __sync ()
288 #endif
289#endif
290
291#ifndef ECB_MEMORY_FENCE
292 #if ECB_C11 && !defined __STDC_NO_ATOMICS__
293 /* we assume that these memory fences work on all variables/all memory accesses, */
294 /* not just C11 atomics and atomic accesses */
295 #include <stdatomic.h>
296 #define ECB_MEMORY_FENCE atomic_thread_fence (memory_order_seq_cst)
297 #define ECB_MEMORY_FENCE_ACQUIRE atomic_thread_fence (memory_order_acquire)
298 #define ECB_MEMORY_FENCE_RELEASE atomic_thread_fence (memory_order_release)
163 #endif 299 #endif
164#endif 300#endif
165 301
166#ifndef ECB_MEMORY_FENCE 302#ifndef ECB_MEMORY_FENCE
167 #if !ECB_AVOID_PTHREADS 303 #if !ECB_AVOID_PTHREADS
187 323
188#if !defined ECB_MEMORY_FENCE_RELEASE && defined ECB_MEMORY_FENCE 324#if !defined ECB_MEMORY_FENCE_RELEASE && defined ECB_MEMORY_FENCE
189 #define ECB_MEMORY_FENCE_RELEASE ECB_MEMORY_FENCE 325 #define ECB_MEMORY_FENCE_RELEASE ECB_MEMORY_FENCE
190#endif 326#endif
191 327
328#if !defined ECB_MEMORY_FENCE_RELAXED && defined ECB_MEMORY_FENCE
329 #define ECB_MEMORY_FENCE_RELAXED ECB_MEMORY_FENCE /* very heavy-handed */
330#endif
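/* Illustrative usage sketch (not part of the header; the ecb_example_* names
 * are hypothetical): publish a value to another thread using the fence macros
 * defined above together with a volatile flag. The writer issues a release
 * fence after storing the payload, the reader issues an acquire fence after
 * observing the flag.
 */
ecb_inline void
ecb_example_publish (int *payload, volatile int *ready, int value)
{
  *payload = value;
  ECB_MEMORY_FENCE_RELEASE; /* payload store becomes visible before the flag */
  *ready = 1;
}

ecb_inline int
ecb_example_consume (const int *payload, const volatile int *ready)
{
  while (!*ready)
    ; /* spin until the writer sets the flag */

  ECB_MEMORY_FENCE_ACQUIRE; /* flag read is ordered before the payload read */
  return *payload;
}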
331
192/*****************************************************************************/ 332/*****************************************************************************/
193 333
194#if __cplusplus 334#if ECB_CPP
195 #define ecb_inline static inline 335 #define ecb_inline static inline
196#elif ECB_GCC_VERSION(2,5) 336#elif ECB_GCC_VERSION(2,5)
197 #define ecb_inline static __inline__ 337 #define ecb_inline static __inline__
198#elif ECB_C99 338#elif ECB_C99
199 #define ecb_inline static inline 339 #define ecb_inline static inline
213 353
214#define ECB_CONCAT_(a, b) a ## b 354#define ECB_CONCAT_(a, b) a ## b
215#define ECB_CONCAT(a, b) ECB_CONCAT_(a, b) 355#define ECB_CONCAT(a, b) ECB_CONCAT_(a, b)
216#define ECB_STRINGIFY_(a) # a 356#define ECB_STRINGIFY_(a) # a
217#define ECB_STRINGIFY(a) ECB_STRINGIFY_(a) 357#define ECB_STRINGIFY(a) ECB_STRINGIFY_(a)
358#define ECB_STRINGIFY_EXPR(expr) ((expr), ECB_STRINGIFY_ (expr))
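/* Illustrative examples (hypothetical values, not part of the header):
 * ECB_STRINGIFY expands macro arguments before stringifying, while
 * ECB_STRINGIFY_EXPR additionally evaluates its argument, so it only
 * accepts valid expressions:
 *
 *   ECB_STRINGIFY (ECB_PTRSIZE)  =>  "8" (or "4")
 *   ECB_STRINGIFY_EXPR (1 + 2)   =>  "1 + 2", after checking that 1 + 2 compiles
 */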
218 359
 360/* This marks larger functions that do not necessarily need to be inlined */
361/* The idea is to possibly compile the header twice, */
362/* once exposing only the declarations, another time to define external functions */
363/* TODO: possibly static would be best for these at the moment? */
219#define ecb_function_ ecb_inline 364#define ecb_function_ ecb_inline
220 365
221#if ECB_GCC_VERSION(3,1) 366#if ECB_GCC_VERSION(3,1) || ECB_CLANG_VERSION(2,8)
222 #define ecb_attribute(attrlist) __attribute__(attrlist) 367 #define ecb_attribute(attrlist) __attribute__ (attrlist)
368#else
369 #define ecb_attribute(attrlist)
370#endif
371
372#if ECB_GCC_VERSION(3,1) || ECB_CLANG_BUILTIN(__builtin_constant_p)
223 #define ecb_is_constant(expr) __builtin_constant_p (expr) 373 #define ecb_is_constant(expr) __builtin_constant_p (expr)
374#else
375 /* possible C11 impl for integral types
376 typedef struct ecb_is_constant_struct ecb_is_constant_struct;
377 #define ecb_is_constant(expr) _Generic ((1 ? (struct ecb_is_constant_struct *)0 : (void *)((expr) - (expr)), ecb_is_constant_struct *: 0, default: 1)) */
378
379 #define ecb_is_constant(expr) 0
380#endif
381
382#if ECB_GCC_VERSION(3,1) || ECB_CLANG_BUILTIN(__builtin_expect)
224 #define ecb_expect(expr,value) __builtin_expect ((expr),(value)) 383 #define ecb_expect(expr,value) __builtin_expect ((expr),(value))
384#else
385 #define ecb_expect(expr,value) (expr)
386#endif
387
388#if ECB_GCC_VERSION(3,1) || ECB_CLANG_BUILTIN(__builtin_prefetch)
225 #define ecb_prefetch(addr,rw,locality) __builtin_prefetch (addr, rw, locality) 389 #define ecb_prefetch(addr,rw,locality) __builtin_prefetch (addr, rw, locality)
226#else 390#else
227 #define ecb_attribute(attrlist)
228 #define ecb_is_constant(expr) 0
229 #define ecb_expect(expr,value) (expr)
230 #define ecb_prefetch(addr,rw,locality) 391 #define ecb_prefetch(addr,rw,locality)
231#endif 392#endif
232 393
233/* no emulation for ecb_decltype */ 394/* no emulation for ecb_decltype */
234#if ECB_GCC_VERSION(4,5) 395#if ECB_CPP11
396 // older implementations might have problems with decltype(x)::type, work around it
397 template<class T> struct ecb_decltype_t { typedef T type; };
235 #define ecb_decltype(x) __decltype(x) 398 #define ecb_decltype(x) ecb_decltype_t<decltype (x)>::type
236#elif ECB_GCC_VERSION(3,0) 399#elif ECB_GCC_VERSION(3,0) || ECB_CLANG_VERSION(2,8)
237 #define ecb_decltype(x) __typeof(x) 400 #define ecb_decltype(x) __typeof__ (x)
238#endif 401#endif
239 402
403#if _MSC_VER >= 1300
404 #define ecb_deprecated __declspec (deprecated)
405#else
406 #define ecb_deprecated ecb_attribute ((__deprecated__))
407#endif
408
409#if _MSC_VER >= 1500
410 #define ecb_deprecated_message(msg) __declspec (deprecated (msg))
411#elif ECB_GCC_VERSION(4,5)
 412 #define ecb_deprecated_message(msg) ecb_attribute ((__deprecated__ (msg)))
413#else
414 #define ecb_deprecated_message(msg) ecb_deprecated
415#endif
416
417#if _MSC_VER >= 1400
418 #define ecb_noinline __declspec (noinline)
419#else
240#define ecb_noinline ecb_attribute ((__noinline__)) 420 #define ecb_noinline ecb_attribute ((__noinline__))
421#endif
422
241#define ecb_unused ecb_attribute ((__unused__)) 423#define ecb_unused ecb_attribute ((__unused__))
242#define ecb_const ecb_attribute ((__const__)) 424#define ecb_const ecb_attribute ((__const__))
243#define ecb_pure ecb_attribute ((__pure__)) 425#define ecb_pure ecb_attribute ((__pure__))
244 426
245#if ECB_C11 427#if ECB_C11 || __IBMC_NORETURN
428 /* http://www-01.ibm.com/support/knowledgecenter/SSGH3R_13.1.0/com.ibm.xlcpp131.aix.doc/language_ref/noreturn.html */
246 #define ecb_noreturn _Noreturn 429 #define ecb_noreturn _Noreturn
430#elif ECB_CPP11
431 #define ecb_noreturn [[noreturn]]
432#elif _MSC_VER >= 1200
433 /* http://msdn.microsoft.com/en-us/library/k6ktzx3s.aspx */
434 #define ecb_noreturn __declspec (noreturn)
247#else 435#else
248 #define ecb_noreturn ecb_attribute ((__noreturn__)) 436 #define ecb_noreturn ecb_attribute ((__noreturn__))
249#endif 437#endif
250 438
251#if ECB_GCC_VERSION(4,3) 439#if ECB_GCC_VERSION(4,3)
266/* for compatibility to the rest of the world */ 454/* for compatibility to the rest of the world */
267#define ecb_likely(expr) ecb_expect_true (expr) 455#define ecb_likely(expr) ecb_expect_true (expr)
268#define ecb_unlikely(expr) ecb_expect_false (expr) 456#define ecb_unlikely(expr) ecb_expect_false (expr)
269 457
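/* Illustrative usage sketch (hypothetical helper, not part of the header):
 * mark the error path as unlikely so the compiler lays out the common path
 * without a taken branch.
 */
ecb_inline int
ecb_example_div (int num, int den)
{
  if (ecb_expect_false (den == 0))
    return 0;        /* rare path */

  return num / den;  /* fast path */
}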
270/* count trailing zero bits and count # of one bits */ 458/* count trailing zero bits and count # of one bits */
271#if ECB_GCC_VERSION(3,4) 459#if ECB_GCC_VERSION(3,4) \
272 /* we assume int == 32 bit, long == 32 or 64 bit and long long == 64 bit */ 460 || (ECB_CLANG_BUILTIN(__builtin_clz) && ECB_CLANG_BUILTIN(__builtin_clzll) \
273 #define ecb_ld32(x) (__builtin_clz (x) ^ 31) 461 && ECB_CLANG_BUILTIN(__builtin_ctz) && ECB_CLANG_BUILTIN(__builtin_ctzll) \
274 #define ecb_ld64(x) (__builtin_clzll (x) ^ 63) 462 && ECB_CLANG_BUILTIN(__builtin_popcount))
275 #define ecb_ctz32(x) __builtin_ctz (x) 463 #define ecb_ctz32(x) __builtin_ctz (x)
464 #define ecb_ctz64(x) (__SIZEOF_LONG__ == 64 ? __builtin_ctzl (x) : __builtin_ctzll (x))
276 #define ecb_ctz64(x) __builtin_ctzll (x) 465 #define ecb_clz32(x) __builtin_clz (x)
466 #define ecb_clz64(x) (__SIZEOF_LONG__ == 64 ? __builtin_clzl (x) : __builtin_clzll (x))
467 #define ecb_ld32(x) (ecb_clz32 (x) ^ 31)
468 #define ecb_ld64(x) (ecb_clz64 (x) ^ 63)
277 #define ecb_popcount32(x) __builtin_popcount (x) 469 #define ecb_popcount32(x) __builtin_popcount (x)
278 /* no popcountll */ 470 /* ecb_popcount64 is more difficult, see below */
279#else 471#else
280 ecb_function_ int ecb_ctz32 (uint32_t x) ecb_const; 472 ecb_function_ ecb_const int ecb_ctz32 (uint32_t x);
281 ecb_function_ int 473 ecb_function_ ecb_const int ecb_ctz32 (uint32_t x)
282 ecb_ctz32 (uint32_t x)
283 { 474 {
475#if 1400 <= _MSC_VER && (_M_IX86 || _M_X64 || _M_IA64 || _M_ARM)
476 unsigned long r;
477 _BitScanForward (&r, x);
478 return (int)r;
479#else
284 int r = 0; 480 int r;
285 481
286 x &= ~x + 1; /* this isolates the lowest bit */ 482 x &= ~x + 1; /* this isolates the lowest bit */
287 483
288#if ECB_branchless_on_i386 484 #if 1
485 /* David Seal's algorithm, Message-ID: <32975@armltd.uucp> from 1994 */
486 /* This happens to return 32 for x == 0, but the API does not support this */
487
488 /* -0 marks unused entries */
489 static unsigned char table[64] =
490 {
491 32, 0, 1, 12, 2, 6, -0, 13, 3, -0, 7, -0, -0, -0, -0, 14,
492 10, 4, -0, -0, 8, -0, -0, 25, -0, -0, -0, -0, -0, 21, 27, 15,
493 31, 11, 5, -0, -0, -0, -0, -0, 9, -0, -0, 24, -0, -0, 20, 26,
494 30, -0, -0, -0, -0, 23, -0, 19, 29, -0, 22, 18, 28, 17, 16, -0
495 };
496
497 /* magic constant results in 33 unique values in the upper 6 bits */
498 x *= 0x0450fbafU; /* == 17 * 65 * 65535 */
499
500 r = table [x >> 26];
501 #elif 0 /* branchless on i386, typically */
502 r = 0;
289 r += !!(x & 0xaaaaaaaa) << 0; 503 r += !!(x & 0xaaaaaaaa) << 0;
290 r += !!(x & 0xcccccccc) << 1; 504 r += !!(x & 0xcccccccc) << 1;
291 r += !!(x & 0xf0f0f0f0) << 2; 505 r += !!(x & 0xf0f0f0f0) << 2;
292 r += !!(x & 0xff00ff00) << 3; 506 r += !!(x & 0xff00ff00) << 3;
293 r += !!(x & 0xffff0000) << 4; 507 r += !!(x & 0xffff0000) << 4;
294#else 508 #else /* branchless on modern compilers, typically */
509 r = 0;
295 if (x & 0xaaaaaaaa) r += 1; 510 if (x & 0xaaaaaaaa) r += 1;
296 if (x & 0xcccccccc) r += 2; 511 if (x & 0xcccccccc) r += 2;
297 if (x & 0xf0f0f0f0) r += 4; 512 if (x & 0xf0f0f0f0) r += 4;
298 if (x & 0xff00ff00) r += 8; 513 if (x & 0xff00ff00) r += 8;
299 if (x & 0xffff0000) r += 16; 514 if (x & 0xffff0000) r += 16;
300#endif 515#endif
301 516
302 return r; 517 return r;
518#endif
303 } 519 }
304 520
305 ecb_function_ int ecb_ctz64 (uint64_t x) ecb_const; 521 ecb_function_ ecb_const int ecb_ctz64 (uint64_t x);
306 ecb_function_ int 522 ecb_function_ ecb_const int ecb_ctz64 (uint64_t x)
307 ecb_ctz64 (uint64_t x)
308 { 523 {
524#if 1400 <= _MSC_VER && (_M_X64 || _M_IA64 || _M_ARM)
525 unsigned long r;
526 _BitScanForward64 (&r, x);
527 return (int)r;
528#else
309 int shift = x & 0xffffffffU ? 0 : 32; 529 int shift = x & 0xffffffff ? 0 : 32;
310 return ecb_ctz32 (x >> shift) + shift; 530 return ecb_ctz32 (x >> shift) + shift;
531#endif
311 } 532 }
312 533
534 ecb_function_ ecb_const int ecb_clz32 (uint32_t x);
535 ecb_function_ ecb_const int ecb_clz32 (uint32_t x)
536 {
537#if 1400 <= _MSC_VER && (_M_IX86 || _M_X64 || _M_IA64 || _M_ARM)
538 unsigned long r;
539 _BitScanReverse (&r, x);
540 return (int)r;
541#else
542
543 /* Robert Harley's algorithm from comp.arch 1996-12-07 */
544 /* This happens to return 32 for x == 0, but the API does not support this */
545
546 /* -0 marks unused table elements */
547 static unsigned char table[64] =
548 {
549 32, 31, -0, 16, -0, 30, 3, -0, 15, -0, -0, -0, 29, 10, 2, -0,
550 -0, -0, 12, 14, 21, -0, 19, -0, -0, 28, -0, 25, -0, 9, 1, -0,
551 17, -0, 4, -0, -0, -0, 11, -0, 13, 22, 20, -0, 26, -0, -0, 18,
552 5, -0, -0, 23, -0, 27, -0, 6, -0, 24, 7, -0, 8, -0, 0, -0
553 };
554
555 /* propagate leftmost 1 bit to the right */
556 x |= x >> 1;
557 x |= x >> 2;
558 x |= x >> 4;
559 x |= x >> 8;
560 x |= x >> 16;
561
562 /* magic constant results in 33 unique values in the upper 6 bits */
563 x *= 0x06EB14F9U; /* == 7 * 255 * 255 * 255 */
564
565 return table [x >> 26];
566#endif
567 }
568
569 ecb_function_ ecb_const int ecb_clz64 (uint64_t x);
570 ecb_function_ ecb_const int ecb_clz64 (uint64_t x)
571 {
572#if 1400 <= _MSC_VER && (_M_X64 || _M_IA64 || _M_ARM)
573 unsigned long r;
574 _BitScanReverse64 (&r, x);
575 return (int)r;
576#else
577 uint32_t l = x >> 32;
578 int shift = l ? 0 : 32;
579 return ecb_clz32 (l ? l : x) + shift;
580#endif
581 }
582
313 ecb_function_ int ecb_popcount32 (uint32_t x) ecb_const; 583 ecb_function_ ecb_const int ecb_popcount32 (uint32_t x);
314 ecb_function_ int 584 ecb_function_ ecb_const int
315 ecb_popcount32 (uint32_t x) 585 ecb_popcount32 (uint32_t x)
316 { 586 {
317 x -= (x >> 1) & 0x55555555; 587 x -= (x >> 1) & 0x55555555;
318 x = ((x >> 2) & 0x33333333) + (x & 0x33333333); 588 x = ((x >> 2) & 0x33333333) + (x & 0x33333333);
319 x = ((x >> 4) + x) & 0x0f0f0f0f; 589 x = ((x >> 4) + x) & 0x0f0f0f0f;
320 x *= 0x01010101; 590 x *= 0x01010101;
321 591
322 return x >> 24; 592 return x >> 24;
323 } 593 }
324 594
325 ecb_function_ int ecb_ld32 (uint32_t x) ecb_const; 595 ecb_function_ ecb_const int ecb_ld32 (uint32_t x);
326 ecb_function_ int ecb_ld32 (uint32_t x) 596 ecb_function_ ecb_const int ecb_ld32 (uint32_t x)
327 { 597 {
598#if 1400 <= _MSC_VER && (_M_IX86 || _M_X64 || _M_IA64 || _M_ARM)
599 unsigned long r;
600 _BitScanReverse (&r, x);
601 return (int)r;
602#else
328 int r = 0; 603 int r = 0;
329 604
330 if (x >> 16) { x >>= 16; r += 16; } 605 if (x >> 16) { x >>= 16; r += 16; }
331 if (x >> 8) { x >>= 8; r += 8; } 606 if (x >> 8) { x >>= 8; r += 8; }
332 if (x >> 4) { x >>= 4; r += 4; } 607 if (x >> 4) { x >>= 4; r += 4; }
333 if (x >> 2) { x >>= 2; r += 2; } 608 if (x >> 2) { x >>= 2; r += 2; }
334 if (x >> 1) { r += 1; } 609 if (x >> 1) { r += 1; }
335 610
336 return r; 611 return r;
612#endif
337 } 613 }
338 614
339 ecb_function_ int ecb_ld64 (uint64_t x) ecb_const; 615 ecb_function_ ecb_const int ecb_ld64 (uint64_t x);
340 ecb_function_ int ecb_ld64 (uint64_t x) 616 ecb_function_ ecb_const int ecb_ld64 (uint64_t x)
341 { 617 {
618#if 1400 <= _MSC_VER && (_M_X64 || _M_IA64 || _M_ARM)
619 unsigned long r;
620 _BitScanReverse64 (&r, x);
621 return (int)r;
622#else
342 int r = 0; 623 int r = 0;
343 624
344 if (x >> 32) { x >>= 32; r += 32; } 625 if (x >> 32) { x >>= 32; r += 32; }
345 626
346 return r + ecb_ld32 (x); 627 return r + ecb_ld32 (x);
347 }
348#endif 628#endif
629 }
630#endif
349 631
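/* Illustrative usage sketch (hypothetical helper): expected results of the
 * bit-scan and popcount helpers defined above.
 */
ecb_inline void
ecb_example_bitscan (void)
{
  int a = ecb_ctz32 (0x00000008U);      /* 3 - trailing zero bits */
  int b = ecb_ld32  (0x00000008U);      /* 3 - floor (log2 (x))   */
  int c = ecb_popcount32 (0x000000ffU); /* 8 - bits set           */

  (void)a; (void)b; (void)c;
}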
350ecb_function_ ecb_bool ecb_is_pot32 (uint32_t x) ecb_const; 632ecb_function_ ecb_const ecb_bool ecb_is_pot32 (uint32_t x);
351ecb_function_ ecb_bool ecb_is_pot32 (uint32_t x) { return !(x & (x - 1)); } 633ecb_function_ ecb_const ecb_bool ecb_is_pot32 (uint32_t x) { return !(x & (x - 1)); }
352ecb_function_ ecb_bool ecb_is_pot64 (uint64_t x) ecb_const; 634ecb_function_ ecb_const ecb_bool ecb_is_pot64 (uint64_t x);
353ecb_function_ ecb_bool ecb_is_pot64 (uint64_t x) { return !(x & (x - 1)); } 635ecb_function_ ecb_const ecb_bool ecb_is_pot64 (uint64_t x) { return !(x & (x - 1)); }
354 636
355ecb_function_ uint8_t ecb_bitrev8 (uint8_t x) ecb_const; 637ecb_function_ ecb_const uint8_t ecb_bitrev8 (uint8_t x);
356ecb_function_ uint8_t ecb_bitrev8 (uint8_t x) 638ecb_function_ ecb_const uint8_t ecb_bitrev8 (uint8_t x)
357{ 639{
358 return ( (x * 0x0802U & 0x22110U) 640 return ( (x * 0x0802U & 0x22110U)
359 | (x * 0x8020U & 0x88440U)) * 0x10101U >> 16; 641 | (x * 0x8020U & 0x88440U)) * 0x10101U >> 16;
360} 642}
361 643
362ecb_function_ uint16_t ecb_bitrev16 (uint16_t x) ecb_const; 644ecb_function_ ecb_const uint16_t ecb_bitrev16 (uint16_t x);
363ecb_function_ uint16_t ecb_bitrev16 (uint16_t x) 645ecb_function_ ecb_const uint16_t ecb_bitrev16 (uint16_t x)
364{ 646{
365 x = ((x >> 1) & 0x5555) | ((x & 0x5555) << 1); 647 x = ((x >> 1) & 0x5555) | ((x & 0x5555) << 1);
366 x = ((x >> 2) & 0x3333) | ((x & 0x3333) << 2); 648 x = ((x >> 2) & 0x3333) | ((x & 0x3333) << 2);
367 x = ((x >> 4) & 0x0f0f) | ((x & 0x0f0f) << 4); 649 x = ((x >> 4) & 0x0f0f) | ((x & 0x0f0f) << 4);
368 x = ( x >> 8 ) | ( x << 8); 650 x = ( x >> 8 ) | ( x << 8);
369 651
370 return x; 652 return x;
371} 653}
372 654
373ecb_function_ uint32_t ecb_bitrev32 (uint32_t x) ecb_const; 655ecb_function_ ecb_const uint32_t ecb_bitrev32 (uint32_t x);
374ecb_function_ uint32_t ecb_bitrev32 (uint32_t x) 656ecb_function_ ecb_const uint32_t ecb_bitrev32 (uint32_t x)
375{ 657{
376 x = ((x >> 1) & 0x55555555) | ((x & 0x55555555) << 1); 658 x = ((x >> 1) & 0x55555555) | ((x & 0x55555555) << 1);
377 x = ((x >> 2) & 0x33333333) | ((x & 0x33333333) << 2); 659 x = ((x >> 2) & 0x33333333) | ((x & 0x33333333) << 2);
378 x = ((x >> 4) & 0x0f0f0f0f) | ((x & 0x0f0f0f0f) << 4); 660 x = ((x >> 4) & 0x0f0f0f0f) | ((x & 0x0f0f0f0f) << 4);
379 x = ((x >> 8) & 0x00ff00ff) | ((x & 0x00ff00ff) << 8); 661 x = ((x >> 8) & 0x00ff00ff) | ((x & 0x00ff00ff) << 8);
380 x = ( x >> 16 ) | ( x << 16); 662 x = ( x >> 16 ) | ( x << 16);
381 663
382 return x; 664 return x;
383} 665}
384 666
667ecb_function_ ecb_const int ecb_popcount64 (uint64_t x);
668ecb_function_ ecb_const int ecb_popcount64 (uint64_t x)
669{
385/* popcount64 is only available on 64 bit cpus as gcc builtin */ 670 /* popcount64 is only available on 64 bit cpus as gcc builtin. */
386/* so for this version we are lazy */ 671 /* also, gcc/clang make this surprisingly difficult to use */
387ecb_function_ int ecb_popcount64 (uint64_t x) ecb_const; 672#if (__SIZEOF_LONG__ == 8) && (ECB_GCC_VERSION(3,4) || ECB_CLANG_BUILTIN (__builtin_popcountl))
388ecb_function_ int 673 return __builtin_popcountl (x);
389ecb_popcount64 (uint64_t x) 674#else
390{
391 return ecb_popcount32 (x) + ecb_popcount32 (x >> 32); 675 return ecb_popcount32 (x) + ecb_popcount32 (x >> 32);
676#endif
392} 677}
393 678
394ecb_inline uint8_t ecb_rotl8 (uint8_t x, unsigned int count) ecb_const; 679ecb_inline ecb_const uint8_t ecb_rotl8 (uint8_t x, unsigned int count);
395ecb_inline uint8_t ecb_rotr8 (uint8_t x, unsigned int count) ecb_const; 680ecb_inline ecb_const uint8_t ecb_rotr8 (uint8_t x, unsigned int count);
396ecb_inline uint16_t ecb_rotl16 (uint16_t x, unsigned int count) ecb_const; 681ecb_inline ecb_const uint16_t ecb_rotl16 (uint16_t x, unsigned int count);
397ecb_inline uint16_t ecb_rotr16 (uint16_t x, unsigned int count) ecb_const; 682ecb_inline ecb_const uint16_t ecb_rotr16 (uint16_t x, unsigned int count);
398ecb_inline uint32_t ecb_rotl32 (uint32_t x, unsigned int count) ecb_const; 683ecb_inline ecb_const uint32_t ecb_rotl32 (uint32_t x, unsigned int count);
399ecb_inline uint32_t ecb_rotr32 (uint32_t x, unsigned int count) ecb_const; 684ecb_inline ecb_const uint32_t ecb_rotr32 (uint32_t x, unsigned int count);
400ecb_inline uint64_t ecb_rotl64 (uint64_t x, unsigned int count) ecb_const; 685ecb_inline ecb_const uint64_t ecb_rotl64 (uint64_t x, unsigned int count);
401ecb_inline uint64_t ecb_rotr64 (uint64_t x, unsigned int count) ecb_const; 686ecb_inline ecb_const uint64_t ecb_rotr64 (uint64_t x, unsigned int count);
402 687
403ecb_inline uint8_t ecb_rotl8 (uint8_t x, unsigned int count) { return (x >> ( 8 - count)) | (x << count); } 688ecb_inline ecb_const uint8_t ecb_rotl8 (uint8_t x, unsigned int count) { return (x >> (-count & 7)) | (x << (count & 7)); }
404ecb_inline uint8_t ecb_rotr8 (uint8_t x, unsigned int count) { return (x << ( 8 - count)) | (x >> count); } 689ecb_inline ecb_const uint8_t ecb_rotr8 (uint8_t x, unsigned int count) { return (x << (-count & 7)) | (x >> (count & 7)); }
405ecb_inline uint16_t ecb_rotl16 (uint16_t x, unsigned int count) { return (x >> (16 - count)) | (x << count); } 690ecb_inline ecb_const uint16_t ecb_rotl16 (uint16_t x, unsigned int count) { return (x >> (-count & 15)) | (x << (count & 15)); }
406ecb_inline uint16_t ecb_rotr16 (uint16_t x, unsigned int count) { return (x << (16 - count)) | (x >> count); } 691ecb_inline ecb_const uint16_t ecb_rotr16 (uint16_t x, unsigned int count) { return (x << (-count & 15)) | (x >> (count & 15)); }
407ecb_inline uint32_t ecb_rotl32 (uint32_t x, unsigned int count) { return (x >> (32 - count)) | (x << count); } 692ecb_inline ecb_const uint32_t ecb_rotl32 (uint32_t x, unsigned int count) { return (x >> (-count & 31)) | (x << (count & 31)); }
408ecb_inline uint32_t ecb_rotr32 (uint32_t x, unsigned int count) { return (x << (32 - count)) | (x >> count); } 693ecb_inline ecb_const uint32_t ecb_rotr32 (uint32_t x, unsigned int count) { return (x << (-count & 31)) | (x >> (count & 31)); }
409ecb_inline uint64_t ecb_rotl64 (uint64_t x, unsigned int count) { return (x >> (64 - count)) | (x << count); } 694ecb_inline ecb_const uint64_t ecb_rotl64 (uint64_t x, unsigned int count) { return (x >> (-count & 63)) | (x << (count & 63)); }
410ecb_inline uint64_t ecb_rotr64 (uint64_t x, unsigned int count) { return (x << (64 - count)) | (x >> count); } 695ecb_inline ecb_const uint64_t ecb_rotr64 (uint64_t x, unsigned int count) { return (x << (-count & 63)) | (x >> (count & 63)); }
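/* Illustrative usage sketch (hypothetical helper): the revised rotate helpers
 * mask the count, so rotating by 0 or by the full word width is well defined.
 */
ecb_inline void
ecb_example_rotate (void)
{
  uint32_t a = ecb_rotl32 (0x80000001U, 1); /* 0x00000003 */
  uint32_t b = ecb_rotr32 (0x80000001U, 1); /* 0xc0000000 */
  uint32_t c = ecb_rotl32 (0x12345678U, 0); /* unchanged  */

  (void)a; (void)b; (void)c;
}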
411 696
412#if ECB_GCC_VERSION(4,3) 697#if ECB_CPP
698
699inline uint8_t ecb_ctz (uint8_t v) { return ecb_ctz32 (v); }
700inline uint16_t ecb_ctz (uint16_t v) { return ecb_ctz32 (v); }
701inline uint32_t ecb_ctz (uint32_t v) { return ecb_ctz32 (v); }
702inline uint64_t ecb_ctz (uint64_t v) { return ecb_ctz64 (v); }
703
704inline bool ecb_is_pot (uint8_t v) { return ecb_is_pot32 (v); }
705inline bool ecb_is_pot (uint16_t v) { return ecb_is_pot32 (v); }
706inline bool ecb_is_pot (uint32_t v) { return ecb_is_pot32 (v); }
707inline bool ecb_is_pot (uint64_t v) { return ecb_is_pot64 (v); }
708
709inline int ecb_ld (uint8_t v) { return ecb_ld32 (v); }
710inline int ecb_ld (uint16_t v) { return ecb_ld32 (v); }
711inline int ecb_ld (uint32_t v) { return ecb_ld32 (v); }
712inline int ecb_ld (uint64_t v) { return ecb_ld64 (v); }
713
714inline int ecb_popcount (uint8_t v) { return ecb_popcount32 (v); }
715inline int ecb_popcount (uint16_t v) { return ecb_popcount32 (v); }
716inline int ecb_popcount (uint32_t v) { return ecb_popcount32 (v); }
717inline int ecb_popcount (uint64_t v) { return ecb_popcount64 (v); }
718
719inline uint8_t ecb_bitrev (uint8_t v) { return ecb_bitrev8 (v); }
720inline uint16_t ecb_bitrev (uint16_t v) { return ecb_bitrev16 (v); }
721inline uint32_t ecb_bitrev (uint32_t v) { return ecb_bitrev32 (v); }
722
723inline uint8_t ecb_rotl (uint8_t v, unsigned int count) { return ecb_rotl8 (v, count); }
724inline uint16_t ecb_rotl (uint16_t v, unsigned int count) { return ecb_rotl16 (v, count); }
725inline uint32_t ecb_rotl (uint32_t v, unsigned int count) { return ecb_rotl32 (v, count); }
726inline uint64_t ecb_rotl (uint64_t v, unsigned int count) { return ecb_rotl64 (v, count); }
727
728inline uint8_t ecb_rotr (uint8_t v, unsigned int count) { return ecb_rotr8 (v, count); }
729inline uint16_t ecb_rotr (uint16_t v, unsigned int count) { return ecb_rotr16 (v, count); }
730inline uint32_t ecb_rotr (uint32_t v, unsigned int count) { return ecb_rotr32 (v, count); }
731inline uint64_t ecb_rotr (uint64_t v, unsigned int count) { return ecb_rotr64 (v, count); }
732
733#endif
734
735#if ECB_GCC_VERSION(4,3) || (ECB_CLANG_BUILTIN(__builtin_bswap32) && ECB_CLANG_BUILTIN(__builtin_bswap64))
736 #if ECB_GCC_VERSION(4,8) || ECB_CLANG_BUILTIN(__builtin_bswap16)
737 #define ecb_bswap16(x) __builtin_bswap16 (x)
738 #else
413 #define ecb_bswap16(x) (__builtin_bswap32 (x) >> 16) 739 #define ecb_bswap16(x) (__builtin_bswap32 (x) >> 16)
740 #endif
414 #define ecb_bswap32(x) __builtin_bswap32 (x) 741 #define ecb_bswap32(x) __builtin_bswap32 (x)
415 #define ecb_bswap64(x) __builtin_bswap64 (x) 742 #define ecb_bswap64(x) __builtin_bswap64 (x)
743#elif _MSC_VER
744 #include <stdlib.h>
745 #define ecb_bswap16(x) ((uint16_t)_byteswap_ushort ((uint16_t)(x)))
746 #define ecb_bswap32(x) ((uint32_t)_byteswap_ulong ((uint32_t)(x)))
747 #define ecb_bswap64(x) ((uint64_t)_byteswap_uint64 ((uint64_t)(x)))
416#else 748#else
417 ecb_function_ uint16_t ecb_bswap16 (uint16_t x) ecb_const; 749 ecb_function_ ecb_const uint16_t ecb_bswap16 (uint16_t x);
418 ecb_function_ uint16_t 750 ecb_function_ ecb_const uint16_t ecb_bswap16 (uint16_t x)
419 ecb_bswap16 (uint16_t x)
420 { 751 {
421 return ecb_rotl16 (x, 8); 752 return ecb_rotl16 (x, 8);
422 } 753 }
423 754
424 ecb_function_ uint32_t ecb_bswap32 (uint32_t x) ecb_const; 755 ecb_function_ ecb_const uint32_t ecb_bswap32 (uint32_t x);
425 ecb_function_ uint32_t 756 ecb_function_ ecb_const uint32_t ecb_bswap32 (uint32_t x)
426 ecb_bswap32 (uint32_t x)
427 { 757 {
428 return (((uint32_t)ecb_bswap16 (x)) << 16) | ecb_bswap16 (x >> 16); 758 return (((uint32_t)ecb_bswap16 (x)) << 16) | ecb_bswap16 (x >> 16);
429 } 759 }
430 760
431 ecb_function_ uint64_t ecb_bswap64 (uint64_t x) ecb_const; 761 ecb_function_ ecb_const uint64_t ecb_bswap64 (uint64_t x);
432 ecb_function_ uint64_t 762 ecb_function_ ecb_const uint64_t ecb_bswap64 (uint64_t x)
433 ecb_bswap64 (uint64_t x)
434 { 763 {
435 return (((uint64_t)ecb_bswap32 (x)) << 32) | ecb_bswap32 (x >> 32); 764 return (((uint64_t)ecb_bswap32 (x)) << 32) | ecb_bswap32 (x >> 32);
436 } 765 }
437#endif 766#endif
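/* Illustrative usage sketch (hypothetical helper): byte-order reversal. */
ecb_inline void
ecb_example_bswap (void)
{
  uint16_t a = ecb_bswap16 (0x1122);                /* 0x2211             */
  uint32_t b = ecb_bswap32 (0x11223344U);           /* 0x44332211         */
  uint64_t c = ecb_bswap64 (0x1122334455667788ULL); /* 0x8877665544332211 */

  (void)a; (void)b; (void)c;
}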
438 767
439#if ECB_GCC_VERSION(4,5) 768#if ECB_GCC_VERSION(4,5) || ECB_CLANG_BUILTIN(__builtin_unreachable)
440 #define ecb_unreachable() __builtin_unreachable () 769 #define ecb_unreachable() __builtin_unreachable ()
441#else 770#else
442 /* this seems to work fine, but gcc always emits a warning for it :/ */ 771 /* this seems to work fine, but gcc always emits a warning for it :/ */
443 ecb_inline void ecb_unreachable (void) ecb_noreturn; 772 ecb_inline ecb_noreturn void ecb_unreachable (void);
444 ecb_inline void ecb_unreachable (void) { } 773 ecb_inline ecb_noreturn void ecb_unreachable (void) { }
445#endif 774#endif
446 775
447/* try to tell the compiler that some condition is definitely true */ 776/* try to tell the compiler that some condition is definitely true */
448#define ecb_assume(cond) do { if (!(cond)) ecb_unreachable (); } while (0) 777#define ecb_assume(cond) if (!(cond)) ecb_unreachable (); else 0
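/* Illustrative usage sketch (hypothetical helper): promise the optimiser that
 * a pointer is non-NULL; behaviour is undefined if the promise is broken.
 */
ecb_inline int
ecb_example_first (const int *p)
{
  ecb_assume (p != 0);
  return p[0];
}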
449 778
450ecb_inline unsigned char ecb_byteorder_helper (void) ecb_const; 779ecb_inline ecb_const uint32_t ecb_byteorder_helper (void);
451ecb_inline unsigned char 780ecb_inline ecb_const uint32_t ecb_byteorder_helper (void)
452ecb_byteorder_helper (void)
453{ 781{
454 const uint32_t u = 0x11223344; 782 /* the union code still generates code under pressure in gcc, */
455 return *(unsigned char *)&u; 783 /* but less than using pointers, and always seems to */
784 /* successfully return a constant. */
785 /* the reason why we have this horrible preprocessor mess */
786 /* is to avoid it in all cases, at least on common architectures */
787 /* or when using a recent enough gcc version (>= 4.6) */
788#if (defined __BYTE_ORDER__ && __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__) \
789 || ((__i386 || __i386__ || _M_IX86 || ECB_GCC_AMD64 || ECB_MSVC_AMD64) && !__VOS__)
790 #define ECB_LITTLE_ENDIAN 1
791 return 0x44332211;
792#elif (defined __BYTE_ORDER__ && __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__) \
793 || ((__AARCH64EB__ || __MIPSEB__ || __ARMEB__) && !__VOS__)
794 #define ECB_BIG_ENDIAN 1
795 return 0x11223344;
796#else
797 union
798 {
799 uint8_t c[4];
800 uint32_t u;
801 } u = { 0x11, 0x22, 0x33, 0x44 };
802 return u.u;
803#endif
456} 804}
457 805
458ecb_inline ecb_bool ecb_big_endian (void) ecb_const; 806ecb_inline ecb_const ecb_bool ecb_big_endian (void);
459ecb_inline ecb_bool ecb_big_endian (void) { return ecb_byteorder_helper () == 0x11; } 807ecb_inline ecb_const ecb_bool ecb_big_endian (void) { return ecb_byteorder_helper () == 0x11223344; }
460ecb_inline ecb_bool ecb_little_endian (void) ecb_const; 808ecb_inline ecb_const ecb_bool ecb_little_endian (void);
461ecb_inline ecb_bool ecb_little_endian (void) { return ecb_byteorder_helper () == 0x44; } 809ecb_inline ecb_const ecb_bool ecb_little_endian (void) { return ecb_byteorder_helper () == 0x44332211; }
810
811/*****************************************************************************/
812/* unaligned load/store */
813
814ecb_inline uint_fast16_t ecb_be_u16_to_host (uint_fast16_t v) { return ecb_little_endian () ? ecb_bswap16 (v) : v; }
815ecb_inline uint_fast32_t ecb_be_u32_to_host (uint_fast32_t v) { return ecb_little_endian () ? ecb_bswap32 (v) : v; }
816ecb_inline uint_fast64_t ecb_be_u64_to_host (uint_fast64_t v) { return ecb_little_endian () ? ecb_bswap64 (v) : v; }
817
818ecb_inline uint_fast16_t ecb_le_u16_to_host (uint_fast16_t v) { return ecb_big_endian () ? ecb_bswap16 (v) : v; }
819ecb_inline uint_fast32_t ecb_le_u32_to_host (uint_fast32_t v) { return ecb_big_endian () ? ecb_bswap32 (v) : v; }
820ecb_inline uint_fast64_t ecb_le_u64_to_host (uint_fast64_t v) { return ecb_big_endian () ? ecb_bswap64 (v) : v; }
821
822ecb_inline uint_fast16_t ecb_peek_u16_u (const void *ptr) { uint16_t v; memcpy (&v, ptr, sizeof (v)); return v; }
823ecb_inline uint_fast32_t ecb_peek_u32_u (const void *ptr) { uint32_t v; memcpy (&v, ptr, sizeof (v)); return v; }
824ecb_inline uint_fast64_t ecb_peek_u64_u (const void *ptr) { uint64_t v; memcpy (&v, ptr, sizeof (v)); return v; }
825
826ecb_inline uint_fast16_t ecb_peek_be_u16_u (const void *ptr) { return ecb_be_u16_to_host (ecb_peek_u16_u (ptr)); }
827ecb_inline uint_fast32_t ecb_peek_be_u32_u (const void *ptr) { return ecb_be_u32_to_host (ecb_peek_u32_u (ptr)); }
828ecb_inline uint_fast64_t ecb_peek_be_u64_u (const void *ptr) { return ecb_be_u64_to_host (ecb_peek_u64_u (ptr)); }
829
830ecb_inline uint_fast16_t ecb_peek_le_u16_u (const void *ptr) { return ecb_le_u16_to_host (ecb_peek_u16_u (ptr)); }
831ecb_inline uint_fast32_t ecb_peek_le_u32_u (const void *ptr) { return ecb_le_u32_to_host (ecb_peek_u32_u (ptr)); }
832ecb_inline uint_fast64_t ecb_peek_le_u64_u (const void *ptr) { return ecb_le_u64_to_host (ecb_peek_u64_u (ptr)); }
833
834ecb_inline uint_fast16_t ecb_host_to_be_u16 (uint_fast16_t v) { return ecb_little_endian () ? ecb_bswap16 (v) : v; }
835ecb_inline uint_fast32_t ecb_host_to_be_u32 (uint_fast32_t v) { return ecb_little_endian () ? ecb_bswap32 (v) : v; }
836ecb_inline uint_fast64_t ecb_host_to_be_u64 (uint_fast64_t v) { return ecb_little_endian () ? ecb_bswap64 (v) : v; }
837
838ecb_inline uint_fast16_t ecb_host_to_le_u16 (uint_fast16_t v) { return ecb_big_endian () ? ecb_bswap16 (v) : v; }
839ecb_inline uint_fast32_t ecb_host_to_le_u32 (uint_fast32_t v) { return ecb_big_endian () ? ecb_bswap32 (v) : v; }
840ecb_inline uint_fast64_t ecb_host_to_le_u64 (uint_fast64_t v) { return ecb_big_endian () ? ecb_bswap64 (v) : v; }
841
842ecb_inline void ecb_poke_u16_u (void *ptr, uint16_t v) { memcpy (ptr, &v, sizeof (v)); }
843ecb_inline void ecb_poke_u32_u (void *ptr, uint32_t v) { memcpy (ptr, &v, sizeof (v)); }
844ecb_inline void ecb_poke_u64_u (void *ptr, uint64_t v) { memcpy (ptr, &v, sizeof (v)); }
845
846ecb_inline void ecb_poke_be_u16_u (void *ptr, uint_fast16_t v) { ecb_poke_u16_u (ptr, ecb_host_to_be_u16 (v)); }
847ecb_inline void ecb_poke_be_u32_u (void *ptr, uint_fast32_t v) { ecb_poke_u32_u (ptr, ecb_host_to_be_u32 (v)); }
848ecb_inline void ecb_poke_be_u64_u (void *ptr, uint_fast64_t v) { ecb_poke_u64_u (ptr, ecb_host_to_be_u64 (v)); }
849
850ecb_inline void ecb_poke_le_u16_u (void *ptr, uint_fast16_t v) { ecb_poke_u16_u (ptr, ecb_host_to_le_u16 (v)); }
851ecb_inline void ecb_poke_le_u32_u (void *ptr, uint_fast32_t v) { ecb_poke_u32_u (ptr, ecb_host_to_le_u32 (v)); }
852ecb_inline void ecb_poke_le_u64_u (void *ptr, uint_fast64_t v) { ecb_poke_u64_u (ptr, ecb_host_to_le_u64 (v)); }
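/* Illustrative usage sketch (hypothetical helper): read a big-endian 32 bit
 * length field from a possibly unaligned byte buffer and store it back in
 * little-endian byte order.
 */
ecb_inline uint32_t
ecb_example_reencode_len (void *dst, const void *src)
{
  uint32_t len = ecb_peek_be_u32_u (src); /* unaligned load, big-endian     */
  ecb_poke_le_u32_u (dst, len);           /* unaligned store, little-endian */
  return len;
}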
853
854#if ECB_CPP
855
856inline uint8_t ecb_bswap (uint8_t v) { return v; }
857inline uint16_t ecb_bswap (uint16_t v) { return ecb_bswap16 (v); }
858inline uint32_t ecb_bswap (uint32_t v) { return ecb_bswap32 (v); }
859inline uint64_t ecb_bswap (uint64_t v) { return ecb_bswap64 (v); }
860
861template<typename T> inline T ecb_be_to_host (T v) { return ecb_little_endian () ? ecb_bswap (v) : v; }
862template<typename T> inline T ecb_le_to_host (T v) { return ecb_big_endian () ? ecb_bswap (v) : v; }
863template<typename T> inline T ecb_peek (const void *ptr) { return *(const T *)ptr; }
864template<typename T> inline T ecb_peek_be (const void *ptr) { return ecb_be_to_host (ecb_peek <T> (ptr)); }
865template<typename T> inline T ecb_peek_le (const void *ptr) { return ecb_le_to_host (ecb_peek <T> (ptr)); }
866template<typename T> inline T ecb_peek_u (const void *ptr) { T v; memcpy (&v, ptr, sizeof (v)); return v; }
867template<typename T> inline T ecb_peek_be_u (const void *ptr) { return ecb_be_to_host (ecb_peek_u<T> (ptr)); }
868template<typename T> inline T ecb_peek_le_u (const void *ptr) { return ecb_le_to_host (ecb_peek_u<T> (ptr)); }
869
870template<typename T> inline T ecb_host_to_be (T v) { return ecb_little_endian () ? ecb_bswap (v) : v; }
871template<typename T> inline T ecb_host_to_le (T v) { return ecb_big_endian () ? ecb_bswap (v) : v; }
872template<typename T> inline void ecb_poke (void *ptr, T v) { *(T *)ptr = v; }
873template<typename T> inline void ecb_poke_be (void *ptr, T v) { return ecb_poke <T> (ptr, ecb_host_to_be (v)); }
874template<typename T> inline void ecb_poke_le (void *ptr, T v) { return ecb_poke <T> (ptr, ecb_host_to_le (v)); }
875template<typename T> inline void ecb_poke_u (void *ptr, T v) { memcpy (ptr, &v, sizeof (v)); }
876template<typename T> inline void ecb_poke_be_u (void *ptr, T v) { return ecb_poke_u<T> (ptr, ecb_host_to_be (v)); }
877template<typename T> inline void ecb_poke_le_u (void *ptr, T v) { return ecb_poke_u<T> (ptr, ecb_host_to_le (v)); }
878
879#endif
880
881/*****************************************************************************/
882/* pointer/integer hashing */
883
884/* based on hash by Chris Wellons, https://nullprogram.com/blog/2018/07/31/ */
885ecb_function_ uint32_t ecb_mix32 (uint32_t v);
886ecb_function_ uint32_t ecb_mix32 (uint32_t v)
887{
888 v ^= v >> 16; v *= 0x7feb352dU;
889 v ^= v >> 15; v *= 0x846ca68bU;
890 v ^= v >> 16;
891 return v;
892}
893
894ecb_function_ uint32_t ecb_unmix32 (uint32_t v);
895ecb_function_ uint32_t ecb_unmix32 (uint32_t v)
896{
897 v ^= v >> 16 ; v *= 0x43021123U;
898 v ^= v >> 15 ^ v >> 30; v *= 0x1d69e2a5U;
899 v ^= v >> 16 ;
900 return v;
901}
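/* Illustrative usage sketch (hypothetical helper): ecb_mix32 is a bijective
 * integer hash, so ecb_unmix32 recovers the original key exactly.
 */
ecb_inline ecb_bool
ecb_example_mix_roundtrip (uint32_t key)
{
  return ecb_unmix32 (ecb_mix32 (key)) == key; /* always true */
}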
902
 903/* based on splitmix64, by Sebastiano Vigna, https://prng.di.unimi.it/splitmix64.c */
904ecb_function_ uint64_t ecb_mix64 (uint64_t v);
905ecb_function_ uint64_t ecb_mix64 (uint64_t v)
906{
907 v ^= v >> 30; v *= 0xbf58476d1ce4e5b9U;
908 v ^= v >> 27; v *= 0x94d049bb133111ebU;
909 v ^= v >> 31;
910 return v;
911}
912
913ecb_function_ uint64_t ecb_unmix64 (uint64_t v);
914ecb_function_ uint64_t ecb_unmix64 (uint64_t v)
915{
916 v ^= v >> 31 ^ v >> 62; v *= 0x319642b2d24d8ec3U;
917 v ^= v >> 27 ^ v >> 54; v *= 0x96de1b173f119089U;
918 v ^= v >> 30 ^ v >> 60;
919 return v;
920}
921
922ecb_function_ uintptr_t ecb_ptrmix (void *p);
923ecb_function_ uintptr_t ecb_ptrmix (void *p)
924{
925 #if ECB_PTRSIZE <= 4
926 return ecb_mix32 ((uint32_t)p);
927 #else
928 return ecb_mix64 ((uint64_t)p);
929 #endif
930}
931
932ecb_function_ void *ecb_ptrunmix (uintptr_t v);
933ecb_function_ void *ecb_ptrunmix (uintptr_t v)
934{
935 #if ECB_PTRSIZE <= 4
936 return (void *)ecb_unmix32 (v);
937 #else
938 return (void *)ecb_unmix64 (v);
939 #endif
940}
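/* Illustrative usage sketch (hypothetical helper and table size): hash a
 * pointer into a slot of a power-of-two sized hash table.
 */
ecb_inline unsigned int
ecb_example_ptr_slot (void *p)
{
  return ecb_ptrmix (p) & 1023; /* 1024-entry table, purely illustrative */
}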
941
942#if ECB_CPP
943
944template<typename T>
945inline uintptr_t ecb_ptrmix (T *p)
946{
947 return ecb_ptrmix (static_cast<void *>(p));
948}
949
950template<typename T>
951inline T *ecb_ptrunmix (uintptr_t v)
952{
953 return static_cast<T *>(ecb_ptrunmix (v));
954}
955
956#endif
957
958/*****************************************************************************/
959/* gray code */
960
961ecb_inline uint_fast8_t ecb_gray_encode8 (uint_fast8_t b) { return b ^ (b >> 1); }
962ecb_inline uint_fast16_t ecb_gray_encode16 (uint_fast16_t b) { return b ^ (b >> 1); }
963ecb_inline uint_fast32_t ecb_gray_encode32 (uint_fast32_t b) { return b ^ (b >> 1); }
964ecb_inline uint_fast64_t ecb_gray_encode64 (uint_fast64_t b) { return b ^ (b >> 1); }
965
966ecb_function_ uint8_t ecb_gray_decode8 (uint8_t g);
967ecb_function_ uint8_t ecb_gray_decode8 (uint8_t g)
968{
969 g ^= g >> 1;
970 g ^= g >> 2;
971 g ^= g >> 4;
972
973 return g;
974}
975
976ecb_function_ uint16_t ecb_gray_decode16 (uint16_t g);
977ecb_function_ uint16_t ecb_gray_decode16 (uint16_t g)
978{
979 g ^= g >> 1;
980 g ^= g >> 2;
981 g ^= g >> 4;
982 g ^= g >> 8;
983
984 return g;
985}
986
987ecb_function_ uint32_t ecb_gray_decode32 (uint32_t g);
988ecb_function_ uint32_t ecb_gray_decode32 (uint32_t g)
989{
990 g ^= g >> 1;
991 g ^= g >> 2;
992 g ^= g >> 4;
993 g ^= g >> 8;
994 g ^= g >> 16;
995
996 return g;
997}
998
999ecb_function_ uint64_t ecb_gray_decode64 (uint64_t g);
1000ecb_function_ uint64_t ecb_gray_decode64 (uint64_t g)
1001{
1002 g ^= g >> 1;
1003 g ^= g >> 2;
1004 g ^= g >> 4;
1005 g ^= g >> 8;
1006 g ^= g >> 16;
1007 g ^= g >> 32;
1008
1009 return g;
1010}
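/* Illustrative usage sketch (hypothetical helper): neighbouring integers map
 * to gray codes that differ in exactly one bit, and decode inverts encode.
 */
ecb_inline ecb_bool
ecb_example_gray (uint32_t i)
{
  uint32_t g0 = ecb_gray_encode32 (i);
  uint32_t g1 = ecb_gray_encode32 (i + 1);

  return ecb_popcount32 (g0 ^ g1) == 1   /* one bit flips per step */
      && ecb_gray_decode32 (g0) == i;    /* decode inverts encode  */
}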
1011
1012#if ECB_CPP
1013
1014ecb_inline uint8_t ecb_gray_encode (uint8_t b) { return ecb_gray_encode8 (b); }
1015ecb_inline uint16_t ecb_gray_encode (uint16_t b) { return ecb_gray_encode16 (b); }
1016ecb_inline uint32_t ecb_gray_encode (uint32_t b) { return ecb_gray_encode32 (b); }
1017ecb_inline uint64_t ecb_gray_encode (uint64_t b) { return ecb_gray_encode64 (b); }
1018
1019ecb_inline uint8_t ecb_gray_decode (uint8_t g) { return ecb_gray_decode8 (g); }
1020ecb_inline uint16_t ecb_gray_decode (uint16_t g) { return ecb_gray_decode16 (g); }
1021ecb_inline uint32_t ecb_gray_decode (uint32_t g) { return ecb_gray_decode32 (g); }
1022ecb_inline uint64_t ecb_gray_decode (uint64_t g) { return ecb_gray_decode64 (g); }
1023
1024#endif
1025
1026/*****************************************************************************/
1027/* 2d hilbert curves */
1028
1029/* algorithm from the book Hacker's Delight, modified to not */
1030/* run into undefined behaviour for n==16 */
1031static uint32_t ecb_hilbert2d_index_to_coord32 (int n, uint32_t s);
1032static uint32_t ecb_hilbert2d_index_to_coord32 (int n, uint32_t s)
1033{
1034 uint32_t comp, swap, cs, t, sr;
1035
1036 /* pad s on the left (unused) bits with 01 (no change groups) */
1037 s |= 0x55555555U << n << n;
1038 /* "s shift right" */
1039 sr = (s >> 1) & 0x55555555U;
1040 /* compute complement and swap info in two-bit groups */
1041 cs = ((s & 0x55555555U) + sr) ^ 0x55555555U;
1042
1043 /* parallel prefix xor op to propagate both complement
1044 * and swap info together from left to right (there is
1045 * no step "cs ^= cs >> 1", so in effect it computes
1046 * two independent parallel prefix operations on two
1047 * interleaved sets of sixteen bits).
1048 */
1049 cs ^= cs >> 2;
1050 cs ^= cs >> 4;
1051 cs ^= cs >> 8;
1052 cs ^= cs >> 16;
1053
1054 /* separate swap and complement bits */
1055 swap = cs & 0x55555555U;
1056 comp = (cs >> 1) & 0x55555555U;
1057
1058 /* calculate coordinates in odd and even bit positions */
1059 t = (s & swap) ^ comp;
1060 s = s ^ sr ^ t ^ (t << 1);
1061
1062 /* unpad/clear out any junk on the left */
1063 s = s & ((1U << n << n) - 1); /* unsigned 1 avoids signed overflow for n == 16 */
1064
1065 /* Now "unshuffle" to separate the x and y bits. */
1066 t = (s ^ (s >> 1)) & 0x22222222U; s ^= t ^ (t << 1);
1067 t = (s ^ (s >> 2)) & 0x0c0c0c0cU; s ^= t ^ (t << 2);
1068 t = (s ^ (s >> 4)) & 0x00f000f0U; s ^= t ^ (t << 4);
1069 t = (s ^ (s >> 8)) & 0x0000ff00U; s ^= t ^ (t << 8);
1070
1071 /* now s contains two 16-bit coordinates */
1072 return s;
1073}
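/* usage sketch (assuming the packing described here, which is also how
 * ecb_hilbert2d_coord_to_index32 below reads its argument back): the two
 * axis values end up in the low and high 16-bit halves of the result, e.g.
 * for an order-8 curve (a 256x256 grid, index s < 1 << 16):
 *
 *   uint32_t xy = ecb_hilbert2d_index_to_coord32 (8, s);
 *   uint32_t a = xy & 0xffff; // one axis, < 256
 *   uint32_t b = xy >> 16;    // the other axis, < 256
 */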
1074
1075/* 64 bit, a straightforward extension to the 32 bit case */
1076static uint64_t ecb_hilbert2d_index_to_coord64 (int n, uint64_t s);
1077static uint64_t ecb_hilbert2d_index_to_coord64 (int n, uint64_t s)
1078{
1079 uint64_t comp, swap, cs, t, sr;
1080
1081 /* pad s on the left (unused) bits with 01 (no change groups) */
1082 s |= 0x5555555555555555U << n << n;
1083 /* "s shift right" */
1084 sr = (s >> 1) & 0x5555555555555555U;
1085 /* compute complement and swap info in two-bit groups */
1086 cs = ((s & 0x5555555555555555U) + sr) ^ 0x5555555555555555U;
1087
1088 /* parallel prefix xor op to propagate both complement
1089 * and swap info together from left to right (there is
1090 * no step "cs ^= cs >> 1", so in effect it computes
1091 * two independent parallel prefix operations on two
1092 * interleaved sets of thirty-two bits).
1093 */
1094 cs ^= cs >> 2;
1095 cs ^= cs >> 4;
1096 cs ^= cs >> 8;
1097 cs ^= cs >> 16;
1098 cs ^= cs >> 32;
1099
1100 /* separate swap and complement bits */
1101 swap = cs & 0x5555555555555555U;
1102 comp = (cs >> 1) & 0x5555555555555555U;
1103
1104 /* calculate coordinates in odd and even bit positions */
1105 t = (s & swap) ^ comp;
1106 s = s ^ sr ^ t ^ (t << 1);
1107
1108 /* unpad/clear out any junk on the left */
1109 s = s & (((uint64_t)1 << n << n) - 1); /* 64 bit 1, so the mask is correct for n up to 32 */
1110
1111 /* Now "unshuffle" to separate the x and y bits. */
1112 t = (s ^ (s >> 1)) & 0x2222222222222222U; s ^= t ^ (t << 1);
1113 t = (s ^ (s >> 2)) & 0x0c0c0c0c0c0c0c0cU; s ^= t ^ (t << 2);
1114 t = (s ^ (s >> 4)) & 0x00f000f000f000f0U; s ^= t ^ (t << 4);
1115 t = (s ^ (s >> 8)) & 0x0000ff000000ff00U; s ^= t ^ (t << 8);
1116 t = (s ^ (s >> 16)) & 0x00000000ffff0000U; s ^= t ^ (t << 16);
1117
1118 /* now s contains two 32-bit coordinates */
1119 return s;
1120}
1121
1122/* algorithm from the book Hacker's Delight, but a similar algorithm */
1123/* is given in https://doi.org/10.1002/spe.4380160103 */
1124/* this has been slightly improved over the original version */
1125ecb_function_ uint32_t ecb_hilbert2d_coord_to_index32 (int n, uint32_t xy);
1126ecb_function_ uint32_t ecb_hilbert2d_coord_to_index32 (int n, uint32_t xy)
1127{
1128 uint32_t row;
1129 uint32_t state = 0;
1130 uint32_t s = 0;
1131
1132 do
1133 {
1134 --n;
1135
1136 row = 4 * state
1137 | (2 & (xy >> n >> 15))
1138 | (1 & (xy >> n ));
1139
1140 /* these funky constants are lookup tables for two-bit values */
1141 s = (s << 2) | (0x361e9cb4U >> 2 * row) & 3;
1142 state = (0x8fe65831U >> 2 * row) & 3;
1143 }
1144 while (n > 0);
1145
1146 return s;
1147}
1148
1149/* 64 bit, essentially the same as 32 bit */
1150ecb_function_ uint64_t ecb_hilbert2d_coord_to_index64 (int n, uint64_t xy);
1151ecb_function_ uint64_t ecb_hilbert2d_coord_to_index64 (int n, uint64_t xy)
1152{
1153 uint32_t row;
1154 uint32_t state = 0;
1155 uint64_t s = 0;
1156
1157 do
1158 {
1159 --n;
1160
1161 row = 4 * state
1162 | (2 & (xy >> n >> 31))
1163 | (1 & (xy >> n ));
1164
1165 /* these funky constants are lookup tables for two-bit values */
1166 s = (s << 2) | (0x361e9cb4U >> 2 * row) & 3;
1167 state = (0x8fe65831U >> 2 * row) & 3;
1168 }
1169 while (n > 0);
1170
1171 return s;
1172}
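/* sketch of the expected round-trip property: converting an index to
 * coordinates and back should return the original index, e.g.
 *
 *   uint32_t xy = ecb_hilbert2d_index_to_coord32 (16, 0x12345678U);
 *   ecb_hilbert2d_coord_to_index32 (16, xy) == 0x12345678U
 */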
1173
1174/*****************************************************************************/
1175/* division */
462 1176
463#if ECB_GCC_VERSION(3,0) || ECB_C99 1177#if ECB_GCC_VERSION(3,0) || ECB_C99
1178 /* C99 tightened the definition of %, so we can use a more efficient version */
464 #define ecb_mod(m,n) ((m) % (n) + ((m) % (n) < 0 ? (n) : 0)) 1179 #define ecb_mod(m,n) ((m) % (n) + ((m) % (n) < 0 ? (n) : 0))
465#else 1180#else
466 #define ecb_mod(m,n) ((m) < 0 ? ((n) - 1 - ((-1 - (m)) % (n))) : ((m) % (n))) 1181 #define ecb_mod(m,n) ((m) < 0 ? ((n) - 1 - ((-1 - (m)) % (n))) : ((m) % (n)))
467#endif 1182#endif
468 1183
469#if __cplusplus 1184#if ECB_CPP
470 template<typename T> 1185 template<typename T>
471 static inline T ecb_div_rd (T val, T div) 1186 static inline T ecb_div_rd (T val, T div)
472 { 1187 {
473 return val < 0 ? - ((-val + div - 1) / div) : (val ) / div; 1188 return val < 0 ? - ((-val + div - 1) / div) : (val ) / div;
474 } 1189 }
479 } 1194 }
480#else 1195#else
481 #define ecb_div_rd(val,div) ((val) < 0 ? - ((-(val) + (div) - 1) / (div)) : ((val) ) / (div)) 1196 #define ecb_div_rd(val,div) ((val) < 0 ? - ((-(val) + (div) - 1) / (div)) : ((val) ) / (div))
482 #define ecb_div_ru(val,div) ((val) < 0 ? - ((-(val) ) / (div)) : ((val) + (div) - 1) / (div)) 1197 #define ecb_div_ru(val,div) ((val) < 0 ? - ((-(val) ) / (div)) : ((val) + (div) - 1) / (div))
483#endif 1198#endif
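/* examples: for a positive divisor n, ecb_mod always yields a result in
 * [0, n), and ecb_div_rd/ecb_div_ru round towards negative/positive
 * infinity, e.g.
 *
 *   ecb_mod (-1, 5)    == 4
 *   ecb_div_rd (-7, 2) == -4   ecb_div_rd (7, 2) == 3
 *   ecb_div_ru (-7, 2) == -3   ecb_div_ru (7, 2) == 4
 */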
1199
1200/*****************************************************************************/
1201/* array length */
484 1202
485#if ecb_cplusplus_does_not_suck 1203#if ecb_cplusplus_does_not_suck
486 /* does not work for local types (http://www.open-std.org/jtc1/sc22/wg21/docs/papers/2008/n2657.htm) */ 1204 /* does not work for local types (http://www.open-std.org/jtc1/sc22/wg21/docs/papers/2008/n2657.htm) */
487 template<typename T, int N> 1205 template<typename T, int N>
488 static inline int ecb_array_length (const T (&arr)[N]) 1206 static inline int ecb_array_length (const T (&arr)[N])
491 } 1209 }
492#else 1210#else
493 #define ecb_array_length(name) (sizeof (name) / sizeof (name [0])) 1211 #define ecb_array_length(name) (sizeof (name) / sizeof (name [0]))
494#endif 1212#endif
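/* example: ecb_array_length works on actual arrays only, not on pointers:
 *
 *   static const int tbl[] = { 1, 2, 4, 8 };
 *   ecb_array_length (tbl) == 4
 */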
495 1213
1214/*****************************************************************************/
1215/* IEEE 754-2008 half float conversions */
1216
1217ecb_function_ ecb_const uint32_t ecb_binary16_to_binary32 (uint32_t x);
1218ecb_function_ ecb_const uint32_t ecb_binary16_to_binary32 (uint32_t x)
1219{
1220 unsigned int s = (x & 0x8000) << (31 - 15);
1221 int e = (x >> 10) & 0x001f;
1222 unsigned int m = x & 0x03ff;
1223
1224 if (ecb_expect_false (e == 31))
1225 /* infinity or NaN */
1226 e = 255 - (127 - 15);
1227 else if (ecb_expect_false (!e))
1228 {
1229 if (ecb_expect_true (!m))
1230 /* zero, handled by code below by forcing e to 0 */
1231 e = 0 - (127 - 15);
1232 else
1233 {
1234 /* subnormal, renormalise */
1235 unsigned int s = 10 - ecb_ld32 (m);
1236
1237 m = (m << s) & 0x3ff; /* mask implicit bit */
1238 e -= s - 1;
1239 }
1240 }
1241
1242 /* e and m now are normalised, or zero, (or inf or nan) */
1243 e += 127 - 15;
1244
1245 return s | (e << 23) | (m << (23 - 10));
1246}
1247
1248ecb_function_ ecb_const uint16_t ecb_binary32_to_binary16 (uint32_t x);
1249ecb_function_ ecb_const uint16_t ecb_binary32_to_binary16 (uint32_t x)
1250{
1251 unsigned int s = (x >> 16) & 0x00008000; /* sign bit, the easy part */
1252 int e = ((x >> 23) & 0x000000ff) - (127 - 15); /* the desired exponent */
1253 unsigned int m = x & 0x007fffff;
1254
1255 x &= 0x7fffffff;
1256
1257 /* if it's within range of binary16 normals, use fast path */
1258 if (ecb_expect_true (0x38800000 <= x && x <= 0x477fefff))
1259 {
1260 /* mantissa round-to-even */
1261 m += 0x00000fff + ((m >> (23 - 10)) & 1);
1262
1263 /* handle overflow */
1264 if (ecb_expect_false (m >= 0x00800000))
1265 {
1266 m >>= 1;
1267 e += 1;
1268 }
1269
1270 return s | (e << 10) | (m >> (23 - 10));
1271 }
1272
1273 /* handle large numbers and infinity */
1274 if (ecb_expect_true (0x477fefff < x && x <= 0x7f800000))
1275 return s | 0x7c00;
1276
1277 /* handle zero, subnormals and small numbers */
1278 if (ecb_expect_true (x < 0x38800000))
1279 {
1280 /* zero */
1281 if (ecb_expect_true (!x))
1282 return s;
1283
1284 /* handle subnormals */
1285
1286 /* too small, will be zero */
1287 if (e < (14 - 24)) /* might not be sharp, but is good enough */
1288 return s;
1289
1290 m |= 0x00800000; /* make implicit bit explicit */
1291
1292 /* very tricky - we need to round to the nearest e (+10) bit value */
1293 {
1294 unsigned int bits = 14 - e;
1295 unsigned int half = (1 << (bits - 1)) - 1;
1296 unsigned int even = (m >> bits) & 1;
1297
1298 /* if this overflows, we will end up with a normalised number */
1299 m = (m + half + even) >> bits;
1300 }
1301
1302 return s | m;
1303 }
1304
1305 /* handle NaNs, preserve leftmost nan bits, but make sure we don't turn them into infinities */
1306 m >>= 13;
1307
1308 return s | 0x7c00 | m | !m;
1309}
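/* example round trip, using the well-known encodings of 1.0
 * (binary16 0x3c00, binary32 0x3f800000):
 *
 *   ecb_binary16_to_binary32 (0x3c00)     == 0x3f800000
 *   ecb_binary32_to_binary16 (0x3f800000) == 0x3c00
 */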
1310
1311/*******************************************************************************/
1312/* fast integer to ascii */
1313
1314/*
1315 * This code is pretty complicated because it is general. The idea behind it,
1316 * however, is pretty simple: first, the number is multiplied with a scaling
1317 * factor (2**bits / 10**(digits-1)) to convert the integer into a fixed-point
1318 * number with the first digit in the upper bits.
1319 * Then this digit is converted to text and masked out. The resulting number
1320 * is then multiplied by 10, by multiplying the fixed point representation
1321 * by 5 and shifting the (binary) decimal point one to the right, so a 4.28
1322 * format becomes 5.27, 6.26 and so on.
1323 * The rest involves only advancing the pointer if we already generated a
1324 * non-zero digit, so leading zeroes are overwritten.
1325 */
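/* worked example for the x5 variant defined below (uint32_t, 26 fraction
 * bits, digitmask 10000): the scale factor is
 * (2**26 - 1 + 10000) / 10000 == 6711, so for u == 12345:
 *
 *   x = 12345 * 6711 == 82847295,           x >> 26 == 1  ('1')
 *   x = (x & (2**26 - 1)) * 5 == 78692155,  x >> 25 == 2  ('2')
 *
 * and so on, one more digit per step. */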
1326
1327/* simply return a mask with "bits" bits set */
1328#define ecb_i2a_mask(type,bits) ((((type)1) << (bits)) - 1)
1329
1330/* output a single digit. maskvalue is 10**digitidx */
1331#define ecb_i2a_digit(type,bits,digitmask,maskvalue,digitidx) \
1332 if (digitmask >= maskvalue) /* constant, used to decide how many digits to generate */ \
1333 { \
1334 char digit = x >> (bits - digitidx); /* calculate the topmost digit */ \
1335 *ptr = digit + '0'; /* output it */ \
1336 nz = (digitmask == maskvalue) || nz || digit; /* first term == always output last digit */ \
1337 ptr += nz; /* output digit only if non-zero digit seen */ \
1338 x = (x & ecb_i2a_mask (type, bits - digitidx)) * 5; /* *10, but shift decimal point right */ \
1339 }
1340
1341/* convert integer to fixed point format and multiply out digits, highest first */
1342/* requires magic constants: max. digits and number of bits after the decimal point */
1343#define ecb_i2a_def(suffix,ptr,v,type,bits,digitmask,lz) \
1344ecb_inline char *ecb_i2a_ ## suffix (char *ptr, uint32_t u) \
1345{ \
1346 char nz = lz; /* non-zero digit seen? */ \
1347 /* convert to x.bits fixed-point */ \
1348 type x = u * ((ecb_i2a_mask (type, bits) + digitmask) / digitmask); \
1349 /* output up to 10 digits */ \
1350 ecb_i2a_digit (type,bits,digitmask, 1, 0); \
1351 ecb_i2a_digit (type,bits,digitmask, 10, 1); \
1352 ecb_i2a_digit (type,bits,digitmask, 100, 2); \
1353 ecb_i2a_digit (type,bits,digitmask, 1000, 3); \
1354 ecb_i2a_digit (type,bits,digitmask, 10000, 4); \
1355 ecb_i2a_digit (type,bits,digitmask, 100000, 5); \
1356 ecb_i2a_digit (type,bits,digitmask, 1000000, 6); \
1357 ecb_i2a_digit (type,bits,digitmask, 10000000, 7); \
1358 ecb_i2a_digit (type,bits,digitmask, 100000000, 8); \
1359 ecb_i2a_digit (type,bits,digitmask, 1000000000, 9); \
1360 return ptr; \
1361}
1362
1363/* predefined versions of the above, for various digits */
1364/* ecb_i2a_xN = almost N digits, limit defined by macro */
1365/* ecb_i2a_N = up to N digits, leading zeroes suppressed */
1366/* ecb_i2a_0N = exactly N digits, including leading zeroes */
1367
1368/* non-leading-zero versions, limited range */
1369#define ECB_I2A_MAX_X5 59074 /* limit for ecb_i2a_x5 */
1370#define ECB_I2A_MAX_X10 2932500665 /* limit for ecb_i2a_x10 */
1371ecb_i2a_def ( x5, ptr, v, uint32_t, 26, 10000, 0)
1372ecb_i2a_def (x10, ptr, v, uint64_t, 60, 1000000000, 0)
1373
1374/* non-leading zero versions, all digits, 4 and 9 are optimal for 32/64 bit */
1375ecb_i2a_def ( 2, ptr, v, uint32_t, 10, 10, 0)
1376ecb_i2a_def ( 3, ptr, v, uint32_t, 12, 100, 0)
1377ecb_i2a_def ( 4, ptr, v, uint32_t, 26, 1000, 0)
1378ecb_i2a_def ( 5, ptr, v, uint64_t, 30, 10000, 0)
1379ecb_i2a_def ( 6, ptr, v, uint64_t, 36, 100000, 0)
1380ecb_i2a_def ( 7, ptr, v, uint64_t, 44, 1000000, 0)
1381ecb_i2a_def ( 8, ptr, v, uint64_t, 50, 10000000, 0)
1382ecb_i2a_def ( 9, ptr, v, uint64_t, 56, 100000000, 0)
1383
1384/* leading-zero versions, all digits, 04 and 09 are optimal for 32/64 bit */
1385ecb_i2a_def (02, ptr, v, uint32_t, 10, 10, 1)
1386ecb_i2a_def (03, ptr, v, uint32_t, 12, 100, 1)
1387ecb_i2a_def (04, ptr, v, uint32_t, 26, 1000, 1)
1388ecb_i2a_def (05, ptr, v, uint64_t, 30, 10000, 1)
1389ecb_i2a_def (06, ptr, v, uint64_t, 36, 100000, 1)
1390ecb_i2a_def (07, ptr, v, uint64_t, 44, 1000000, 1)
1391ecb_i2a_def (08, ptr, v, uint64_t, 50, 10000000, 1)
1392ecb_i2a_def (09, ptr, v, uint64_t, 56, 100000000, 1)
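/* example: ecb_i2a_4 (ptr, 42) writes "42" (leading zeroes suppressed),
 * while ecb_i2a_04 (ptr, 42) writes "0042"; both return a pointer just past
 * the last character written. */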
1393
1394#define ECB_I2A_I32_DIGITS 11
1395#define ECB_I2A_U32_DIGITS 10
1396#define ECB_I2A_I64_DIGITS 20
1397#define ECB_I2A_U64_DIGITS 21
1398#define ECB_I2A_MAX_DIGITS 21
1399
1400ecb_function_ char * ecb_i2a_u32 (char *ptr, uint32_t u);
1401ecb_function_ char * ecb_i2a_u32 (char *ptr, uint32_t u)
1402{
1403 #if ECB_64BIT_NATIVE
1404 if (ecb_expect_true (u <= ECB_I2A_MAX_X10))
1405 ptr = ecb_i2a_x10 (ptr, u);
1406 else /* x10 almost, but not fully, covers 32 bit */
1407 {
1408 uint32_t u1 = u % 1000000000;
1409 uint32_t u2 = u / 1000000000;
1410
1411 *ptr++ = u2 + '0';
1412 ptr = ecb_i2a_09 (ptr, u1);
1413 }
1414 #else
1415 if (ecb_expect_true (u <= ECB_I2A_MAX_X5))
1416 ptr = ecb_i2a_x5 (ptr, u);
1417 else if (ecb_expect_true (u <= ECB_I2A_MAX_X5 * 10000))
1418 {
1419 uint32_t u1 = u % 10000;
1420 uint32_t u2 = u / 10000;
1421
1422 ptr = ecb_i2a_x5 (ptr, u2);
1423 ptr = ecb_i2a_04 (ptr, u1);
1424 }
1425 else
1426 {
1427 uint32_t u1 = u % 10000;
1428 uint32_t ua = u / 10000;
1429 uint32_t u2 = ua % 10000;
1430 uint32_t u3 = ua / 10000;
1431
1432 ptr = ecb_i2a_2 (ptr, u3);
1433 ptr = ecb_i2a_04 (ptr, u2);
1434 ptr = ecb_i2a_04 (ptr, u1);
1435 }
496#endif 1436 #endif
497 1437
1438 return ptr;
1439}
1440
1441ecb_function_ char * ecb_i2a_i32 (char *ptr, int32_t v);
1442ecb_function_ char * ecb_i2a_i32 (char *ptr, int32_t v)
1443{
1444 *ptr = '-'; ptr += v < 0;
1445 uint32_t u = v < 0 ? -(uint32_t)v : v;
1446
1447 #if ECB_64BIT_NATIVE
1448 ptr = ecb_i2a_x10 (ptr, u); /* x10 fully covers 31 bit */
1449 #else
1450 ptr = ecb_i2a_u32 (ptr, u);
1451 #endif
1452
1453 return ptr;
1454}
1455
1456ecb_function_ char * ecb_i2a_u64 (char *ptr, uint64_t u);
1457ecb_function_ char * ecb_i2a_u64 (char *ptr, uint64_t u)
1458{
1459 #if ECB_64BIT_NATIVE
1460 if (ecb_expect_true (u <= ECB_I2A_MAX_X10))
1461 ptr = ecb_i2a_x10 (ptr, u);
1462 else if (ecb_expect_false (u <= ECB_I2A_MAX_X10 * 1000000000))
1463 {
1464 uint64_t u1 = u % 1000000000;
1465 uint64_t u2 = u / 1000000000;
1466
1467 ptr = ecb_i2a_x10 (ptr, u2);
1468 ptr = ecb_i2a_09 (ptr, u1);
1469 }
1470 else
1471 {
1472 uint64_t u1 = u % 1000000000;
1473 uint64_t ua = u / 1000000000;
1474 uint64_t u2 = ua % 1000000000;
1475 uint64_t u3 = ua / 1000000000;
1476
1477 ptr = ecb_i2a_2 (ptr, u3);
1478 ptr = ecb_i2a_09 (ptr, u2);
1479 ptr = ecb_i2a_09 (ptr, u1);
1480 }
1481 #else
1482 if (ecb_expect_true (u <= ECB_I2A_MAX_X5))
1483 ptr = ecb_i2a_x5 (ptr, u);
1484 else
1485 {
1486 uint64_t u1 = u % 10000;
1487 uint64_t u2 = u / 10000;
1488
1489 ptr = ecb_i2a_u64 (ptr, u2);
1490 ptr = ecb_i2a_04 (ptr, u1);
1491 }
1492 #endif
1493
1494 return ptr;
1495}
1496
1497ecb_function_ char * ecb_i2a_i64 (char *ptr, int64_t v);
1498ecb_function_ char * ecb_i2a_i64 (char *ptr, int64_t v)
1499{
1500 *ptr = '-'; ptr += v < 0;
1501 uint64_t u = v < 0 ? -(uint64_t)v : v;
1502
1503 #if ECB_64BIT_NATIVE
1504 if (ecb_expect_true (u <= ECB_I2A_MAX_X10))
1505 ptr = ecb_i2a_x10 (ptr, u);
1506 else if (ecb_expect_false (u <= ECB_I2A_MAX_X10 * 1000000000))
1507 {
1508 uint64_t u1 = u % 1000000000;
1509 uint64_t u2 = u / 1000000000;
1510
1511 ptr = ecb_i2a_x10 (ptr, u2);
1512 ptr = ecb_i2a_09 (ptr, u1);
1513 }
1514 else
1515 {
1516 uint64_t u1 = u % 1000000000;
1517 uint64_t ua = u / 1000000000;
1518 uint64_t u2 = ua % 1000000000;
1519 uint64_t u3 = ua / 1000000000;
1520
1521 /* 2**63 is at most 19 digits, so the top is exactly one digit */
1522 *ptr++ = u3 + '0';
1523 ptr = ecb_i2a_09 (ptr, u2);
1524 ptr = ecb_i2a_09 (ptr, u1);
1525 }
1526 #else
1527 ptr = ecb_i2a_u64 (ptr, u);
1528 #endif
1529
1530 return ptr;
1531}
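/* usage sketch: the i2a functions write the (possibly signed) digits into a
 * caller-provided buffer and return a pointer just past the last character,
 * without NUL-terminating:
 *
 *   char buf[ECB_I2A_MAX_DIGITS + 1];
 *   char *end = ecb_i2a_i32 (buf, -12345);
 *   *end = 0; // buf now holds "-12345"
 */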
1532
1533/*******************************************************************************/
1534/* floating point stuff, can be disabled by defining ECB_NO_LIBM */
1535
1536/* basically, everything uses "ieee pure-endian" floating point numbers */
1537/* the only noteworthy exception is ancient armle, which uses order 43218765 */
1538#if 0 \
1539 || __i386 || __i386__ \
1540 || ECB_GCC_AMD64 \
1541 || __powerpc__ || __ppc__ || __powerpc64__ || __ppc64__ \
1542 || defined __s390__ || defined __s390x__ \
1543 || defined __mips__ \
1544 || defined __alpha__ \
1545 || defined __hppa__ \
1546 || defined __ia64__ \
1547 || defined __m68k__ \
1548 || defined __m88k__ \
1549 || defined __sh__ \
1550 || defined _M_IX86 || defined ECB_MSVC_AMD64 || defined _M_IA64 \
1551 || (defined __arm__ && (defined __ARM_EABI__ || defined __EABI__ || defined __VFP_FP__ || defined _WIN32_WCE || defined __ANDROID__)) \
1552 || defined __aarch64__
1553 #define ECB_STDFP 1
1554#else
1555 #define ECB_STDFP 0
1556#endif
1557
1558#ifndef ECB_NO_LIBM
1559
1560 #include <math.h> /* for frexp*, ldexp*, INFINITY, NAN */
1561
1562 /* only the oldest of old doesn't have this one. solaris. */
1563 #ifdef INFINITY
1564 #define ECB_INFINITY INFINITY
1565 #else
1566 #define ECB_INFINITY HUGE_VAL
1567 #endif
1568
1569 #ifdef NAN
1570 #define ECB_NAN NAN
1571 #else
1572 #define ECB_NAN ECB_INFINITY
1573 #endif
1574
1575 #if ECB_C99 || _XOPEN_VERSION >= 600 || _POSIX_VERSION >= 200112L
1576 #define ecb_ldexpf(x,e) ldexpf ((x), (e))
1577 #define ecb_frexpf(x,e) frexpf ((x), (e))
1578 #else
1579 #define ecb_ldexpf(x,e) (float) ldexp ((double) (x), (e))
1580 #define ecb_frexpf(x,e) (float) frexp ((double) (x), (e))
1581 #endif
1582
1583 /* convert a float to ieee single/binary32 */
1584 ecb_function_ ecb_const uint32_t ecb_float_to_binary32 (float x);
1585 ecb_function_ ecb_const uint32_t ecb_float_to_binary32 (float x)
1586 {
1587 uint32_t r;
1588
1589 #if ECB_STDFP
1590 memcpy (&r, &x, 4);
1591 #else
1592 /* slow emulation, works for anything but -0 */
1593 uint32_t m;
1594 int e;
1595
1596 if (x == 0e0f ) return 0x00000000U;
1597 if (x > +3.40282346638528860e+38f) return 0x7f800000U;
1598 if (x < -3.40282346638528860e+38f) return 0xff800000U;
1599 if (x != x ) return 0x7fbfffffU;
1600
1601 m = ecb_frexpf (x, &e) * 0x1000000U;
1602
1603 r = m & 0x80000000U;
1604
1605 if (r)
1606 m = -m;
1607
1608 if (e <= -126)
1609 {
1610 m &= 0xffffffU;
1611 m >>= (-125 - e);
1612 e = -126;
1613 }
1614
1615 r |= (e + 126) << 23;
1616 r |= m & 0x7fffffU;
1617 #endif
1618
1619 return r;
1620 }
1621
1622 /* converts an ieee single/binary32 to a float */
1623 ecb_function_ ecb_const float ecb_binary32_to_float (uint32_t x);
1624 ecb_function_ ecb_const float ecb_binary32_to_float (uint32_t x)
1625 {
1626 float r;
1627
1628 #if ECB_STDFP
1629 memcpy (&r, &x, 4);
1630 #else
1631 /* emulation, only works for normals and subnormals and +0 */
1632 int neg = x >> 31;
1633 int e = (x >> 23) & 0xffU;
1634
1635 x &= 0x7fffffU;
1636
1637 if (e)
1638 x |= 0x800000U;
1639 else
1640 e = 1;
1641
1642 /* we distrust ldexpf a bit and do the 2**-24 scaling by an extra multiply */
1643 r = ecb_ldexpf (x * (0.5f / 0x800000U), e - 126);
1644
1645 r = neg ? -r : r;
1646 #endif
1647
1648 return r;
1649 }
1650
1651 /* convert a double to ieee double/binary64 */
1652 ecb_function_ ecb_const uint64_t ecb_double_to_binary64 (double x);
1653 ecb_function_ ecb_const uint64_t ecb_double_to_binary64 (double x)
1654 {
1655 uint64_t r;
1656
1657 #if ECB_STDFP
1658 memcpy (&r, &x, 8);
1659 #else
1660 /* slow emulation, works for anything but -0 */
1661 uint64_t m;
1662 int e;
1663
1664 if (x == 0e0 ) return 0x0000000000000000U;
1665 if (x > +1.79769313486231470e+308) return 0x7ff0000000000000U;
1666 if (x < -1.79769313486231470e+308) return 0xfff0000000000000U;
1667 if (x != x ) return 0x7ff7ffffffffffffU;
1668
1669 m = frexp (x, &e) * 0x20000000000000U;
1670
1671 r = m & 0x8000000000000000U;
1672
1673 if (r)
1674 m = -m;
1675
1676 if (e <= -1022)
1677 {
1678 m &= 0x1fffffffffffffU;
1679 m >>= (-1021 - e);
1680 e = -1022;
1681 }
1682
1683 r |= ((uint64_t)(e + 1022)) << 52;
1684 r |= m & 0xfffffffffffffU;
1685 #endif
1686
1687 return r;
1688 }
1689
1690 /* converts an ieee double/binary64 to a double */
1691 ecb_function_ ecb_const double ecb_binary64_to_double (uint64_t x);
1692 ecb_function_ ecb_const double ecb_binary64_to_double (uint64_t x)
1693 {
1694 double r;
1695
1696 #if ECB_STDFP
1697 memcpy (&r, &x, 8);
1698 #else
1699 /* emulation, only works for normals and subnormals and +0 */
1700 int neg = x >> 63;
1701 int e = (x >> 52) & 0x7ffU;
1702
1703 x &= 0xfffffffffffffU;
1704
1705 if (e)
1706 x |= 0x10000000000000U;
1707 else
1708 e = 1;
1709
1710 /* we distrust ldexp a bit and do the 2**-53 scaling by an extra multiply */
1711 r = ldexp (x * (0.5 / 0x10000000000000U), e - 1022);
1712
1713 r = neg ? -r : r;
1714 #endif
1715
1716 return r;
1717 }
1718
1719 /* convert a float to ieee half/binary16 */
1720 ecb_function_ ecb_const uint16_t ecb_float_to_binary16 (float x);
1721 ecb_function_ ecb_const uint16_t ecb_float_to_binary16 (float x)
1722 {
1723 return ecb_binary32_to_binary16 (ecb_float_to_binary32 (x));
1724 }
1725
1726 /* convert an ieee half/binary16 to float */
1727 ecb_function_ ecb_const float ecb_binary16_to_float (uint16_t x);
1728 ecb_function_ ecb_const float ecb_binary16_to_float (uint16_t x)
1729 {
1730 return ecb_binary32_to_float (ecb_binary16_to_binary32 (x));
1731 }
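  /* example values:
   *
   *   ecb_float_to_binary32 (1.0f)       == 0x3f800000
   *   ecb_binary32_to_float (0x3f800000) == 1.0f
   *   ecb_float_to_binary16 (1.0f)       == 0x3c00
   */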
1732
1733#endif
1734
1735#endif
1736
