/*
 * libecb - http://software.schmorp.de/pkg/libecb
 *
 * Copyright (©) 2009-2015,2018-2021 Marc Alexander Lehmann <libecb@schmorp.de>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without modifica-
 * tion, are permitted provided that the following conditions are met:
 *
 * …
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
 * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTH-
 * ERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
 * OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Alternatively, the contents of this file may be used under the terms of
 * the GNU General Public License ("GPL") version 2 or any later version,
 * in which case the provisions of the GPL are applicable instead of
 * the above. If you wish to allow the use of your version of this file
 * only under the terms of the GPL and not to allow others to use your
 * version of this file under the BSD license, indicate your decision
 * by deleting the provisions above and replace them with the notice
 * and other provisions required by the GPL. If you do not delete the
 * provisions above, a recipient may use your version of this file under
 * either the BSD or the GPL.
 */

#ifndef ECB_H
#define ECB_H

/* 16 bits major, 16 bits minor */
#define ECB_VERSION 0x0001000c

#include <string.h> /* for memcpy */

#if defined (_WIN32) && !defined (__MINGW32__)
  typedef signed char int8_t;
  typedef unsigned char uint8_t;
  typedef signed char int_fast8_t;
  typedef unsigned char uint_fast8_t;
  typedef signed short int16_t;
  typedef unsigned short uint16_t;
  typedef signed int int_fast16_t;
  typedef unsigned int uint_fast16_t;
  typedef signed int int32_t;
  typedef unsigned int uint32_t;
  typedef signed int int_fast32_t;
  typedef unsigned int uint_fast32_t;
  #if __GNUC__
    typedef signed long long int64_t;
    typedef unsigned long long uint64_t;
  #else /* _MSC_VER || __BORLANDC__ */
    typedef signed __int64 int64_t;
    typedef unsigned __int64 uint64_t;
  #endif
  typedef int64_t int_fast64_t;
  typedef uint64_t uint_fast64_t;
  #ifdef _WIN64
    #define ECB_PTRSIZE 8
    typedef uint64_t uintptr_t;
    typedef int64_t intptr_t;
  #else
    #define ECB_PTRSIZE 4
    typedef uint32_t uintptr_t;
    typedef int32_t intptr_t;
  #endif
#else
  #include <inttypes.h>
  #if (defined INTPTR_MAX ? INTPTR_MAX : ULONG_MAX) > 0xffffffffU
    #define ECB_PTRSIZE 8
  #else
    #define ECB_PTRSIZE 4
  #endif
#endif

#define ECB_GCC_AMD64 (__amd64 || __amd64__ || __x86_64 || __x86_64__)
#define ECB_MSVC_AMD64 (_M_AMD64 || _M_X64)

#ifndef ECB_OPTIMIZE_SIZE
  #if __OPTIMIZE_SIZE__
    #define ECB_OPTIMIZE_SIZE 1
  #else
    #define ECB_OPTIMIZE_SIZE 0
  #endif
#endif

/* work around x32 idiocy by defining proper macros */
#if ECB_GCC_AMD64 || ECB_MSVC_AMD64
  #if _ILP32
    #define ECB_AMD64_X32 1
  #else
    #define ECB_AMD64 1
  #endif
#endif

#if ECB_PTRSIZE >= 8 || ECB_AMD64_X32
  #define ECB_64BIT_NATIVE 1
#else
  #define ECB_64BIT_NATIVE 0
#endif

/* many compilers define _GNUC_ to some versions but then only implement
 * what their idiot authors think are the "more important" extensions,
 * causing enormous grief in return for some better fake benchmark numbers.
 * or so.
 * we try to detect these and simply assume they are not gcc - if they have
 * an issue with that they should have done it right in the first place.
 */
#if !defined __GNUC_MINOR__ || defined __INTEL_COMPILER || defined __SUNPRO_C || defined __SUNPRO_CC || defined __llvm__ || defined __clang__
  #define ECB_GCC_VERSION(major,minor) 0
#else
  #define ECB_GCC_VERSION(major,minor) (__GNUC__ > (major) || (__GNUC__ == (major) && __GNUC_MINOR__ >= (minor)))
#endif
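
/* Illustrative sketch (not part of libecb): ECB_GCC_VERSION expands to a
 * compile-time truth value, so it can gate optional code paths, e.g.:
 *
 *   #if ECB_GCC_VERSION(4,7)
 *     (rely on gcc 4.7+ features here, e.g. the __atomic builtins)
 *   #endif
 */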

#define ECB_CLANG_VERSION(major,minor) (__clang_major__ > (major) || (__clang_major__ == (major) && __clang_minor__ >= (minor)))

#if __clang__ && defined __has_builtin
  #define ECB_CLANG_BUILTIN(x) __has_builtin (x)
#else
  #define ECB_CLANG_BUILTIN(x) 0
#endif

#if __clang__ && defined __has_extension
  #define ECB_CLANG_EXTENSION(x) __has_extension (x)
#else
  #define ECB_CLANG_EXTENSION(x) 0
#endif

#define ECB_CPP (__cplusplus+0)
#define ECB_CPP11 (__cplusplus >= 201103L)
#define ECB_CPP14 (__cplusplus >= 201402L)
#define ECB_CPP17 (__cplusplus >= 201703L)

#if ECB_CPP
  #define ECB_C 0
  #define ECB_STDC_VERSION 0
#else
  #define ECB_C 1
  #define ECB_STDC_VERSION __STDC_VERSION__
#endif

#define ECB_C99 (ECB_STDC_VERSION >= 199901L)
#define ECB_C11 (ECB_STDC_VERSION >= 201112L)
#define ECB_C17 (ECB_STDC_VERSION >= 201710L)

#if ECB_CPP
  #define ECB_EXTERN_C extern "C"
  #define ECB_EXTERN_C_BEG ECB_EXTERN_C {
  #define ECB_EXTERN_C_END }
#else
  #define ECB_EXTERN_C extern
  #define ECB_EXTERN_C_BEG
  #define ECB_EXTERN_C_END
#endif

/*****************************************************************************/

/* ECB_NO_THREADS - ecb is not used by multiple threads, ever */
/* ECB_NO_SMP - ecb might be used in multiple threads, but only on a single cpu */

#if ECB_NO_THREADS
  #define ECB_NO_SMP 1
#endif

#if ECB_NO_SMP
  #define ECB_MEMORY_FENCE do { } while (0)
#endif

/* http://www-01.ibm.com/support/knowledgecenter/SSGH3R_13.1.0/com.ibm.xlcpp131.aix.doc/compiler_ref/compiler_builtins.html */
#if __xlC__ && ECB_CPP
  #include <builtins.h>
#endif

#if 1400 <= _MSC_VER
  #include <intrin.h> /* fence functions _ReadBarrier, also bit search functions _BitScanReverse */
#endif

#ifndef ECB_MEMORY_FENCE
  #if ECB_GCC_VERSION(2,5) || defined __INTEL_COMPILER || (__llvm__ && __GNUC__) || __SUNPRO_C >= 0x5110 || __SUNPRO_CC >= 0x5110
    #define ECB_MEMORY_FENCE_RELAXED __asm__ __volatile__ ("" : : : "memory")
    #if __i386 || __i386__
      #define ECB_MEMORY_FENCE __asm__ __volatile__ ("lock; orb $0, -1(%%esp)" : : : "memory")
      #define ECB_MEMORY_FENCE_ACQUIRE __asm__ __volatile__ ("" : : : "memory")
      #define ECB_MEMORY_FENCE_RELEASE __asm__ __volatile__ ("" : : : "memory")
    #elif ECB_GCC_AMD64
      #define ECB_MEMORY_FENCE __asm__ __volatile__ ("mfence" : : : "memory")
      #define ECB_MEMORY_FENCE_ACQUIRE __asm__ __volatile__ ("" : : : "memory")
      #define ECB_MEMORY_FENCE_RELEASE __asm__ __volatile__ ("" : : : "memory")
    #elif __powerpc__ || __ppc__ || __powerpc64__ || __ppc64__
      #define ECB_MEMORY_FENCE __asm__ __volatile__ ("sync" : : : "memory")
    #elif defined __ARM_ARCH_2__ \
      || defined __ARM_ARCH_3__ || defined __ARM_ARCH_3M__ \
      || defined __ARM_ARCH_4__ || defined __ARM_ARCH_4T__ \
      || defined __ARM_ARCH_5__ || defined __ARM_ARCH_5E__ \
      || defined __ARM_ARCH_5T__ || defined __ARM_ARCH_5TE__ \
      || defined __ARM_ARCH_5TEJ__
      /* should not need any, unless running old code on newer cpu - arm doesn't support that */
    #elif defined __ARM_ARCH_6__ || defined __ARM_ARCH_6J__ \
      || defined __ARM_ARCH_6K__ || defined __ARM_ARCH_6ZK__ \
      || defined __ARM_ARCH_6T2__
      #define ECB_MEMORY_FENCE __asm__ __volatile__ ("mcr p15,0,%0,c7,c10,5" : : "r" (0) : "memory")
    #elif defined __ARM_ARCH_7__ || defined __ARM_ARCH_7A__ \
      || defined __ARM_ARCH_7R__ || defined __ARM_ARCH_7M__
      #define ECB_MEMORY_FENCE __asm__ __volatile__ ("dmb" : : : "memory")
    #elif __aarch64__
      #define ECB_MEMORY_FENCE __asm__ __volatile__ ("dmb ish" : : : "memory")
    #elif (__sparc || __sparc__) && !(__sparc_v8__ || defined __sparcv8)
      #define ECB_MEMORY_FENCE __asm__ __volatile__ ("membar #LoadStore | #LoadLoad | #StoreStore | #StoreLoad" : : : "memory")
      #define ECB_MEMORY_FENCE_ACQUIRE __asm__ __volatile__ ("membar #LoadStore | #LoadLoad" : : : "memory")
      #define ECB_MEMORY_FENCE_RELEASE __asm__ __volatile__ ("membar #LoadStore | #StoreStore")
    #elif defined __s390__ || defined __s390x__
      #define ECB_MEMORY_FENCE __asm__ __volatile__ ("bcr 15,0" : : : "memory")
    #elif defined __mips__
      /* GNU/Linux emulates sync on mips1 architectures, so we force its use */
      /* anybody else who still uses mips1 is supposed to send in their version, with detection code. */
      #define ECB_MEMORY_FENCE __asm__ __volatile__ (".set mips2; sync; .set mips0" : : : "memory")
    #elif defined __alpha__
      #define ECB_MEMORY_FENCE __asm__ __volatile__ ("mb" : : : "memory")
    #elif defined __hppa__
      #define ECB_MEMORY_FENCE __asm__ __volatile__ ("" : : : "memory")
      #define ECB_MEMORY_FENCE_RELEASE __asm__ __volatile__ ("")
    #elif defined __ia64__
      #define ECB_MEMORY_FENCE __asm__ __volatile__ ("mf" : : : "memory")
    #elif defined __m68k__
      #define ECB_MEMORY_FENCE __asm__ __volatile__ ("" : : : "memory")
    #elif defined __m88k__
      #define ECB_MEMORY_FENCE __asm__ __volatile__ ("tb1 0,%%r0,128" : : : "memory")
    #elif defined __sh__
      #define ECB_MEMORY_FENCE __asm__ __volatile__ ("" : : : "memory")
    #endif
  #endif
#endif

#ifndef ECB_MEMORY_FENCE
  #if ECB_GCC_VERSION(4,7)
    /* see comment below (stdatomic.h) about the C11 memory model. */
    #define ECB_MEMORY_FENCE __atomic_thread_fence (__ATOMIC_SEQ_CST)
    #define ECB_MEMORY_FENCE_ACQUIRE __atomic_thread_fence (__ATOMIC_ACQUIRE)
    #define ECB_MEMORY_FENCE_RELEASE __atomic_thread_fence (__ATOMIC_RELEASE)
    #undef ECB_MEMORY_FENCE_RELAXED
    #define ECB_MEMORY_FENCE_RELAXED __atomic_thread_fence (__ATOMIC_RELAXED)

  #elif ECB_CLANG_EXTENSION(c_atomic)
    /* see comment below (stdatomic.h) about the C11 memory model. */
    #define ECB_MEMORY_FENCE __c11_atomic_thread_fence (__ATOMIC_SEQ_CST)
    #define ECB_MEMORY_FENCE_ACQUIRE __c11_atomic_thread_fence (__ATOMIC_ACQUIRE)
    #define ECB_MEMORY_FENCE_RELEASE __c11_atomic_thread_fence (__ATOMIC_RELEASE)
    #undef ECB_MEMORY_FENCE_RELAXED
    #define ECB_MEMORY_FENCE_RELAXED __c11_atomic_thread_fence (__ATOMIC_RELAXED)

  #elif ECB_GCC_VERSION(4,4) || defined __INTEL_COMPILER || defined __clang__
    #define ECB_MEMORY_FENCE __sync_synchronize ()
  #elif _MSC_VER >= 1500 /* VC++ 2008 */
    /* apparently, microsoft broke all the memory barrier stuff in Visual Studio 2008... */
    #pragma intrinsic(_ReadBarrier,_WriteBarrier,_ReadWriteBarrier)
    #define ECB_MEMORY_FENCE _ReadWriteBarrier (); MemoryBarrier()
    #define ECB_MEMORY_FENCE_ACQUIRE _ReadWriteBarrier (); MemoryBarrier() /* according to msdn, _ReadBarrier is not a load fence */
    #define ECB_MEMORY_FENCE_RELEASE _WriteBarrier (); MemoryBarrier()
  #elif _MSC_VER >= 1400 /* VC++ 2005 */
    #pragma intrinsic(_ReadBarrier,_WriteBarrier,_ReadWriteBarrier)
    #define ECB_MEMORY_FENCE _ReadWriteBarrier ()
    #define ECB_MEMORY_FENCE_ACQUIRE _ReadWriteBarrier () /* according to msdn, _ReadBarrier is not a load fence */
    #define ECB_MEMORY_FENCE_RELEASE _WriteBarrier ()
  #elif defined _WIN32
    #include <WinNT.h>
    #define ECB_MEMORY_FENCE MemoryBarrier () /* actually just xchg on x86... scary */
  #elif __SUNPRO_C >= 0x5110 || __SUNPRO_CC >= 0x5110
    #include <mbarrier.h>
    #define ECB_MEMORY_FENCE __machine_rw_barrier ()
    #define ECB_MEMORY_FENCE_ACQUIRE __machine_acq_barrier ()
    #define ECB_MEMORY_FENCE_RELEASE __machine_rel_barrier ()
    #define ECB_MEMORY_FENCE_RELAXED __compiler_barrier ()
  #elif __xlC__
    #define ECB_MEMORY_FENCE __sync ()
  #endif
#endif

#ifndef ECB_MEMORY_FENCE
  #if ECB_C11 && !defined __STDC_NO_ATOMICS__
    /* we assume that these memory fences work on all variables/all memory accesses, */
    /* not just C11 atomics and atomic accesses */
    #include <stdatomic.h>
    #define ECB_MEMORY_FENCE atomic_thread_fence (memory_order_seq_cst)
    #define ECB_MEMORY_FENCE_ACQUIRE atomic_thread_fence (memory_order_acquire)
    #define ECB_MEMORY_FENCE_RELEASE atomic_thread_fence (memory_order_release)
  #endif
#endif

#ifndef ECB_MEMORY_FENCE
  #if !ECB_AVOID_PTHREADS
    /*
     * if you get undefined symbol references to pthread_mutex_lock,
     * or failure to find pthread.h, then you should implement
     * the ECB_MEMORY_FENCE operations for your cpu/compiler
     * OR provide pthread.h and link against the posix thread library
     * of your system.
     */
    #include <pthread.h>
    #define ECB_NEEDS_PTHREADS 1
    #define ECB_MEMORY_FENCE_NEEDS_PTHREADS 1

    static pthread_mutex_t ecb_mf_lock = PTHREAD_MUTEX_INITIALIZER;
    #define ECB_MEMORY_FENCE do { pthread_mutex_lock (&ecb_mf_lock); pthread_mutex_unlock (&ecb_mf_lock); } while (0)
  #endif
#endif

#if !defined ECB_MEMORY_FENCE_ACQUIRE && defined ECB_MEMORY_FENCE
  #define ECB_MEMORY_FENCE_ACQUIRE ECB_MEMORY_FENCE
#endif

#if !defined ECB_MEMORY_FENCE_RELEASE && defined ECB_MEMORY_FENCE
  #define ECB_MEMORY_FENCE_RELEASE ECB_MEMORY_FENCE
#endif

#if !defined ECB_MEMORY_FENCE_RELAXED && defined ECB_MEMORY_FENCE
  #define ECB_MEMORY_FENCE_RELAXED ECB_MEMORY_FENCE /* very heavy-handed */
#endif
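
/* Illustrative sketch (not part of libecb): a minimal producer/consumer
 * hand-off using the fence macros above. "data" and "ready" are hypothetical
 * variables of the using program; "ready" would additionally have to be
 * written and read atomically (e.g. a volatile sig_atomic_t). This only
 * shows where the fences belong:
 *
 *   producer:  data = 42;
 *              ECB_MEMORY_FENCE_RELEASE;
 *              ready = 1;
 *
 *   consumer:  while (!ready) ;
 *              ECB_MEMORY_FENCE_ACQUIRE;
 *              use (data);
 */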

/*****************************************************************************/

#if ECB_CPP
  #define ecb_inline static inline
#elif ECB_GCC_VERSION(2,5)
  #define ecb_inline static __inline__
#elif ECB_C99
  #define ecb_inline static inline
#else
  #define ecb_inline static
#endif

#if ECB_GCC_VERSION(3,3)
  #define ecb_restrict __restrict__
#elif ECB_C99
  #define ecb_restrict restrict
#else
  #define ecb_restrict
#endif

typedef int ecb_bool;

#define ECB_CONCAT_(a, b) a ## b
#define ECB_CONCAT(a, b) ECB_CONCAT_(a, b)
#define ECB_STRINGIFY_(a) # a
#define ECB_STRINGIFY(a) ECB_STRINGIFY_(a)
#define ECB_STRINGIFY_EXPR(expr) ((expr), ECB_STRINGIFY_ (expr))

#define ecb_function_ ecb_inline

#if ECB_GCC_VERSION(3,1) || ECB_CLANG_VERSION(2,8)
  #define ecb_attribute(attrlist) __attribute__ (attrlist)
#else
  #define ecb_attribute(attrlist)
#endif

#if ECB_GCC_VERSION(3,1) || ECB_CLANG_BUILTIN(__builtin_constant_p)
  #define ecb_is_constant(expr) __builtin_constant_p (expr)
#else
  /* possible C11 impl for integral types
  typedef struct ecb_is_constant_struct ecb_is_constant_struct;
  #define ecb_is_constant(expr) _Generic ((1 ? (struct ecb_is_constant_struct *)0 : (void *)((expr) - (expr)), ecb_is_constant_struct *: 0, default: 1)) */

  #define ecb_is_constant(expr) 0
#endif

#if ECB_GCC_VERSION(3,1) || ECB_CLANG_BUILTIN(__builtin_expect)
  #define ecb_expect(expr,value) __builtin_expect ((expr),(value))
#else
  #define ecb_expect(expr,value) (expr)
#endif

#if ECB_GCC_VERSION(3,1) || ECB_CLANG_BUILTIN(__builtin_prefetch)
  #define ecb_prefetch(addr,rw,locality) __builtin_prefetch (addr, rw, locality)
#else
  #define ecb_prefetch(addr,rw,locality)
#endif

/* no emulation for ecb_decltype */
#if ECB_CPP11
  // older implementations might have problems with decltype(x)::type, work around it
  template<class T> struct ecb_decltype_t { typedef T type; };
  #define ecb_decltype(x) ecb_decltype_t<decltype (x)>::type
#elif ECB_GCC_VERSION(3,0) || ECB_CLANG_VERSION(2,8)
  #define ecb_decltype(x) __typeof__ (x)
#endif

#if _MSC_VER >= 1300
  #define ecb_deprecated __declspec (deprecated)
#else
  #define ecb_deprecated ecb_attribute ((__deprecated__))
#endif

#if _MSC_VER >= 1500
  #define ecb_deprecated_message(msg) __declspec (deprecated (msg))
#elif ECB_GCC_VERSION(4,5)
  #define ecb_deprecated_message(msg) ecb_attribute ((__deprecated__ (msg)))
#else
  #define ecb_deprecated_message(msg) ecb_deprecated
#endif

#if _MSC_VER >= 1400
  #define ecb_noinline __declspec (noinline)
#else
  #define ecb_noinline ecb_attribute ((__noinline__))
#endif

#define ecb_unused ecb_attribute ((__unused__))
#define ecb_const ecb_attribute ((__const__))
#define ecb_pure ecb_attribute ((__pure__))

#if ECB_C11 || __IBMC_NORETURN
  /* http://www-01.ibm.com/support/knowledgecenter/SSGH3R_13.1.0/com.ibm.xlcpp131.aix.doc/language_ref/noreturn.html */
  #define ecb_noreturn _Noreturn
#elif ECB_CPP11
  #define ecb_noreturn [[noreturn]]
#elif _MSC_VER >= 1200
  /* http://msdn.microsoft.com/en-us/library/k6ktzx3s.aspx */
  #define ecb_noreturn __declspec (noreturn)
#else
  #define ecb_noreturn ecb_attribute ((__noreturn__))
#endif

#if ECB_GCC_VERSION(4,3)
  #define ecb_artificial ecb_attribute ((__artificial__))
  #define ecb_hot ecb_attribute ((__hot__))
  #define ecb_cold ecb_attribute ((__cold__))
#else
  #define ecb_artificial
  #define ecb_hot
  #define ecb_cold
#endif

/* put around conditional expressions if you are very sure that the */
/* expression is mostly true or mostly false. note that these return */
/* booleans, not the expression. */
#define ecb_expect_false(expr) ecb_expect (!!(expr), 0)
#define ecb_expect_true(expr) ecb_expect (!!(expr), 1)
/* for compatibility to the rest of the world */
#define ecb_likely(expr) ecb_expect_true (expr)
#define ecb_unlikely(expr) ecb_expect_false (expr)
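
/* Illustrative sketch (not part of libecb): typical use of the hints above,
 * marking a rarely taken error path ("fail", "parse_header", "buf", "len"
 * and HEADER_SIZE are hypothetical names of the using program):
 *
 *   if (ecb_expect_false (len < HEADER_SIZE))
 *     return fail ("short packet");
 *
 *   if (ecb_expect_true (parse_header (buf)))
 *     ...
 */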

/* count trailing zero bits and count # of one bits */
#if ECB_GCC_VERSION(3,4) \
    || (ECB_CLANG_BUILTIN(__builtin_clz) && ECB_CLANG_BUILTIN(__builtin_clzll) \
    && ECB_CLANG_BUILTIN(__builtin_ctz) && ECB_CLANG_BUILTIN(__builtin_ctzll) \
    && ECB_CLANG_BUILTIN(__builtin_popcount))
  #define ecb_ctz32(x) __builtin_ctz (x)
  #define ecb_ctz64(x) (__SIZEOF_LONG__ == 64 ? __builtin_ctzl (x) : __builtin_ctzll (x))
  #define ecb_clz32(x) __builtin_clz (x)
  #define ecb_clz64(x) (__SIZEOF_LONG__ == 64 ? __builtin_clzl (x) : __builtin_clzll (x))
  #define ecb_ld32(x) (ecb_clz32 (x) ^ 31)
  #define ecb_ld64(x) (ecb_clz64 (x) ^ 63)
  #define ecb_popcount32(x) __builtin_popcount (x)
  /* ecb_popcount64 is more difficult, see below */
#else
  ecb_function_ ecb_const int ecb_ctz32 (uint32_t x);
  ecb_function_ ecb_const int
  ecb_ctz32 (uint32_t x)
  {
#if 1400 <= _MSC_VER && (_M_IX86 || _M_X64 || _M_IA64 || _M_ARM)
    unsigned long r;
    _BitScanForward (&r, x);
    return (int)r;
#else
    int r = 0;

    /* todo: use david seal's algorithm */

    x &= ~x + 1; /* this isolates the lowest bit */

#if ECB_branchless_on_i386
    r += !!(x & 0xaaaaaaaa) << 0;
    r += !!(x & 0xcccccccc) << 1;
    r += !!(x & 0xf0f0f0f0) << 2;
    r += !!(x & 0xff00ff00) << 3;
    r += !!(x & 0xffff0000) << 4;
#else
    if (x & 0xaaaaaaaa) r += 1;
    if (x & 0xcccccccc) r += 2;
    if (x & 0xf0f0f0f0) r += 4;
    if (x & 0xff00ff00) r += 8;
    if (x & 0xffff0000) r += 16;
#endif

    return r;
#endif
  }

  ecb_function_ ecb_const int ecb_ctz64 (uint64_t x);
  ecb_function_ ecb_const int
  ecb_ctz64 (uint64_t x)
  {
#if 1400 <= _MSC_VER && (_M_X64 || _M_IA64 || _M_ARM)
    unsigned long r;
    _BitScanForward64 (&r, x);
    return (int)r;
#else
    int shift = x & 0xffffffff ? 0 : 32;
    return ecb_ctz32 (x >> shift) + shift;
#endif
  }

  ecb_function_ ecb_const int ecb_popcount32 (uint32_t x);
  ecb_function_ ecb_const int
  ecb_popcount32 (uint32_t x)
  {
    x -= (x >> 1) & 0x55555555;
    x = ((x >> 2) & 0x33333333) + (x & 0x33333333);
    x = ((x >> 4) + x) & 0x0f0f0f0f;
    x *= 0x01010101;

    return x >> 24;
  }

  ecb_function_ ecb_const int ecb_ld32 (uint32_t x);
  ecb_function_ ecb_const int ecb_ld32 (uint32_t x)
  {
#if 1400 <= _MSC_VER && (_M_IX86 || _M_X64 || _M_IA64 || _M_ARM)
    unsigned long r;
    _BitScanReverse (&r, x);
    return (int)r;
#else
    int r = 0;

    if (x >> 16) { x >>= 16; r += 16; }
    if (x >>  8) { x >>=  8; r +=  8; }
    if (x >>  4) { x >>=  4; r +=  4; }
    if (x >>  2) { x >>=  2; r +=  2; }
    if (x >>  1) {           r +=  1; }

    return r;
#endif
  }

  ecb_function_ ecb_const int ecb_ld64 (uint64_t x);
  ecb_function_ ecb_const int ecb_ld64 (uint64_t x)
  {
#if 1400 <= _MSC_VER && (_M_X64 || _M_IA64 || _M_ARM)
    unsigned long r;
    _BitScanReverse64 (&r, x);
    return (int)r;
#else
    int r = 0;

    if (x >> 32) { x >>= 32; r += 32; }

    return r + ecb_ld32 (x);
#endif
  }
#endif
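
/* Illustrative sketch (not part of libecb): ecb_ctz32 makes it easy to visit
 * the set bits of a mask from lowest to highest ("visit" is a hypothetical
 * callback of the using program):
 *
 *   uint32_t mask = pending_events;
 *   while (mask)
 *     {
 *       visit (ecb_ctz32 (mask));   index of the lowest set bit
 *       mask &= mask - 1;           clear that bit
 *     }
 */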

ecb_function_ ecb_const ecb_bool ecb_is_pot32 (uint32_t x);
ecb_function_ ecb_const ecb_bool ecb_is_pot32 (uint32_t x) { return !(x & (x - 1)); }
ecb_function_ ecb_const ecb_bool ecb_is_pot64 (uint64_t x);
ecb_function_ ecb_const ecb_bool ecb_is_pot64 (uint64_t x) { return !(x & (x - 1)); }

ecb_function_ ecb_const uint8_t ecb_bitrev8 (uint8_t x);
ecb_function_ ecb_const uint8_t ecb_bitrev8 (uint8_t x)
{
  return (  (x * 0x0802U & 0x22110U)
          | (x * 0x8020U & 0x88440U)) * 0x10101U >> 16;
}

ecb_function_ ecb_const uint16_t ecb_bitrev16 (uint16_t x);
ecb_function_ ecb_const uint16_t ecb_bitrev16 (uint16_t x)
{
  x = ((x >> 1) & 0x5555) | ((x & 0x5555) << 1);
  x = ((x >> 2) & 0x3333) | ((x & 0x3333) << 2);
  x = ((x >> 4) & 0x0f0f) | ((x & 0x0f0f) << 4);
  x = ( x >> 8           ) | ( x               << 8);

  return x;
}

ecb_function_ ecb_const uint32_t ecb_bitrev32 (uint32_t x);
ecb_function_ ecb_const uint32_t ecb_bitrev32 (uint32_t x)
{
  x = ((x >> 1) & 0x55555555) | ((x & 0x55555555) << 1);
  x = ((x >> 2) & 0x33333333) | ((x & 0x33333333) << 2);
  x = ((x >> 4) & 0x0f0f0f0f) | ((x & 0x0f0f0f0f) << 4);
  x = ((x >> 8) & 0x00ff00ff) | ((x & 0x00ff00ff) << 8);
  x = ( x >> 16              ) | ( x              << 16);

  return x;
}

ecb_function_ ecb_const int ecb_popcount64 (uint64_t x);
ecb_function_ ecb_const int
ecb_popcount64 (uint64_t x)
{
  /* popcount64 is only available on 64 bit cpus as gcc builtin. */
  /* also, gcc/clang make this surprisingly difficult to use */
#if (__SIZEOF_LONG__ == 8) && (ECB_GCC_VERSION(3,4) || ECB_CLANG_BUILTIN (__builtin_popcountl))
  return __builtin_popcountl (x);
#else
  return ecb_popcount32 (x) + ecb_popcount32 (x >> 32);
#endif
}

ecb_inline ecb_const uint8_t ecb_rotl8 (uint8_t x, unsigned int count);
ecb_inline ecb_const uint8_t ecb_rotr8 (uint8_t x, unsigned int count);
ecb_inline ecb_const uint16_t ecb_rotl16 (uint16_t x, unsigned int count);
ecb_inline ecb_const uint16_t ecb_rotr16 (uint16_t x, unsigned int count);
ecb_inline ecb_const uint32_t ecb_rotl32 (uint32_t x, unsigned int count);
ecb_inline ecb_const uint32_t ecb_rotr32 (uint32_t x, unsigned int count);
ecb_inline ecb_const uint64_t ecb_rotl64 (uint64_t x, unsigned int count);
ecb_inline ecb_const uint64_t ecb_rotr64 (uint64_t x, unsigned int count);

ecb_inline ecb_const uint8_t ecb_rotl8 (uint8_t x, unsigned int count) { return (x >> (-count & 7)) | (x << (count & 7)); }
ecb_inline ecb_const uint8_t ecb_rotr8 (uint8_t x, unsigned int count) { return (x << (-count & 7)) | (x >> (count & 7)); }
ecb_inline ecb_const uint16_t ecb_rotl16 (uint16_t x, unsigned int count) { return (x >> (-count & 15)) | (x << (count & 15)); }
ecb_inline ecb_const uint16_t ecb_rotr16 (uint16_t x, unsigned int count) { return (x << (-count & 15)) | (x >> (count & 15)); }
ecb_inline ecb_const uint32_t ecb_rotl32 (uint32_t x, unsigned int count) { return (x >> (-count & 31)) | (x << (count & 31)); }
ecb_inline ecb_const uint32_t ecb_rotr32 (uint32_t x, unsigned int count) { return (x << (-count & 31)) | (x >> (count & 31)); }
ecb_inline ecb_const uint64_t ecb_rotl64 (uint64_t x, unsigned int count) { return (x >> (-count & 63)) | (x << (count & 63)); }
ecb_inline ecb_const uint64_t ecb_rotr64 (uint64_t x, unsigned int count) { return (x << (-count & 63)) | (x >> (count & 63)); }
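
/* Illustrative sketch (not part of libecb): the rotates are plain C
 * expressions that most compilers turn into a single rotate instruction,
 * e.g. one mixing round of a toy hash ("h" and "k" being hypothetical
 * uint32_t values of the caller):
 *
 *   h ^= ecb_rotl32 (h + k, 13);
 */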

#if ECB_CPP

inline uint8_t ecb_ctz (uint8_t v) { return ecb_ctz32 (v); }
inline uint16_t ecb_ctz (uint16_t v) { return ecb_ctz32 (v); }
inline uint32_t ecb_ctz (uint32_t v) { return ecb_ctz32 (v); }
inline uint64_t ecb_ctz (uint64_t v) { return ecb_ctz64 (v); }

inline bool ecb_is_pot (uint8_t v) { return ecb_is_pot32 (v); }
inline bool ecb_is_pot (uint16_t v) { return ecb_is_pot32 (v); }
inline bool ecb_is_pot (uint32_t v) { return ecb_is_pot32 (v); }
inline bool ecb_is_pot (uint64_t v) { return ecb_is_pot64 (v); }

inline int ecb_ld (uint8_t v) { return ecb_ld32 (v); }
inline int ecb_ld (uint16_t v) { return ecb_ld32 (v); }
inline int ecb_ld (uint32_t v) { return ecb_ld32 (v); }
inline int ecb_ld (uint64_t v) { return ecb_ld64 (v); }

inline int ecb_popcount (uint8_t v) { return ecb_popcount32 (v); }
inline int ecb_popcount (uint16_t v) { return ecb_popcount32 (v); }
inline int ecb_popcount (uint32_t v) { return ecb_popcount32 (v); }
inline int ecb_popcount (uint64_t v) { return ecb_popcount64 (v); }

inline uint8_t ecb_bitrev (uint8_t v) { return ecb_bitrev8 (v); }
inline uint16_t ecb_bitrev (uint16_t v) { return ecb_bitrev16 (v); }
inline uint32_t ecb_bitrev (uint32_t v) { return ecb_bitrev32 (v); }

inline uint8_t ecb_rotl (uint8_t v, unsigned int count) { return ecb_rotl8 (v, count); }
inline uint16_t ecb_rotl (uint16_t v, unsigned int count) { return ecb_rotl16 (v, count); }
inline uint32_t ecb_rotl (uint32_t v, unsigned int count) { return ecb_rotl32 (v, count); }
inline uint64_t ecb_rotl (uint64_t v, unsigned int count) { return ecb_rotl64 (v, count); }

inline uint8_t ecb_rotr (uint8_t v, unsigned int count) { return ecb_rotr8 (v, count); }
inline uint16_t ecb_rotr (uint16_t v, unsigned int count) { return ecb_rotr16 (v, count); }
inline uint32_t ecb_rotr (uint32_t v, unsigned int count) { return ecb_rotr32 (v, count); }
inline uint64_t ecb_rotr (uint64_t v, unsigned int count) { return ecb_rotr64 (v, count); }

#endif

#if ECB_GCC_VERSION(4,3) || (ECB_CLANG_BUILTIN(__builtin_bswap32) && ECB_CLANG_BUILTIN(__builtin_bswap64))
  #if ECB_GCC_VERSION(4,8) || ECB_CLANG_BUILTIN(__builtin_bswap16)
    #define ecb_bswap16(x) __builtin_bswap16 (x)
  #else
    #define ecb_bswap16(x) (__builtin_bswap32 (x) >> 16)
  #endif
  #define ecb_bswap32(x) __builtin_bswap32 (x)
  #define ecb_bswap64(x) __builtin_bswap64 (x)
#elif _MSC_VER
  #include <stdlib.h>
  #define ecb_bswap16(x) ((uint16_t)_byteswap_ushort ((uint16_t)(x)))
  #define ecb_bswap32(x) ((uint32_t)_byteswap_ulong ((uint32_t)(x)))
  #define ecb_bswap64(x) ((uint64_t)_byteswap_uint64 ((uint64_t)(x)))
#else
  ecb_function_ ecb_const uint16_t ecb_bswap16 (uint16_t x);
  ecb_function_ ecb_const uint16_t
  ecb_bswap16 (uint16_t x)
  {
    return ecb_rotl16 (x, 8);
  }

  ecb_function_ ecb_const uint32_t ecb_bswap32 (uint32_t x);
  ecb_function_ ecb_const uint32_t
  ecb_bswap32 (uint32_t x)
  {
    return (((uint32_t)ecb_bswap16 (x)) << 16) | ecb_bswap16 (x >> 16);
  }

  ecb_function_ ecb_const uint64_t ecb_bswap64 (uint64_t x);
  ecb_function_ ecb_const uint64_t
  ecb_bswap64 (uint64_t x)
  {
    return (((uint64_t)ecb_bswap32 (x)) << 32) | ecb_bswap32 (x >> 32);
  }
#endif

#if ECB_GCC_VERSION(4,5) || ECB_CLANG_BUILTIN(__builtin_unreachable)
  #define ecb_unreachable() __builtin_unreachable ()
#else
  /* this seems to work fine, but gcc always emits a warning for it :/ */
  ecb_inline ecb_noreturn void ecb_unreachable (void);
  ecb_inline ecb_noreturn void ecb_unreachable (void) { }
#endif

/* try to tell the compiler that some condition is definitely true */
#define ecb_assume(cond) if (!(cond)) ecb_unreachable (); else 0

ecb_inline ecb_const uint32_t ecb_byteorder_helper (void);
ecb_inline ecb_const uint32_t
ecb_byteorder_helper (void)
{
  /* the union code still generates code under pressure in gcc, */
  /* but less than using pointers, and always seems to */
  /* successfully return a constant. */
  /* the reason why we have this horrible preprocessor mess */
  /* is to avoid it in all cases, at least on common architectures */
  /* or when using a recent enough gcc version (>= 4.6) */
#if (defined __BYTE_ORDER__ && __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__) \
    || ((__i386 || __i386__ || _M_IX86 || ECB_GCC_AMD64 || ECB_MSVC_AMD64) && !__VOS__)
  #define ECB_LITTLE_ENDIAN 1
  return 0x44332211;
#elif (defined __BYTE_ORDER__ && __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__) \
      || ((__AARCH64EB__ || __MIPSEB__ || __ARMEB__) && !__VOS__)
  #define ECB_BIG_ENDIAN 1
  return 0x11223344;
#else
  union
  {
    uint8_t c[4];
    uint32_t u;
  } u = { 0x11, 0x22, 0x33, 0x44 };
  return u.u;
#endif
}

ecb_inline ecb_const ecb_bool ecb_big_endian (void);
ecb_inline ecb_const ecb_bool ecb_big_endian (void) { return ecb_byteorder_helper () == 0x11223344; }
ecb_inline ecb_const ecb_bool ecb_little_endian (void);
ecb_inline ecb_const ecb_bool ecb_little_endian (void) { return ecb_byteorder_helper () == 0x44332211; }

/*****************************************************************************/
/* unaligned load/store */

ecb_inline uint_fast16_t ecb_be_u16_to_host (uint_fast16_t v) { return ecb_little_endian () ? ecb_bswap16 (v) : v; }
ecb_inline uint_fast32_t ecb_be_u32_to_host (uint_fast32_t v) { return ecb_little_endian () ? ecb_bswap32 (v) : v; }
ecb_inline uint_fast64_t ecb_be_u64_to_host (uint_fast64_t v) { return ecb_little_endian () ? ecb_bswap64 (v) : v; }

ecb_inline uint_fast16_t ecb_le_u16_to_host (uint_fast16_t v) { return ecb_big_endian () ? ecb_bswap16 (v) : v; }
ecb_inline uint_fast32_t ecb_le_u32_to_host (uint_fast32_t v) { return ecb_big_endian () ? ecb_bswap32 (v) : v; }
ecb_inline uint_fast64_t ecb_le_u64_to_host (uint_fast64_t v) { return ecb_big_endian () ? ecb_bswap64 (v) : v; }

ecb_inline uint_fast16_t ecb_peek_u16_u (const void *ptr) { uint16_t v; memcpy (&v, ptr, sizeof (v)); return v; }
ecb_inline uint_fast32_t ecb_peek_u32_u (const void *ptr) { uint32_t v; memcpy (&v, ptr, sizeof (v)); return v; }
ecb_inline uint_fast64_t ecb_peek_u64_u (const void *ptr) { uint64_t v; memcpy (&v, ptr, sizeof (v)); return v; }

ecb_inline uint_fast16_t ecb_peek_be_u16_u (const void *ptr) { return ecb_be_u16_to_host (ecb_peek_u16_u (ptr)); }
ecb_inline uint_fast32_t ecb_peek_be_u32_u (const void *ptr) { return ecb_be_u32_to_host (ecb_peek_u32_u (ptr)); }
ecb_inline uint_fast64_t ecb_peek_be_u64_u (const void *ptr) { return ecb_be_u64_to_host (ecb_peek_u64_u (ptr)); }

ecb_inline uint_fast16_t ecb_peek_le_u16_u (const void *ptr) { return ecb_le_u16_to_host (ecb_peek_u16_u (ptr)); }
ecb_inline uint_fast32_t ecb_peek_le_u32_u (const void *ptr) { return ecb_le_u32_to_host (ecb_peek_u32_u (ptr)); }
ecb_inline uint_fast64_t ecb_peek_le_u64_u (const void *ptr) { return ecb_le_u64_to_host (ecb_peek_u64_u (ptr)); }

ecb_inline uint_fast16_t ecb_host_to_be_u16 (uint_fast16_t v) { return ecb_little_endian () ? ecb_bswap16 (v) : v; }
ecb_inline uint_fast32_t ecb_host_to_be_u32 (uint_fast32_t v) { return ecb_little_endian () ? ecb_bswap32 (v) : v; }
ecb_inline uint_fast64_t ecb_host_to_be_u64 (uint_fast64_t v) { return ecb_little_endian () ? ecb_bswap64 (v) : v; }

ecb_inline uint_fast16_t ecb_host_to_le_u16 (uint_fast16_t v) { return ecb_big_endian () ? ecb_bswap16 (v) : v; }
ecb_inline uint_fast32_t ecb_host_to_le_u32 (uint_fast32_t v) { return ecb_big_endian () ? ecb_bswap32 (v) : v; }
ecb_inline uint_fast64_t ecb_host_to_le_u64 (uint_fast64_t v) { return ecb_big_endian () ? ecb_bswap64 (v) : v; }

ecb_inline void ecb_poke_u16_u (void *ptr, uint16_t v) { memcpy (ptr, &v, sizeof (v)); }
ecb_inline void ecb_poke_u32_u (void *ptr, uint32_t v) { memcpy (ptr, &v, sizeof (v)); }
ecb_inline void ecb_poke_u64_u (void *ptr, uint64_t v) { memcpy (ptr, &v, sizeof (v)); }

ecb_inline void ecb_poke_be_u16_u (void *ptr, uint_fast16_t v) { ecb_poke_u16_u (ptr, ecb_host_to_be_u16 (v)); }
ecb_inline void ecb_poke_be_u32_u (void *ptr, uint_fast32_t v) { ecb_poke_u32_u (ptr, ecb_host_to_be_u32 (v)); }
ecb_inline void ecb_poke_be_u64_u (void *ptr, uint_fast64_t v) { ecb_poke_u64_u (ptr, ecb_host_to_be_u64 (v)); }

ecb_inline void ecb_poke_le_u16_u (void *ptr, uint_fast16_t v) { ecb_poke_u16_u (ptr, ecb_host_to_le_u16 (v)); }
ecb_inline void ecb_poke_le_u32_u (void *ptr, uint_fast32_t v) { ecb_poke_u32_u (ptr, ecb_host_to_le_u32 (v)); }
ecb_inline void ecb_poke_le_u64_u (void *ptr, uint_fast64_t v) { ecb_poke_u64_u (ptr, ecb_host_to_le_u64 (v)); }
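
/* Illustrative sketch (not part of libecb): reading a 32 bit big-endian
 * length prefix from a possibly unaligned network buffer and writing it back
 * in little-endian order ("buf" is a hypothetical unsigned char array):
 *
 *   uint_fast32_t len = ecb_peek_be_u32_u (buf);
 *   ecb_poke_le_u32_u (buf, (uint32_t)len);
 *
 * The _u variants go through memcpy and are therefore safe on cpus that trap
 * on unaligned accesses.
 */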

#if ECB_CPP

inline uint8_t ecb_bswap (uint8_t v) { return v; }
inline uint16_t ecb_bswap (uint16_t v) { return ecb_bswap16 (v); }
inline uint32_t ecb_bswap (uint32_t v) { return ecb_bswap32 (v); }
inline uint64_t ecb_bswap (uint64_t v) { return ecb_bswap64 (v); }

template<typename T> inline T ecb_be_to_host (T v) { return ecb_little_endian () ? ecb_bswap (v) : v; }
template<typename T> inline T ecb_le_to_host (T v) { return ecb_big_endian () ? ecb_bswap (v) : v; }
template<typename T> inline T ecb_peek (const void *ptr) { return *(const T *)ptr; }
template<typename T> inline T ecb_peek_be (const void *ptr) { return ecb_be_to_host (ecb_peek <T> (ptr)); }
template<typename T> inline T ecb_peek_le (const void *ptr) { return ecb_le_to_host (ecb_peek <T> (ptr)); }
template<typename T> inline T ecb_peek_u (const void *ptr) { T v; memcpy (&v, ptr, sizeof (v)); return v; }
template<typename T> inline T ecb_peek_be_u (const void *ptr) { return ecb_be_to_host (ecb_peek_u<T> (ptr)); }
template<typename T> inline T ecb_peek_le_u (const void *ptr) { return ecb_le_to_host (ecb_peek_u<T> (ptr)); }

template<typename T> inline T ecb_host_to_be (T v) { return ecb_little_endian () ? ecb_bswap (v) : v; }
template<typename T> inline T ecb_host_to_le (T v) { return ecb_big_endian () ? ecb_bswap (v) : v; }
template<typename T> inline void ecb_poke (void *ptr, T v) { *(T *)ptr = v; }
template<typename T> inline void ecb_poke_be (void *ptr, T v) { return ecb_poke <T> (ptr, ecb_host_to_be (v)); }
template<typename T> inline void ecb_poke_le (void *ptr, T v) { return ecb_poke <T> (ptr, ecb_host_to_le (v)); }
template<typename T> inline void ecb_poke_u (void *ptr, T v) { memcpy (ptr, &v, sizeof (v)); }
template<typename T> inline void ecb_poke_be_u (void *ptr, T v) { return ecb_poke_u<T> (ptr, ecb_host_to_be (v)); }
template<typename T> inline void ecb_poke_le_u (void *ptr, T v) { return ecb_poke_u<T> (ptr, ecb_host_to_le (v)); }

#endif

/*****************************************************************************/
/* pointer/integer hashing */

/* based on hash by Chris Wellons, https://nullprogram.com/blog/2018/07/31/ */
ecb_function_ uint32_t ecb_mix32 (uint32_t v);
ecb_function_ uint32_t ecb_mix32 (uint32_t v)
{
  v ^= v >> 16; v *= 0x7feb352dU;
  v ^= v >> 15; v *= 0x846ca68bU;
  v ^= v >> 16;
  return v;
}

ecb_function_ uint32_t ecb_unmix32 (uint32_t v);
ecb_function_ uint32_t ecb_unmix32 (uint32_t v)
{
  v ^= v >> 16           ; v *= 0x43021123U;
  v ^= v >> 15 ^ v >> 30; v *= 0x1d69e2a5U;
  v ^= v >> 16           ;
  return v;
}
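
/* Illustrative sketch (not part of libecb): ecb_mix32 as an integer hash for
 * a power-of-two sized table, with ecb_unmix32 as its exact inverse
 * ("tbl_size" is a hypothetical power of two of the using program):
 *
 *   uint32_t key  = 12345;
 *   uint32_t slot = ecb_mix32 (key) & (tbl_size - 1);
 *
 *   assert (ecb_unmix32 (ecb_mix32 (key)) == key);
 */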

/* based on splitmix64, by Sebastiano Vigna, https://prng.di.unimi.it/splitmix64.c */
ecb_function_ uint64_t ecb_mix64 (uint64_t v);
ecb_function_ uint64_t ecb_mix64 (uint64_t v)
{
  v ^= v >> 30; v *= 0xbf58476d1ce4e5b9U;
  v ^= v >> 27; v *= 0x94d049bb133111ebU;
  v ^= v >> 31;
  return v;
}

ecb_function_ uint64_t ecb_unmix64 (uint64_t v);
ecb_function_ uint64_t ecb_unmix64 (uint64_t v)
{
  v ^= v >> 31 ^ v >> 62; v *= 0x319642b2d24d8ec3U;
  v ^= v >> 27 ^ v >> 54; v *= 0x96de1b173f119089U;
  v ^= v >> 30 ^ v >> 60;
  return v;
}

ecb_function_ uintptr_t ecb_ptrmix (void *p);
ecb_function_ uintptr_t ecb_ptrmix (void *p)
{
#if ECB_PTRSIZE <= 4
  return ecb_mix32 ((uint32_t)p);
#else
  return ecb_mix64 ((uint64_t)p);
#endif
}

ecb_function_ void *ecb_ptrunmix (uintptr_t v);
ecb_function_ void *ecb_ptrunmix (uintptr_t v)
{
#if ECB_PTRSIZE <= 4
  return (void *)ecb_unmix32 (v);
#else
  return (void *)ecb_unmix64 (v);
#endif
}

#if ECB_CPP

template<typename T>
inline uintptr_t ecb_ptrmix (T *p)
{
  return ecb_ptrmix (static_cast<void *>(p));
}

template<typename T>
inline T *ecb_ptrunmix (uintptr_t v)
{
  return static_cast<T *>(ecb_ptrunmix (v));
}

#endif

/*****************************************************************************/
/* gray code */

ecb_function_ uint_fast8_t ecb_gray8_encode (uint_fast8_t b) { return b ^ (b >> 1); }
ecb_function_ uint_fast16_t ecb_gray16_encode (uint_fast16_t b) { return b ^ (b >> 1); }
ecb_function_ uint_fast32_t ecb_gray32_encode (uint_fast32_t b) { return b ^ (b >> 1); }
ecb_function_ uint_fast64_t ecb_gray64_encode (uint_fast64_t b) { return b ^ (b >> 1); }

ecb_function_ uint8_t ecb_gray8_decode (uint8_t g)
{
  g ^= g >> 1;
  g ^= g >> 2;
  g ^= g >> 4;

  return g;
}

ecb_function_ uint16_t ecb_gray16_decode (uint16_t g)
{
  g ^= g >> 1;
  g ^= g >> 2;
  g ^= g >> 4;
  g ^= g >> 8;

  return g;
}

ecb_function_ uint32_t ecb_gray32_decode (uint32_t g)
{
  g ^= g >> 1;
  g ^= g >> 2;
  g ^= g >> 4;
  g ^= g >> 8;
  g ^= g >> 16;

  return g;
}

ecb_function_ uint64_t ecb_gray64_decode (uint64_t g)
{
  g ^= g >> 1;
  g ^= g >> 2;
  g ^= g >> 4;
  g ^= g >> 8;
  g ^= g >> 16;
  g ^= g >> 32;

  return g;
}
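
/* Illustrative sketch (not part of libecb): successive gray codes differ in
 * exactly one bit, and decode is the exact inverse of encode:
 *
 *   ecb_gray8_encode (5) == 7    (101 -> 111)
 *   ecb_gray8_encode (6) == 5    (110 -> 101)
 *   ecb_gray8_decode (ecb_gray8_encode (i)) == i   for any uint8_t i
 */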

#if ECB_CPP

ecb_function_ uint8_t ecb_gray_encode (uint8_t b) { return ecb_gray8_encode (b); }
ecb_function_ uint16_t ecb_gray_encode (uint16_t b) { return ecb_gray16_encode (b); }
ecb_function_ uint32_t ecb_gray_encode (uint32_t b) { return ecb_gray32_encode (b); }
ecb_function_ uint64_t ecb_gray_encode (uint64_t b) { return ecb_gray64_encode (b); }

ecb_function_ uint8_t ecb_gray_decode (uint8_t g) { return ecb_gray8_decode (g); }
ecb_function_ uint16_t ecb_gray_decode (uint16_t g) { return ecb_gray16_decode (g); }
ecb_function_ uint32_t ecb_gray_decode (uint32_t g) { return ecb_gray32_decode (g); }
ecb_function_ uint64_t ecb_gray_decode (uint64_t g) { return ecb_gray64_decode (g); }

#endif

/*****************************************************************************/
/* 2d hilbert curves */

/* algorithm from the book Hacker's Delight, modified to not */
/* run into undefined behaviour for n==16 */
static uint32_t
ecb_hilbert2d_index_to_coord32 (int n, uint32_t s)
{
  uint32_t comp, swap, cs, t, sr;

  /* pad s on the left (unused) bits with 01 (no change groups) */
  s |= 0x55555555U << n << n;
  /* "s shift right" */
  sr = (s >> 1) & 0x55555555U;
  /* compute complement and swap info in two-bit groups */
  cs = ((s & 0x55555555U) + sr) ^ 0x55555555U;

  /* parallel prefix xor op to propagate both complement
   * and swap info together from left to right (there is
   * no step "cs ^= cs >> 1", so in effect it computes
   * two independent parallel prefix operations on two
   * interleaved sets of sixteen bits).
   */
  cs ^= cs >> 2;
  cs ^= cs >> 4;
  cs ^= cs >> 8;
  cs ^= cs >> 16;

  /* separate swap and complement bits */
  swap = cs & 0x55555555U;
  comp = (cs >> 1) & 0x55555555U;

  /* calculate coordinates in odd and even bit positions */
  t = (s & swap) ^ comp;
  s = s ^ sr ^ t ^ (t << 1);

  /* unpad/clear out any junk on the left */
  s = s & ((1 << n << n) - 1);

  /* Now "unshuffle" to separate the x and y bits. */
  t = (s ^ (s >> 1)) & 0x22222222U; s ^= t ^ (t << 1);
  t = (s ^ (s >> 2)) & 0x0c0c0c0cU; s ^= t ^ (t << 2);
  t = (s ^ (s >> 4)) & 0x00f000f0U; s ^= t ^ (t << 4);
  t = (s ^ (s >> 8)) & 0x0000ff00U; s ^= t ^ (t << 8);

  /* now s contains two 16-bit coordinates */
  return s;
}

/* 64 bit, a straightforward extension to the 32 bit case */
static uint64_t
ecb_hilbert2d_index_to_coord64 (int n, uint64_t s)
{
  uint64_t comp, swap, cs, t, sr;

  /* pad s on the left (unused) bits with 01 (no change groups) */
  s |= 0x5555555555555555U << n << n;
  /* "s shift right" */
  sr = (s >> 1) & 0x5555555555555555U;
  /* compute complement and swap info in two-bit groups */
  cs = ((s & 0x5555555555555555U) + sr) ^ 0x5555555555555555U;

  /* parallel prefix xor op to propagate both complement
   * and swap info together from left to right (there is
   * no step "cs ^= cs >> 1", so in effect it computes
   * two independent parallel prefix operations on two
   * interleaved sets of thirty-two bits).
   */
  cs ^= cs >> 2;
  cs ^= cs >> 4;
  cs ^= cs >> 8;
  cs ^= cs >> 16;
  cs ^= cs >> 32;

  /* separate swap and complement bits */
  swap = cs & 0x5555555555555555U;
  comp = (cs >> 1) & 0x5555555555555555U;

  /* calculate coordinates in odd and even bit positions */
  t = (s & swap) ^ comp;
  s = s ^ sr ^ t ^ (t << 1);

  /* unpad/clear out any junk on the left */
  s = s & ((1 << n << n) - 1);

  /* Now "unshuffle" to separate the x and y bits. */
  t = (s ^ (s >> 1)) & 0x2222222222222222U; s ^= t ^ (t << 1);
  t = (s ^ (s >> 2)) & 0x0c0c0c0c0c0c0c0cU; s ^= t ^ (t << 2);
  t = (s ^ (s >> 4)) & 0x00f000f000f000f0U; s ^= t ^ (t << 4);
  t = (s ^ (s >> 8)) & 0x0000ff000000ff00U; s ^= t ^ (t << 8);
  t = (s ^ (s >> 16)) & 0x00000000ffff0000U; s ^= t ^ (t << 16);

  /* now s contains two 32-bit coordinates */
  return s;
}

/* algorithm from the book Hacker's Delight, but a similar algorithm */
/* is given in https://doi.org/10.1002/spe.4380160103 */
/* this has been slightly improved over the original version */
ecb_function_ uint32_t
ecb_hilbert2d_coord_to_index32 (int n, uint32_t xy)
{
  uint32_t row;
  uint32_t state = 0;
  uint32_t s = 0;

  do
    {
      --n;

      row = 4 * state
            | (2 & (xy >> n >> 15))
            | (1 & (xy >> n));

      /* these funky constants are lookup tables for two-bit values */
      s = (s << 2) | (0x361e9cb4U >> 2 * row) & 3;
      state = (0x8fe65831U >> 2 * row) & 3;
    }
  while (n > 0);

  return s;
}

/* 64 bit, essentially the same as 32 bit */
ecb_function_ uint64_t
ecb_hilbert2d_coord_to_index64 (int n, uint64_t xy)
{
  uint32_t row;
  uint32_t state = 0;
  uint64_t s = 0;

  do
    {
      --n;

      row = 4 * state
            | (2 & (xy >> n >> 31))
            | (1 & (xy >> n));

      /* these funky constants are lookup tables for two-bit values */
      s = (s << 2) | (0x361e9cb4U >> 2 * row) & 3;
      state = (0x8fe65831U >> 2 * row) & 3;
    }
  while (n > 0);

  return s;
}
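
/* Illustrative sketch (not part of libecb): mapping between a position on an
 * n-bit 2d hilbert curve and its packed cell coordinates (one coordinate in
 * the low half, the other in the high half of the return value); the two
 * functions are inverses of each other ("idx" is a hypothetical curve index
 * below 1 << (2 * 8)):
 *
 *   uint32_t xy = ecb_hilbert2d_index_to_coord32 (8, idx);  8 bits per axis
 *   uint32_t a  = xy & 0xffff;
 *   uint32_t b  = xy >> 16;
 *
 *   ecb_hilbert2d_coord_to_index32 (8, xy) == idx
 */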

/*****************************************************************************/
/* division */

#if ECB_GCC_VERSION(3,0) || ECB_C99
  /* C99 tightened the definition of %, so we can use a more efficient version */
  #define ecb_mod(m,n) ((m) % (n) + ((m) % (n) < 0 ? (n) : 0))
#else
  #define ecb_mod(m,n) ((m) < 0 ? ((n) - 1 - ((-1 - (m)) % (n))) : ((m) % (n)))
#endif

#if ECB_CPP
  template<typename T>
  static inline T ecb_div_rd (T val, T div)
  {
    return val < 0 ? - ((-val + div - 1) / div) : (val) / div;
  }
  template<typename T>
  static inline T ecb_div_ru (T val, T div)
  {
    return val < 0 ? - ((-val) / div) : (val + div - 1) / div;
  }
#else
  #define ecb_div_rd(val,div) ((val) < 0 ? - ((-(val) + (div) - 1) / (div)) : ((val)) / (div))
  #define ecb_div_ru(val,div) ((val) < 0 ? - ((-(val)) / (div)) : ((val) + (div) - 1) / (div))
#endif
|
|
1132 | |
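/* Illustrative sketch of the rounding guarantees (hypothetical ecb_example_*
 * helper, kept out of compilation by #if 0): ecb_mod always yields a value in
 * [0, n), ecb_div_rd rounds towards negative infinity and ecb_div_ru towards
 * positive infinity. */
#if 0
static void
ecb_example_division (void)
{
  int a = ecb_mod (-7, 5);    /* 3, where plain % may give -2 */
  int b = ecb_div_rd (-7, 2); /* -4, i.e. rounded down        */
  int c = ecb_div_ru ( 7, 2); /*  4, i.e. rounded up          */

  (void)a; (void)b; (void)c;
}
#endif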
|
|
/*****************************************************************************/
/* array length */

#if ecb_cplusplus_does_not_suck
/* does not work for local types (http://www.open-std.org/jtc1/sc22/wg21/docs/papers/2008/n2657.htm) */
template<typename T, int N>
static inline int ecb_array_length (const T (&arr)[N])
{
  return N;
}
#else
  #define ecb_array_length(name) (sizeof (name) / sizeof (name [0]))
#endif

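/* Illustrative sketch (hypothetical ecb_example_* helper, kept out of
 * compilation by #if 0): ecb_array_length only works on real arrays,
 * not on pointers that an array has decayed to. */
#if 0
static void
ecb_example_array_length (void)
{
  static const uint16_t tbl[] = { 1, 2, 3, 5, 8, 13 };
  int n = ecb_array_length (tbl); /* 6 */

  (void)n;
}
#endif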
|
|
/*****************************************************************************/
/* IEEE 754-2008 half float conversions */

ecb_function_ ecb_const uint32_t ecb_binary16_to_binary32 (uint32_t x);
ecb_function_ ecb_const uint32_t
ecb_binary16_to_binary32 (uint32_t x)
{
  unsigned int s = (x & 0x8000) << (31 - 15);
  int          e = (x >> 10) & 0x001f;
  unsigned int m =  x        & 0x03ff;

  if (ecb_expect_false (e == 31))
    /* infinity or NaN */
    e = 255 - (127 - 15);
  else if (ecb_expect_false (!e))
    {
      if (ecb_expect_true (!m))
        /* zero, handled by code below by forcing e to 0 */
        e = 0 - (127 - 15);
      else
        {
          /* subnormal, renormalise */
          unsigned int s = 10 - ecb_ld32 (m);

          m = (m << s) & 0x3ff; /* mask implicit bit */
          e -= s - 1;
        }
    }

  /* e and m now are normalised, or zero, (or inf or nan) */
  e += 127 - 15;

  return s | (e << 23) | (m << (23 - 10));
}

ecb_function_ ecb_const uint16_t ecb_binary32_to_binary16 (uint32_t x);
ecb_function_ ecb_const uint16_t
ecb_binary32_to_binary16 (uint32_t x)
{
  unsigned int s =  (x >> 16) & 0x00008000; /* sign bit, the easy part */
  int          e = ((x >> 23) & 0x000000ff) - (127 - 15); /* the desired exponent */
  unsigned int m =   x        & 0x007fffff;

  x &= 0x7fffffff;

  /* if it's within range of binary16 normals, use fast path */
  if (ecb_expect_true (0x38800000 <= x && x <= 0x477fefff))
    {
      /* mantissa round-to-even */
      m += 0x00000fff + ((m >> (23 - 10)) & 1);

      /* handle overflow */
      if (ecb_expect_false (m >= 0x00800000))
        {
          m >>= 1;
          e  += 1;
        }

      return s | (e << 10) | (m >> (23 - 10));
    }

  /* handle large numbers and infinity */
  if (ecb_expect_true (0x477fefff < x && x <= 0x7f800000))
    return s | 0x7c00;

  /* handle zero, subnormals and small numbers */
  if (ecb_expect_true (x < 0x38800000))
    {
      /* zero */
      if (ecb_expect_true (!x))
        return s;

      /* handle subnormals */

      /* too small, will be zero */
      if (e < (14 - 24)) /* might not be sharp, but is good enough */
        return s;

      m |= 0x00800000; /* make implicit bit explicit */

      /* very tricky - we need to round to the nearest e (+10) bit value */
      {
        unsigned int bits = 14 - e;
        unsigned int half = (1 << (bits - 1)) - 1;
        unsigned int even = (m >> bits) & 1;

        /* if this overflows, we will end up with a normalised number */
        m = (m + half + even) >> bits;
      }

      return s | m;
    }

  /* handle NaNs, preserve leftmost nan bits, but make sure we don't turn them into infinities */
  m >>= 13;

  return s | 0x7c00 | m | !m;
}

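/* Illustrative round-trip sketch at the bit level (hypothetical ecb_example_*
 * helper, kept out of compilation by #if 0): 0x3c00 is 1.0 in binary16 and
 * 0x3f800000 is 1.0f in binary32, and the value survives both directions. */
#if 0
static void
ecb_example_binary16_bits (void)
{
  uint32_t f32 = ecb_binary16_to_binary32 (0x3c00); /* 0x3f800000  */
  uint16_t f16 = ecb_binary32_to_binary16 (f32);    /* 0x3c00 again */

  (void)f16;
}
#endif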
|
|
/*******************************************************************************/
/* fast integer to ascii */

/*
 * This code is pretty complicated because it is general. The idea behind it,
 * however, is pretty simple: first, the number is multiplied by a scaling
 * factor (2**bits / 10**(digits-1)) to convert the integer into a fixed-point
 * number with the first digit in the upper bits.
 * Then this digit is converted to text and masked out. The resulting number
 * is then multiplied by 10, by multiplying the fixed-point representation
 * by 5 and shifting the (binary) decimal point one to the right, so a 4.28
 * format becomes 5.27, 6.26 and so on.
 * The rest involves only advancing the pointer if we have already generated a
 * non-zero digit, so leading zeroes are overwritten.
 */

/* simply return a mask with "bits" bits set */
#define ecb_i2a_mask(type,bits) ((((type)1) << (bits)) - 1)

/* output a single digit. maskvalue is 10**digitidx */
#define ecb_i2a_digit(type,bits,digitmask,maskvalue,digitidx) \
  if (digitmask >= maskvalue) /* constant, used to decide how many digits to generate */ \
    { \
      char digit = x >> (bits - digitidx); /* calculate the topmost digit */ \
      *ptr = digit + '0'; /* output it */ \
      nz = (digitmask == maskvalue) || nz || digit; /* first term == always output last digit */ \
      ptr += nz; /* output digit only if non-zero digit seen */ \
      x = (x & ecb_i2a_mask (type, bits - digitidx)) * 5; /* *10, but shift decimal point right */ \
    }

/* convert integer to fixed point format and multiply out digits, highest first */
/* requires magic constants: max. digits and number of bits after the decimal point */
#define ecb_i2a_def(suffix,ptr,v,type,bits,digitmask,lz) \
  ecb_inline char *ecb_i2a_ ## suffix (char *ptr, uint32_t u) \
  { \
    char nz = lz; /* non-zero digit seen? */ \
    /* convert to x.bits fixed-point */ \
    type x = u * ((ecb_i2a_mask (type, bits) + digitmask) / digitmask); \
    /* output up to 10 digits */ \
    ecb_i2a_digit (type,bits,digitmask,          1, 0); \
    ecb_i2a_digit (type,bits,digitmask,         10, 1); \
    ecb_i2a_digit (type,bits,digitmask,        100, 2); \
    ecb_i2a_digit (type,bits,digitmask,       1000, 3); \
    ecb_i2a_digit (type,bits,digitmask,      10000, 4); \
    ecb_i2a_digit (type,bits,digitmask,     100000, 5); \
    ecb_i2a_digit (type,bits,digitmask,    1000000, 6); \
    ecb_i2a_digit (type,bits,digitmask,   10000000, 7); \
    ecb_i2a_digit (type,bits,digitmask,  100000000, 8); \
    ecb_i2a_digit (type,bits,digitmask, 1000000000, 9); \
    return ptr; \
  }

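/* Worked sketch of the fixed-point digit extraction described above, spelled
 * out for a 4-digit value the way ecb_i2a_def instantiates it with type
 * uint32_t, 26 fraction bits and digitmask 1000 (hypothetical ecb_example_*
 * helper, kept out of compilation by #if 0). */
#if 0
static void
ecb_example_i2a_fixed_point (void)
{
  uint32_t u = 4711;

  /* 4.26 fixed point - the integer part is the topmost decimal digit */
  uint32_t x = u * ((ecb_i2a_mask (uint32_t, 26) + 1000) / 1000);

  char d0 = '0' + (x >> 26); x = (x & ecb_i2a_mask (uint32_t, 26)) * 5; /* '4', now 5.25 format */
  char d1 = '0' + (x >> 25); x = (x & ecb_i2a_mask (uint32_t, 25)) * 5; /* '7', now 6.24 format */
  char d2 = '0' + (x >> 24); x = (x & ecb_i2a_mask (uint32_t, 24)) * 5; /* '1', now 7.23 format */
  char d3 = '0' + (x >> 23);                                            /* '1' */

  (void)d0; (void)d1; (void)d2; (void)d3;
}
#endif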
|
|
/* predefined versions of the above, for various digits */
/* ecb_i2a_xN = almost N digits, limit defined by macro */
/* ecb_i2a_N = up to N digits, leading zeroes suppressed */
/* ecb_i2a_0N = exactly N digits, including leading zeroes */

/* non-leading-zero versions, limited range */
#define ECB_I2A_MAX_X5       59074 /* limit for ecb_i2a_x5 */
#define ECB_I2A_MAX_X10 2932500665 /* limit for ecb_i2a_x10 */
ecb_i2a_def ( x5, ptr, v, uint32_t, 26,      10000, 0)
ecb_i2a_def (x10, ptr, v, uint64_t, 60, 1000000000, 0)

/* non-leading-zero versions, all digits, 4 and 9 are optimal for 32/64 bit */
ecb_i2a_def ( 2, ptr, v, uint32_t, 10,        10, 0)
ecb_i2a_def ( 3, ptr, v, uint32_t, 12,       100, 0)
ecb_i2a_def ( 4, ptr, v, uint32_t, 26,      1000, 0)
ecb_i2a_def ( 5, ptr, v, uint64_t, 30,     10000, 0)
ecb_i2a_def ( 6, ptr, v, uint64_t, 36,    100000, 0)
ecb_i2a_def ( 7, ptr, v, uint64_t, 44,   1000000, 0)
ecb_i2a_def ( 8, ptr, v, uint64_t, 50,  10000000, 0)
ecb_i2a_def ( 9, ptr, v, uint64_t, 56, 100000000, 0)

/* leading-zero versions, all digits, 04 and 09 are optimal for 32/64 bit */
ecb_i2a_def (02, ptr, v, uint32_t, 10,        10, 1)
ecb_i2a_def (03, ptr, v, uint32_t, 12,       100, 1)
ecb_i2a_def (04, ptr, v, uint32_t, 26,      1000, 1)
ecb_i2a_def (05, ptr, v, uint64_t, 30,     10000, 1)
ecb_i2a_def (06, ptr, v, uint64_t, 36,    100000, 1)
ecb_i2a_def (07, ptr, v, uint64_t, 44,   1000000, 1)
ecb_i2a_def (08, ptr, v, uint64_t, 50,  10000000, 1)
ecb_i2a_def (09, ptr, v, uint64_t, 56, 100000000, 1)

#define ECB_I2A_I32_DIGITS 11
#define ECB_I2A_U32_DIGITS 10
#define ECB_I2A_I64_DIGITS 20
#define ECB_I2A_U64_DIGITS 21
#define ECB_I2A_MAX_DIGITS 21

ecb_inline char *
ecb_i2a_u32 (char *ptr, uint32_t u)
{
  #if ECB_64BIT_NATIVE
    if (ecb_expect_true (u <= ECB_I2A_MAX_X10))
      ptr = ecb_i2a_x10 (ptr, u);
    else /* x10 almost, but not fully, covers 32 bit */
      {
        uint32_t u1 = u % 1000000000;
        uint32_t u2 = u / 1000000000;

        *ptr++ = u2 + '0';
        ptr = ecb_i2a_09 (ptr, u1);
      }
  #else
    if (ecb_expect_true (u <= ECB_I2A_MAX_X5))
      ptr = ecb_i2a_x5 (ptr, u);
    else if (ecb_expect_true (u <= ECB_I2A_MAX_X5 * 10000))
      {
        uint32_t u1 = u % 10000;
        uint32_t u2 = u / 10000;

        ptr = ecb_i2a_x5 (ptr, u2);
        ptr = ecb_i2a_04 (ptr, u1);
      }
    else
      {
        uint32_t u1 = u % 10000;
        uint32_t ua = u / 10000;
        uint32_t u2 = ua % 10000;
        uint32_t u3 = ua / 10000;

        ptr = ecb_i2a_2  (ptr, u3);
        ptr = ecb_i2a_04 (ptr, u2);
        ptr = ecb_i2a_04 (ptr, u1);
      }
  #endif

  return ptr;
}

ecb_inline char *
ecb_i2a_i32 (char *ptr, int32_t v)
{
  *ptr = '-'; ptr += v < 0;
  uint32_t u = v < 0 ? -(uint32_t)v : v;

  #if ECB_64BIT_NATIVE
    ptr = ecb_i2a_x10 (ptr, u); /* x10 fully covers 31 bit */
  #else
    ptr = ecb_i2a_u32 (ptr, u);
  #endif

  return ptr;
}

ecb_inline char *
ecb_i2a_u64 (char *ptr, uint64_t u)
{
  #if ECB_64BIT_NATIVE
    if (ecb_expect_true (u <= ECB_I2A_MAX_X10))
      ptr = ecb_i2a_x10 (ptr, u);
    else if (ecb_expect_false (u <= ECB_I2A_MAX_X10 * 1000000000))
      {
        uint64_t u1 = u % 1000000000;
        uint64_t u2 = u / 1000000000;

        ptr = ecb_i2a_x10 (ptr, u2);
        ptr = ecb_i2a_09 (ptr, u1);
      }
    else
      {
        uint64_t u1 = u % 1000000000;
        uint64_t ua = u / 1000000000;
        uint64_t u2 = ua % 1000000000;
        uint64_t u3 = ua / 1000000000;

        ptr = ecb_i2a_2  (ptr, u3);
        ptr = ecb_i2a_09 (ptr, u2);
        ptr = ecb_i2a_09 (ptr, u1);
      }
  #else
    if (ecb_expect_true (u <= ECB_I2A_MAX_X5))
      ptr = ecb_i2a_x5 (ptr, u);
    else
      {
        uint64_t u1 = u % 10000;
        uint64_t u2 = u / 10000;

        ptr = ecb_i2a_u64 (ptr, u2);
        ptr = ecb_i2a_04 (ptr, u1);
      }
  #endif

  return ptr;
}

ecb_inline char *
ecb_i2a_i64 (char *ptr, int64_t v)
{
  *ptr = '-'; ptr += v < 0;
  uint64_t u = v < 0 ? -(uint64_t)v : v;

  #if ECB_64BIT_NATIVE
    if (ecb_expect_true (u <= ECB_I2A_MAX_X10))
      ptr = ecb_i2a_x10 (ptr, u);
    else if (ecb_expect_false (u <= ECB_I2A_MAX_X10 * 1000000000))
      {
        uint64_t u1 = u % 1000000000;
        uint64_t u2 = u / 1000000000;

        ptr = ecb_i2a_x10 (ptr, u2);
        ptr = ecb_i2a_09 (ptr, u1);
      }
    else
      {
        uint64_t u1 = u % 1000000000;
        uint64_t ua = u / 1000000000;
        uint64_t u2 = ua % 1000000000;
        uint64_t u3 = ua / 1000000000;

        /* 2**63 is 19 digits, so the top is exactly one digit */
        *ptr++ = u3 + '0';
        ptr = ecb_i2a_09 (ptr, u2);
        ptr = ecb_i2a_09 (ptr, u1);
      }
  #else
    ptr = ecb_i2a_u64 (ptr, u);
  #endif

  return ptr;
}

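/* Usage sketch (hypothetical ecb_example_* helper, kept out of compilation by
 * #if 0): the ecb_i2a_* functions write only the characters of the number,
 * without a terminating zero, and return a pointer just past the last one, so
 * a buffer of ECB_I2A_MAX_DIGITS + 1 bytes always has room for the terminator. */
#if 0
static void
ecb_example_i2a (void)
{
  char buf[ECB_I2A_MAX_DIGITS + 1];
  char *end = ecb_i2a_i64 (buf, -9007199254740993LL);

  *end = 0; /* buf now holds "-9007199254740993" */
}
#endif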
|
|
/*******************************************************************************/
/* floating point stuff, can be disabled by defining ECB_NO_LIBM */

/* basically, everything uses "ieee pure-endian" floating point numbers */
/* the only noteworthy exception is ancient armle, which uses order 43218765 */
#if 0 \
    || __i386 || __i386__ \
    || ECB_GCC_AMD64 \
    || __powerpc__ || __ppc__ || __powerpc64__ || __ppc64__ \
    || defined __s390__ || defined __s390x__ \
    || defined __mips__ \
    || defined __alpha__ \
    || defined __hppa__ \
    || defined __ia64__ \
    || defined __m68k__ \
    || defined __m88k__ \
    || defined __sh__ \
    || defined _M_IX86 || defined ECB_MSVC_AMD64 || defined _M_IA64 \
    || (defined __arm__ && (defined __ARM_EABI__ || defined __EABI__ || defined __VFP_FP__ || defined _WIN32_WCE || defined __ANDROID__)) \
    || defined __aarch64__
  #define ECB_STDFP 1
#else
  #define ECB_STDFP 0
#endif

#ifndef ECB_NO_LIBM

#include <math.h> /* for frexp*, ldexp*, INFINITY, NAN */

/* only the oldest of old doesn't have this one. solaris. */
#ifdef INFINITY
  #define ECB_INFINITY INFINITY
#else
  #define ECB_INFINITY HUGE_VAL
#endif

#ifdef NAN
  #define ECB_NAN NAN
#else
  #define ECB_NAN ECB_INFINITY
#endif

#if ECB_C99 || _XOPEN_VERSION >= 600 || _POSIX_VERSION >= 200112L
  #define ecb_ldexpf(x,e) ldexpf ((x), (e))
  #define ecb_frexpf(x,e) frexpf ((x), (e))
#else
  #define ecb_ldexpf(x,e) (float) ldexp ((double) (x), (e))
  #define ecb_frexpf(x,e) (float) frexp ((double) (x), (e))
#endif

/* convert a float to ieee single/binary32 */
ecb_function_ ecb_const uint32_t ecb_float_to_binary32 (float x);
ecb_function_ ecb_const uint32_t
ecb_float_to_binary32 (float x)
{
  uint32_t r;

  #if ECB_STDFP
    memcpy (&r, &x, 4);
  #else
    /* slow emulation, works for anything but -0 */
    uint32_t m;
    int e;

    if (x == 0e0f                    ) return 0x00000000U;
    if (x > +3.40282346638528860e+38f) return 0x7f800000U;
    if (x < -3.40282346638528860e+38f) return 0xff800000U;
    if (x != x                       ) return 0x7fbfffffU;

    m = ecb_frexpf (x, &e) * 0x1000000U;

    r = m & 0x80000000U;

    if (r)
      m = -m;

    if (e <= -126)
      {
        m &= 0xffffffU;
        m >>= (-125 - e);
        e = -126;
      }

    r |= (e + 126) << 23;
    r |= m & 0x7fffffU;
  #endif

  return r;
}

/* converts an ieee single/binary32 to a float */
ecb_function_ ecb_const float ecb_binary32_to_float (uint32_t x);
ecb_function_ ecb_const float
ecb_binary32_to_float (uint32_t x)
{
  float r;

  #if ECB_STDFP
    memcpy (&r, &x, 4);
  #else
    /* emulation, only works for normals and subnormals and +0 */
    int neg = x >> 31;
    int e = (x >> 23) & 0xffU;

    x &= 0x7fffffU;

    if (e)
      x |= 0x800000U;
    else
      e = 1;

    /* we distrust ldexpf a bit and do the 2**-24 scaling by an extra multiply */
    r = ecb_ldexpf (x * (0.5f / 0x800000U), e - 126);

    r = neg ? -r : r;
  #endif

  return r;
}

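/* Illustrative round-trip sketch (hypothetical ecb_example_* helper, kept out
 * of compilation by #if 0): a float goes to its IEEE-754 binary32 bit pattern
 * and back; on ECB_STDFP targets both directions are plain memcpys. */
#if 0
static void
ecb_example_binary32 (void)
{
  uint32_t bits = ecb_float_to_binary32 (1.0f); /* 0x3f800000 */
  float    back = ecb_binary32_to_float (bits); /* 1.0f again */

  (void)back;
}
#endif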
|
|
/* convert a double to ieee double/binary64 */
ecb_function_ ecb_const uint64_t ecb_double_to_binary64 (double x);
ecb_function_ ecb_const uint64_t
ecb_double_to_binary64 (double x)
{
  uint64_t r;

  #if ECB_STDFP
    memcpy (&r, &x, 8);
  #else
    /* slow emulation, works for anything but -0 */
    uint64_t m;
    int e;

    if (x == 0e0                      ) return 0x0000000000000000U;
    if (x > +1.79769313486231470e+308 ) return 0x7ff0000000000000U;
    if (x < -1.79769313486231470e+308 ) return 0xfff0000000000000U;
    if (x != x                        ) return 0x7ff7ffffffffffffU;

    m = frexp (x, &e) * 0x20000000000000U;

    r = m & 0x8000000000000000U;

    if (r)
      m = -m;

    if (e <= -1022)
      {
        m &= 0x1fffffffffffffU;
        m >>= (-1021 - e);
        e = -1022;
      }

    r |= ((uint64_t)(e + 1022)) << 52;
    r |= m & 0xfffffffffffffU;
  #endif

  return r;
}

/* converts an ieee double/binary64 to a double */
ecb_function_ ecb_const double ecb_binary64_to_double (uint64_t x);
ecb_function_ ecb_const double
ecb_binary64_to_double (uint64_t x)
{
  double r;

  #if ECB_STDFP
    memcpy (&r, &x, 8);
  #else
    /* emulation, only works for normals and subnormals and +0 */
    int neg = x >> 63;
    int e = (x >> 52) & 0x7ffU;

    x &= 0xfffffffffffffU;

    if (e)
      x |= 0x10000000000000U;
    else
      e = 1;

    /* we distrust ldexp a bit and do the 2**-53 scaling by an extra multiply */
    r = ldexp (x * (0.5 / 0x10000000000000U), e - 1022);

    r = neg ? -r : r;
  #endif

  return r;
}

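/* Illustrative sketch (hypothetical ecb_example_* helper, kept out of
 * compilation by #if 0): picking apart a double via its binary64 bit pattern;
 * for 1.0 the biased exponent field is 1023 and the mantissa field is zero. */
#if 0
static void
ecb_example_binary64 (void)
{
  uint64_t bits = ecb_double_to_binary64 (1.0);        /* 0x3ff0000000000000 */
  unsigned int biased_exponent = (bits >> 52) & 0x7ff; /* 1023 */

  (void)biased_exponent;
}
#endif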
|
|
/* convert a float to ieee half/binary16 */
ecb_function_ ecb_const uint16_t ecb_float_to_binary16 (float x);
ecb_function_ ecb_const uint16_t
ecb_float_to_binary16 (float x)
{
  return ecb_binary32_to_binary16 (ecb_float_to_binary32 (x));
}

/* convert an ieee half/binary16 to float */
ecb_function_ ecb_const float ecb_binary16_to_float (uint16_t x);
ecb_function_ ecb_const float
ecb_binary16_to_float (uint16_t x)
{
  return ecb_binary32_to_float (ecb_binary16_to_binary32 (x));
}

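/* Illustrative sketch (hypothetical ecb_example_* helper, kept out of
 * compilation by #if 0): 1.5f is exactly representable in half precision,
 * so it survives the float -> binary16 -> float round trip unchanged. */
#if 0
static void
ecb_example_binary16_float (void)
{
  uint16_t h = ecb_float_to_binary16 (1.5f); /* 0x3e00 */
  float    f = ecb_binary16_to_float (h);    /* 1.5f   */

  (void)f;
}
#endif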
|
|
#endif

#endif
