/cvs/libecb/ecb.h
Revision: 1.216
Committed: Wed Apr 13 15:43:28 2022 UTC by root
Content type: text/plain
Branch: MAIN
CVS Tags: HEAD
Changes since 1.215: +2 -2 lines
Log Message:
*** empty log message ***

File Contents

1 /*
2 * libecb - http://software.schmorp.de/pkg/libecb
3 *
4 * Copyright (©) 2009-2015,2018-2021 Marc Alexander Lehmann <libecb@schmorp.de>
5 * Copyright (©) 2011 Emanuele Giaquinta
6 * All rights reserved.
7 *
8 * Redistribution and use in source and binary forms, with or without modifica-
9 * tion, are permitted provided that the following conditions are met:
10 *
11 * 1. Redistributions of source code must retain the above copyright notice,
12 * this list of conditions and the following disclaimer.
13 *
14 * 2. Redistributions in binary form must reproduce the above copyright
15 * notice, this list of conditions and the following disclaimer in the
16 * documentation and/or other materials provided with the distribution.
17 *
18 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
19 * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MER-
20 * CHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
21 * EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPE-
22 * CIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
23 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
24 * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
25 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTH-
26 * ERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
27 * OF THE POSSIBILITY OF SUCH DAMAGE.
28 *
29 * Alternatively, the contents of this file may be used under the terms of
30 * the GNU General Public License ("GPL") version 2 or any later version,
31 * in which case the provisions of the GPL are applicable instead of
32 * the above. If you wish to allow the use of your version of this file
33 * only under the terms of the GPL and not to allow others to use your
34 * version of this file under the BSD license, indicate your decision
35 * by deleting the provisions above and replace them with the notice
36 * and other provisions required by the GPL. If you do not delete the
37 * provisions above, a recipient may use your version of this file under
38 * either the BSD or the GPL.
39 */
40
41 #ifndef ECB_H
42 #define ECB_H
43
44 /* 16 bits major, 16 bits minor */
45 #define ECB_VERSION 0x0001000c
46
47 #include <string.h> /* for memcpy */
48
49 #if defined (_WIN32) && !defined (__MINGW32__)
50 typedef signed char int8_t;
51 typedef unsigned char uint8_t;
52 typedef signed char int_fast8_t;
53 typedef unsigned char uint_fast8_t;
54 typedef signed short int16_t;
55 typedef unsigned short uint16_t;
56 typedef signed int int_fast16_t;
57 typedef unsigned int uint_fast16_t;
58 typedef signed int int32_t;
59 typedef unsigned int uint32_t;
60 typedef signed int int_fast32_t;
61 typedef unsigned int uint_fast32_t;
62 #if __GNUC__
63 typedef signed long long int64_t;
64 typedef unsigned long long uint64_t;
65 #else /* _MSC_VER || __BORLANDC__ */
66 typedef signed __int64 int64_t;
67 typedef unsigned __int64 uint64_t;
68 #endif
69 typedef int64_t int_fast64_t;
70 typedef uint64_t uint_fast64_t;
71 #ifdef _WIN64
72 #define ECB_PTRSIZE 8
73 typedef uint64_t uintptr_t;
74 typedef int64_t intptr_t;
75 #else
76 #define ECB_PTRSIZE 4
77 typedef uint32_t uintptr_t;
78 typedef int32_t intptr_t;
79 #endif
80 #else
81 #include <inttypes.h>
82 #if (defined INTPTR_MAX ? INTPTR_MAX : ULONG_MAX) > 0xffffffffU
83 #define ECB_PTRSIZE 8
84 #else
85 #define ECB_PTRSIZE 4
86 #endif
87 #endif
88
89 #define ECB_GCC_AMD64 (__amd64 || __amd64__ || __x86_64 || __x86_64__)
90 #define ECB_MSVC_AMD64 (_M_AMD64 || _M_X64)
91
92 #ifndef ECB_OPTIMIZE_SIZE
93 #if __OPTIMIZE_SIZE__
94 #define ECB_OPTIMIZE_SIZE 1
95 #else
96 #define ECB_OPTIMIZE_SIZE 0
97 #endif
98 #endif
99
100 /* work around x32 idiocy by defining proper macros */
101 #if ECB_GCC_AMD64 || ECB_MSVC_AMD64
102 #if _ILP32
103 #define ECB_AMD64_X32 1
104 #else
105 #define ECB_AMD64 1
106 #endif
107 #endif
108
109 #if ECB_PTRSIZE >= 8 || ECB_AMD64_X32
110 #define ECB_64BIT_NATIVE 1
111 #else
112 #define ECB_64BIT_NATIVE 0
113 #endif
114
115 /* many compilers define __GNUC__ to some version but then only implement
116 * what their idiot authors think are the "more important" extensions,
117 * causing enormous grief in return for some better fake benchmark numbers.
118 * or so.
119 * we try to detect these and simply assume they are not gcc - if they have
120 * an issue with that they should have done it right in the first place.
121 */
122 #if !defined __GNUC_MINOR__ || defined __INTEL_COMPILER || defined __SUNPRO_C || defined __SUNPRO_CC || defined __llvm__ || defined __clang__
123 #define ECB_GCC_VERSION(major,minor) 0
124 #else
125 #define ECB_GCC_VERSION(major,minor) (__GNUC__ > (major) || (__GNUC__ == (major) && __GNUC_MINOR__ >= (minor)))
126 #endif
127
128 #define ECB_CLANG_VERSION(major,minor) (__clang_major__ > (major) || (__clang_major__ == (major) && __clang_minor__ >= (minor)))
129
130 #if __clang__ && defined __has_builtin
131 #define ECB_CLANG_BUILTIN(x) __has_builtin (x)
132 #else
133 #define ECB_CLANG_BUILTIN(x) 0
134 #endif
135
136 #if __clang__ && defined __has_extension
137 #define ECB_CLANG_EXTENSION(x) __has_extension (x)
138 #else
139 #define ECB_CLANG_EXTENSION(x) 0
140 #endif
141
142 #define ECB_CPP (__cplusplus+0)
143 #define ECB_CPP11 (__cplusplus >= 201103L)
144 #define ECB_CPP14 (__cplusplus >= 201402L)
145 #define ECB_CPP17 (__cplusplus >= 201703L)
146
147 #if ECB_CPP
148 #define ECB_C 0
149 #define ECB_STDC_VERSION 0
150 #else
151 #define ECB_C 1
152 #define ECB_STDC_VERSION __STDC_VERSION__
153 #endif
154
155 #define ECB_C99 (ECB_STDC_VERSION >= 199901L)
156 #define ECB_C11 (ECB_STDC_VERSION >= 201112L)
157 #define ECB_C17 (ECB_STDC_VERSION >= 201710L)
158
159 #if ECB_CPP
160 #define ECB_EXTERN_C extern "C"
161 #define ECB_EXTERN_C_BEG ECB_EXTERN_C {
162 #define ECB_EXTERN_C_END }
163 #else
164 #define ECB_EXTERN_C extern
165 #define ECB_EXTERN_C_BEG
166 #define ECB_EXTERN_C_END
167 #endif
168
169 /*****************************************************************************/
170
171 /* ECB_NO_THREADS - ecb is not used by multiple threads, ever */
172 /* ECB_NO_SMP - ecb might be used in multiple threads, but only on a single cpu */
173
174 #if ECB_NO_THREADS
175 #define ECB_NO_SMP 1
176 #endif
177
178 #if ECB_NO_SMP
179 #define ECB_MEMORY_FENCE do { } while (0)
180 #endif
181
182 /* http://www-01.ibm.com/support/knowledgecenter/SSGH3R_13.1.0/com.ibm.xlcpp131.aix.doc/compiler_ref/compiler_builtins.html */
183 #if __xlC__ && ECB_CPP
184 #include <builtins.h>
185 #endif
186
187 #if 1400 <= _MSC_VER
188 #include <intrin.h> /* fence functions _ReadBarrier, also bit search functions _BitScanReverse */
189 #endif
190
191 #ifndef ECB_MEMORY_FENCE
192 #if ECB_GCC_VERSION(2,5) || defined __INTEL_COMPILER || (__llvm__ && __GNUC__) || __SUNPRO_C >= 0x5110 || __SUNPRO_CC >= 0x5110
193 #define ECB_MEMORY_FENCE_RELAXED __asm__ __volatile__ ("" : : : "memory")
194 #if __i386 || __i386__
195 #define ECB_MEMORY_FENCE __asm__ __volatile__ ("lock; orb $0, -1(%%esp)" : : : "memory")
196 #define ECB_MEMORY_FENCE_ACQUIRE __asm__ __volatile__ ("" : : : "memory")
197 #define ECB_MEMORY_FENCE_RELEASE __asm__ __volatile__ ("" : : : "memory")
198 #elif ECB_GCC_AMD64
199 #define ECB_MEMORY_FENCE __asm__ __volatile__ ("mfence" : : : "memory")
200 #define ECB_MEMORY_FENCE_ACQUIRE __asm__ __volatile__ ("" : : : "memory")
201 #define ECB_MEMORY_FENCE_RELEASE __asm__ __volatile__ ("" : : : "memory")
202 #elif __powerpc__ || __ppc__ || __powerpc64__ || __ppc64__
203 #define ECB_MEMORY_FENCE __asm__ __volatile__ ("sync" : : : "memory")
204 #elif defined __ARM_ARCH_2__ \
205 || defined __ARM_ARCH_3__ || defined __ARM_ARCH_3M__ \
206 || defined __ARM_ARCH_4__ || defined __ARM_ARCH_4T__ \
207 || defined __ARM_ARCH_5__ || defined __ARM_ARCH_5E__ \
208 || defined __ARM_ARCH_5T__ || defined __ARM_ARCH_5TE__ \
209 || defined __ARM_ARCH_5TEJ__
210 /* should not need any, unless running old code on newer cpu - arm doesn't support that */
211 #elif defined __ARM_ARCH_6__ || defined __ARM_ARCH_6J__ \
212 || defined __ARM_ARCH_6K__ || defined __ARM_ARCH_6ZK__ \
213 || defined __ARM_ARCH_6T2__
214 #define ECB_MEMORY_FENCE __asm__ __volatile__ ("mcr p15,0,%0,c7,c10,5" : : "r" (0) : "memory")
215 #elif defined __ARM_ARCH_7__ || defined __ARM_ARCH_7A__ \
216 || defined __ARM_ARCH_7R__ || defined __ARM_ARCH_7M__
217 #define ECB_MEMORY_FENCE __asm__ __volatile__ ("dmb" : : : "memory")
218 #elif __aarch64__
219 #define ECB_MEMORY_FENCE __asm__ __volatile__ ("dmb ish" : : : "memory")
220 #elif (__sparc || __sparc__) && !(__sparc_v8__ || defined __sparcv8)
221 #define ECB_MEMORY_FENCE __asm__ __volatile__ ("membar #LoadStore | #LoadLoad | #StoreStore | #StoreLoad" : : : "memory")
222 #define ECB_MEMORY_FENCE_ACQUIRE __asm__ __volatile__ ("membar #LoadStore | #LoadLoad" : : : "memory")
223 #define ECB_MEMORY_FENCE_RELEASE __asm__ __volatile__ ("membar #LoadStore | #StoreStore")
224 #elif defined __s390__ || defined __s390x__
225 #define ECB_MEMORY_FENCE __asm__ __volatile__ ("bcr 15,0" : : : "memory")
226 #elif defined __mips__
227 /* GNU/Linux emulates sync on mips1 architectures, so we force its use */
228 /* anybody else who still uses mips1 is supposed to send in their version, with detection code. */
229 #define ECB_MEMORY_FENCE __asm__ __volatile__ (".set mips2; sync; .set mips0" : : : "memory")
230 #elif defined __alpha__
231 #define ECB_MEMORY_FENCE __asm__ __volatile__ ("mb" : : : "memory")
232 #elif defined __hppa__
233 #define ECB_MEMORY_FENCE __asm__ __volatile__ ("" : : : "memory")
234 #define ECB_MEMORY_FENCE_RELEASE __asm__ __volatile__ ("")
235 #elif defined __ia64__
236 #define ECB_MEMORY_FENCE __asm__ __volatile__ ("mf" : : : "memory")
237 #elif defined __m68k__
238 #define ECB_MEMORY_FENCE __asm__ __volatile__ ("" : : : "memory")
239 #elif defined __m88k__
240 #define ECB_MEMORY_FENCE __asm__ __volatile__ ("tb1 0,%%r0,128" : : : "memory")
241 #elif defined __sh__
242 #define ECB_MEMORY_FENCE __asm__ __volatile__ ("" : : : "memory")
243 #endif
244 #endif
245 #endif
246
247 #ifndef ECB_MEMORY_FENCE
248 #if ECB_GCC_VERSION(4,7)
249 /* see comment below (stdatomic.h) about the C11 memory model. */
250 #define ECB_MEMORY_FENCE __atomic_thread_fence (__ATOMIC_SEQ_CST)
251 #define ECB_MEMORY_FENCE_ACQUIRE __atomic_thread_fence (__ATOMIC_ACQUIRE)
252 #define ECB_MEMORY_FENCE_RELEASE __atomic_thread_fence (__ATOMIC_RELEASE)
253 #undef ECB_MEMORY_FENCE_RELAXED
254 #define ECB_MEMORY_FENCE_RELAXED __atomic_thread_fence (__ATOMIC_RELAXED)
255
256 #elif ECB_CLANG_EXTENSION(c_atomic)
257 /* see comment below (stdatomic.h) about the C11 memory model. */
258 #define ECB_MEMORY_FENCE __c11_atomic_thread_fence (__ATOMIC_SEQ_CST)
259 #define ECB_MEMORY_FENCE_ACQUIRE __c11_atomic_thread_fence (__ATOMIC_ACQUIRE)
260 #define ECB_MEMORY_FENCE_RELEASE __c11_atomic_thread_fence (__ATOMIC_RELEASE)
261 #undef ECB_MEMORY_FENCE_RELAXED
262 #define ECB_MEMORY_FENCE_RELAXED __c11_atomic_thread_fence (__ATOMIC_RELAXED)
263
264 #elif ECB_GCC_VERSION(4,4) || defined __INTEL_COMPILER || defined __clang__
265 #define ECB_MEMORY_FENCE __sync_synchronize ()
266 #elif _MSC_VER >= 1500 /* VC++ 2008 */
267 /* apparently, microsoft broke all the memory barrier stuff in Visual Studio 2008... */
268 #pragma intrinsic(_ReadBarrier,_WriteBarrier,_ReadWriteBarrier)
269 #define ECB_MEMORY_FENCE _ReadWriteBarrier (); MemoryBarrier()
270 #define ECB_MEMORY_FENCE_ACQUIRE _ReadWriteBarrier (); MemoryBarrier() /* according to msdn, _ReadBarrier is not a load fence */
271 #define ECB_MEMORY_FENCE_RELEASE _WriteBarrier (); MemoryBarrier()
272 #elif _MSC_VER >= 1400 /* VC++ 2005 */
273 #pragma intrinsic(_ReadBarrier,_WriteBarrier,_ReadWriteBarrier)
274 #define ECB_MEMORY_FENCE _ReadWriteBarrier ()
275 #define ECB_MEMORY_FENCE_ACQUIRE _ReadWriteBarrier () /* according to msdn, _ReadBarrier is not a load fence */
276 #define ECB_MEMORY_FENCE_RELEASE _WriteBarrier ()
277 #elif defined _WIN32
278 #include <WinNT.h>
279 #define ECB_MEMORY_FENCE MemoryBarrier () /* actually just xchg on x86... scary */
280 #elif __SUNPRO_C >= 0x5110 || __SUNPRO_CC >= 0x5110
281 #include <mbarrier.h>
282 #define ECB_MEMORY_FENCE __machine_rw_barrier ()
283 #define ECB_MEMORY_FENCE_ACQUIRE __machine_acq_barrier ()
284 #define ECB_MEMORY_FENCE_RELEASE __machine_rel_barrier ()
285 #define ECB_MEMORY_FENCE_RELAXED __compiler_barrier ()
286 #elif __xlC__
287 #define ECB_MEMORY_FENCE __sync ()
288 #endif
289 #endif
290
291 #ifndef ECB_MEMORY_FENCE
292 #if ECB_C11 && !defined __STDC_NO_ATOMICS__
293 /* we assume that these memory fences work on all variables/all memory accesses, */
294 /* not just C11 atomics and atomic accesses */
295 #include <stdatomic.h>
296 #define ECB_MEMORY_FENCE atomic_thread_fence (memory_order_seq_cst)
297 #define ECB_MEMORY_FENCE_ACQUIRE atomic_thread_fence (memory_order_acquire)
298 #define ECB_MEMORY_FENCE_RELEASE atomic_thread_fence (memory_order_release)
299 #endif
300 #endif
301
302 #ifndef ECB_MEMORY_FENCE
303 #if !ECB_AVOID_PTHREADS
304 /*
305 * if you get undefined symbol references to pthread_mutex_lock,
306 * or failure to find pthread.h, then you should implement
307 * the ECB_MEMORY_FENCE operations for your cpu/compiler
308 * OR provide pthread.h and link against the posix thread library
309 * of your system.
310 */
311 #include <pthread.h>
312 #define ECB_NEEDS_PTHREADS 1
313 #define ECB_MEMORY_FENCE_NEEDS_PTHREADS 1
314
315 static pthread_mutex_t ecb_mf_lock = PTHREAD_MUTEX_INITIALIZER;
316 #define ECB_MEMORY_FENCE do { pthread_mutex_lock (&ecb_mf_lock); pthread_mutex_unlock (&ecb_mf_lock); } while (0)
317 #endif
318 #endif
319
320 #if !defined ECB_MEMORY_FENCE_ACQUIRE && defined ECB_MEMORY_FENCE
321 #define ECB_MEMORY_FENCE_ACQUIRE ECB_MEMORY_FENCE
322 #endif
323
324 #if !defined ECB_MEMORY_FENCE_RELEASE && defined ECB_MEMORY_FENCE
325 #define ECB_MEMORY_FENCE_RELEASE ECB_MEMORY_FENCE
326 #endif
327
328 #if !defined ECB_MEMORY_FENCE_RELAXED && defined ECB_MEMORY_FENCE
329 #define ECB_MEMORY_FENCE_RELAXED ECB_MEMORY_FENCE /* very heavy-handed */
330 #endif
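
/* Editorial sketch, not part of the upstream header: the usual way the
 * release/acquire fences above are paired is publishing data through a flag -
 * release before setting the flag, acquire after reading it. All
 * ecb_example_* names are illustrative only. */
#ifdef ECB_MEMORY_FENCE
static int ecb_example_payload;
static volatile int ecb_example_ready;

static void
ecb_example_publish (int value)
{
  ecb_example_payload = value;
  ECB_MEMORY_FENCE_RELEASE; /* make the payload visible before the flag */
  ecb_example_ready = 1;
}

static int
ecb_example_consume (void)
{
  while (!ecb_example_ready)
    ; /* spin until the producer sets the flag */

  ECB_MEMORY_FENCE_ACQUIRE; /* do not read the payload before seeing the flag */
  return ecb_example_payload;
}
#endif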
331
332 /*****************************************************************************/
333
334 #if ECB_CPP
335 #define ecb_inline static inline
336 #elif ECB_GCC_VERSION(2,5)
337 #define ecb_inline static __inline__
338 #elif ECB_C99
339 #define ecb_inline static inline
340 #else
341 #define ecb_inline static
342 #endif
343
344 #if ECB_GCC_VERSION(3,3)
345 #define ecb_restrict __restrict__
346 #elif ECB_C99
347 #define ecb_restrict restrict
348 #else
349 #define ecb_restrict
350 #endif
351
352 typedef int ecb_bool;
353
354 #define ECB_CONCAT_(a, b) a ## b
355 #define ECB_CONCAT(a, b) ECB_CONCAT_(a, b)
356 #define ECB_STRINGIFY_(a) # a
357 #define ECB_STRINGIFY(a) ECB_STRINGIFY_(a)
358 #define ECB_STRINGIFY_EXPR(expr) ((expr), ECB_STRINGIFY_ (expr))
359
360 /* This marks larger functions that do not necessarily need to be inlined */
361 /* The idea is to possibly compile the header twice, */
362 /* once exposing only the declarations, another time to define external functions */
363 /* TODO: possibly static would be best for these at the moment? */
364 #define ecb_function_ ecb_inline
365
366 #if ECB_GCC_VERSION(3,1) || ECB_CLANG_VERSION(2,8)
367 #define ecb_attribute(attrlist) __attribute__ (attrlist)
368 #else
369 #define ecb_attribute(attrlist)
370 #endif
371
372 #if ECB_GCC_VERSION(3,1) || ECB_CLANG_BUILTIN(__builtin_constant_p)
373 #define ecb_is_constant(expr) __builtin_constant_p (expr)
374 #else
375 /* possible C11 impl for integral types
376 typedef struct ecb_is_constant_struct ecb_is_constant_struct;
377 #define ecb_is_constant(expr) _Generic ((1 ? (struct ecb_is_constant_struct *)0 : (void *)((expr) - (expr))), ecb_is_constant_struct *: 0, default: 1) */
378
379 #define ecb_is_constant(expr) 0
380 #endif
381
382 #if ECB_GCC_VERSION(3,1) || ECB_CLANG_BUILTIN(__builtin_expect)
383 #define ecb_expect(expr,value) __builtin_expect ((expr),(value))
384 #else
385 #define ecb_expect(expr,value) (expr)
386 #endif
387
388 #if ECB_GCC_VERSION(3,1) || ECB_CLANG_BUILTIN(__builtin_prefetch)
389 #define ecb_prefetch(addr,rw,locality) __builtin_prefetch (addr, rw, locality)
390 #else
391 #define ecb_prefetch(addr,rw,locality)
392 #endif
393
394 /* no emulation for ecb_decltype */
395 #if ECB_CPP11
396 // older implementations might have problems with decltype(x)::type, work around it
397 template<class T> struct ecb_decltype_t { typedef T type; };
398 #define ecb_decltype(x) ecb_decltype_t<decltype (x)>::type
399 #elif ECB_GCC_VERSION(3,0) || ECB_CLANG_VERSION(2,8)
400 #define ecb_decltype(x) __typeof__ (x)
401 #endif
402
403 #if _MSC_VER >= 1300
404 #define ecb_deprecated __declspec (deprecated)
405 #else
406 #define ecb_deprecated ecb_attribute ((__deprecated__))
407 #endif
408
409 #if _MSC_VER >= 1500
410 #define ecb_deprecated_message(msg) __declspec (deprecated (msg))
411 #elif ECB_GCC_VERSION(4,5)
412 #define ecb_deprecated_message(msg) ecb_attribute ((__deprecated__ (msg)))
413 #else
414 #define ecb_deprecated_message(msg) ecb_deprecated
415 #endif
416
417 #if _MSC_VER >= 1400
418 #define ecb_noinline __declspec (noinline)
419 #else
420 #define ecb_noinline ecb_attribute ((__noinline__))
421 #endif
422
423 #define ecb_unused ecb_attribute ((__unused__))
424 #define ecb_const ecb_attribute ((__const__))
425 #define ecb_pure ecb_attribute ((__pure__))
426
427 #if ECB_C11 || __IBMC_NORETURN
428 /* http://www-01.ibm.com/support/knowledgecenter/SSGH3R_13.1.0/com.ibm.xlcpp131.aix.doc/language_ref/noreturn.html */
429 #define ecb_noreturn _Noreturn
430 #elif ECB_CPP11
431 #define ecb_noreturn [[noreturn]]
432 #elif _MSC_VER >= 1200
433 /* http://msdn.microsoft.com/en-us/library/k6ktzx3s.aspx */
434 #define ecb_noreturn __declspec (noreturn)
435 #else
436 #define ecb_noreturn ecb_attribute ((__noreturn__))
437 #endif
438
439 #if ECB_GCC_VERSION(4,3)
440 #define ecb_artificial ecb_attribute ((__artificial__))
441 #define ecb_hot ecb_attribute ((__hot__))
442 #define ecb_cold ecb_attribute ((__cold__))
443 #else
444 #define ecb_artificial
445 #define ecb_hot
446 #define ecb_cold
447 #endif
448
449 /* put around conditional expressions if you are very sure that the */
450 /* expression is mostly true or mostly false. note that these return */
451 /* booleans, not the expression. */
452 #define ecb_expect_false(expr) ecb_expect (!!(expr), 0)
453 #define ecb_expect_true(expr) ecb_expect (!!(expr), 1)
454 /* for compatibility to the rest of the world */
455 #define ecb_likely(expr) ecb_expect_true (expr)
456 #define ecb_unlikely(expr) ecb_expect_false (expr)
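
/* Editorial sketch, not part of the upstream header: the expect macros wrap a
 * whole condition and tell the compiler which branch is the hot path; the
 * function name is illustrative only. */
ecb_inline int
ecb_example_checked_use (const char *ptr)
{
  if (ecb_expect_false (!ptr))
    return -1;          /* rare error path, kept out of the fast path */

  return ptr[0] == 'x'; /* common case */
}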
457
458 /* count trailing zero bits and count # of one bits */
459 #if ECB_GCC_VERSION(3,4) \
460 || (ECB_CLANG_BUILTIN(__builtin_clz) && ECB_CLANG_BUILTIN(__builtin_clzll) \
461 && ECB_CLANG_BUILTIN(__builtin_ctz) && ECB_CLANG_BUILTIN(__builtin_ctzll) \
462 && ECB_CLANG_BUILTIN(__builtin_popcount))
463 #define ecb_ctz32(x) __builtin_ctz (x)
464 #define ecb_ctz64(x) (__SIZEOF_LONG__ == 8 ? __builtin_ctzl (x) : __builtin_ctzll (x))
465 #define ecb_clz32(x) __builtin_clz (x)
466 #define ecb_clz64(x) (__SIZEOF_LONG__ == 8 ? __builtin_clzl (x) : __builtin_clzll (x))
467 #define ecb_ld32(x) (ecb_clz32 (x) ^ 31)
468 #define ecb_ld64(x) (ecb_clz64 (x) ^ 63)
469 #define ecb_popcount32(x) __builtin_popcount (x)
470 /* ecb_popcount64 is more difficult, see below */
471 #else
472 ecb_function_ ecb_const int ecb_ctz32 (uint32_t x);
473 ecb_function_ ecb_const int ecb_ctz32 (uint32_t x)
474 {
475 #if 1400 <= _MSC_VER && (_M_IX86 || _M_X64 || _M_IA64 || _M_ARM)
476 unsigned long r;
477 _BitScanForward (&r, x);
478 return (int)r;
479 #else
480 int r;
481
482 x &= ~x + 1; /* this isolates the lowest bit */
483
484 #if 1
485 /* David Seal's algorithm, Message-ID: <32975@armltd.uucp> from 1994 */
486 /* This happens to return 32 for x == 0, but the API does not support this */
487
488 /* -0 marks unused entries */
489 static unsigned char table[64] =
490 {
491 32, 0, 1, 12, 2, 6, -0, 13, 3, -0, 7, -0, -0, -0, -0, 14,
492 10, 4, -0, -0, 8, -0, -0, 25, -0, -0, -0, -0, -0, 21, 27, 15,
493 31, 11, 5, -0, -0, -0, -0, -0, 9, -0, -0, 24, -0, -0, 20, 26,
494 30, -0, -0, -0, -0, 23, -0, 19, 29, -0, 22, 18, 28, 17, 16, -0
495 };
496
497 /* magic constant results in 33 unique values in the upper 6 bits */
498 x *= 0x0450fbafU; /* == 17 * 65 * 65535 */
499
500 r = table [x >> 26];
501 #elif 0 /* branchless on i386, typically */
502 r = 0;
503 r += !!(x & 0xaaaaaaaa) << 0;
504 r += !!(x & 0xcccccccc) << 1;
505 r += !!(x & 0xf0f0f0f0) << 2;
506 r += !!(x & 0xff00ff00) << 3;
507 r += !!(x & 0xffff0000) << 4;
508 #else /* branchless on modern compilers, typically */
509 r = 0;
510 if (x & 0xaaaaaaaa) r += 1;
511 if (x & 0xcccccccc) r += 2;
512 if (x & 0xf0f0f0f0) r += 4;
513 if (x & 0xff00ff00) r += 8;
514 if (x & 0xffff0000) r += 16;
515 #endif
516
517 return r;
518 #endif
519 }
520
521 ecb_function_ ecb_const int ecb_ctz64 (uint64_t x);
522 ecb_function_ ecb_const int ecb_ctz64 (uint64_t x)
523 {
524 #if 1400 <= _MSC_VER && (_M_X64 || _M_IA64 || _M_ARM)
525 unsigned long r;
526 _BitScanForward64 (&r, x);
527 return (int)r;
528 #else
529 int shift = x & 0xffffffff ? 0 : 32;
530 return ecb_ctz32 (x >> shift) + shift;
531 #endif
532 }
533
534 ecb_function_ ecb_const int ecb_clz32 (uint32_t x);
535 ecb_function_ ecb_const int ecb_clz32 (uint32_t x)
536 {
537 #if 1400 <= _MSC_VER && (_M_IX86 || _M_X64 || _M_IA64 || _M_ARM)
538 unsigned long r;
539 _BitScanReverse (&r, x);
540 return 31 - (int)r;
541 #else
542
543 /* Robert Harley's algorithm from comp.arch 1996-12-07 */
544 /* This happens to return 32 for x == 0, but the API does not support this */
545
546 /* -0 marks unused table elements */
547 static unsigned char table[64] =
548 {
549 32, 31, -0, 16, -0, 30, 3, -0, 15, -0, -0, -0, 29, 10, 2, -0,
550 -0, -0, 12, 14, 21, -0, 19, -0, -0, 28, -0, 25, -0, 9, 1, -0,
551 17, -0, 4, -0, -0, -0, 11, -0, 13, 22, 20, -0, 26, -0, -0, 18,
552 5, -0, -0, 23, -0, 27, -0, 6, -0, 24, 7, -0, 8, -0, 0, -0
553 };
554
555 /* propagate leftmost 1 bit to the right */
556 x |= x >> 1;
557 x |= x >> 2;
558 x |= x >> 4;
559 x |= x >> 8;
560 x |= x >> 16;
561
562 /* magic constant results in 33 unique values in the upper 6 bits */
563 x *= 0x06EB14F9U; /* == 7 * 255 * 255 * 255 */
564
565 return table [x >> 26];
566 #endif
567 }
568
569 ecb_function_ ecb_const int ecb_clz64 (uint64_t x);
570 ecb_function_ ecb_const int ecb_clz64 (uint64_t x)
571 {
572 #if 1400 <= _MSC_VER && (_M_X64 || _M_IA64 || _M_ARM)
573 unsigned long r;
574 _BitScanReverse64 (&r, x);
575 return 63 - (int)r;
576 #else
577 uint32_t l = x >> 32;
578 int shift = l ? 0 : 32;
579 return ecb_clz32 (l ? l : x) + shift;
580 #endif
581 }
582
583 ecb_function_ ecb_const int ecb_popcount32 (uint32_t x);
584 ecb_function_ ecb_const int ecb_popcount32 (uint32_t x)
585 {
586 x -= (x >> 1) & 0x55555555;
587 x = ((x >> 2) & 0x33333333) + (x & 0x33333333);
588 x = ((x >> 4) + x) & 0x0f0f0f0f;
589 x *= 0x01010101;
590
591 return x >> 24;
592 }
593
594 ecb_function_ ecb_const int ecb_ld32 (uint32_t x);
595 ecb_function_ ecb_const int ecb_ld32 (uint32_t x)
596 {
597 #if 1400 <= _MSC_VER && (_M_IX86 || _M_X64 || _M_IA64 || _M_ARM)
598 unsigned long r;
599 _BitScanReverse (&r, x);
600 return (int)r;
601 #else
602 int r = 0;
603
604 if (x >> 16) { x >>= 16; r += 16; }
605 if (x >> 8) { x >>= 8; r += 8; }
606 if (x >> 4) { x >>= 4; r += 4; }
607 if (x >> 2) { x >>= 2; r += 2; }
608 if (x >> 1) { r += 1; }
609
610 return r;
611 #endif
612 }
613
614 ecb_function_ ecb_const int ecb_ld64 (uint64_t x);
615 ecb_function_ ecb_const int ecb_ld64 (uint64_t x)
616 {
617 #if 1400 <= _MSC_VER && (_M_X64 || _M_IA64 || _M_ARM)
618 unsigned long r;
619 _BitScanReverse64 (&r, x);
620 return (int)r;
621 #else
622 int r = 0;
623
624 if (x >> 32) { x >>= 32; r += 32; }
625
626 return r + ecb_ld32 (x);
627 #endif
628 }
629 #endif
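
/* Editorial sketch, not part of the upstream header: a common use of
 * ecb_ctz32 is iterating over the set bits of a mask, clearing the lowest set
 * bit after each step; the function name is illustrative only. */
ecb_inline int
ecb_example_sum_bit_indices (uint32_t mask)
{
  int sum = 0;

  while (mask)
    {
      sum += ecb_ctz32 (mask); /* position of the lowest set bit */
      mask &= mask - 1;        /* clear exactly that bit */
    }

  return sum;
}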
630
631 ecb_function_ ecb_const ecb_bool ecb_is_pot32 (uint32_t x);
632 ecb_function_ ecb_const ecb_bool ecb_is_pot32 (uint32_t x) { return !(x & (x - 1)); }
633 ecb_function_ ecb_const ecb_bool ecb_is_pot64 (uint64_t x);
634 ecb_function_ ecb_const ecb_bool ecb_is_pot64 (uint64_t x) { return !(x & (x - 1)); }
635
636 ecb_function_ ecb_const uint8_t ecb_bitrev8 (uint8_t x);
637 ecb_function_ ecb_const uint8_t ecb_bitrev8 (uint8_t x)
638 {
639 return ( (x * 0x0802U & 0x22110U)
640 | (x * 0x8020U & 0x88440U)) * 0x10101U >> 16;
641 }
642
643 ecb_function_ ecb_const uint16_t ecb_bitrev16 (uint16_t x);
644 ecb_function_ ecb_const uint16_t ecb_bitrev16 (uint16_t x)
645 {
646 x = ((x >> 1) & 0x5555) | ((x & 0x5555) << 1);
647 x = ((x >> 2) & 0x3333) | ((x & 0x3333) << 2);
648 x = ((x >> 4) & 0x0f0f) | ((x & 0x0f0f) << 4);
649 x = ( x >> 8 ) | ( x << 8);
650
651 return x;
652 }
653
654 ecb_function_ ecb_const uint32_t ecb_bitrev32 (uint32_t x);
655 ecb_function_ ecb_const uint32_t ecb_bitrev32 (uint32_t x)
656 {
657 x = ((x >> 1) & 0x55555555) | ((x & 0x55555555) << 1);
658 x = ((x >> 2) & 0x33333333) | ((x & 0x33333333) << 2);
659 x = ((x >> 4) & 0x0f0f0f0f) | ((x & 0x0f0f0f0f) << 4);
660 x = ((x >> 8) & 0x00ff00ff) | ((x & 0x00ff00ff) << 8);
661 x = ( x >> 16 ) | ( x << 16);
662
663 return x;
664 }
665
666 ecb_function_ ecb_const int ecb_popcount64 (uint64_t x);
667 ecb_function_ ecb_const int ecb_popcount64 (uint64_t x)
668 {
669 /* popcount64 is only available on 64 bit cpus as gcc builtin. */
670 /* also, gcc/clang make this surprisingly difficult to use */
671 #if (__SIZEOF_LONG__ == 8) && (ECB_GCC_VERSION(3,4) || ECB_CLANG_BUILTIN (__builtin_popcountl))
672 return __builtin_popcountl (x);
673 #else
674 return ecb_popcount32 (x) + ecb_popcount32 (x >> 32);
675 #endif
676 }
677
678 ecb_inline uint8_t ecb_rotl8 (uint8_t x, unsigned int count) { return (x >> (-count & 7)) | (x << (count & 7)); }
679 ecb_inline uint8_t ecb_rotr8 (uint8_t x, unsigned int count) { return (x << (-count & 7)) | (x >> (count & 7)); }
680 ecb_inline uint16_t ecb_rotl16 (uint16_t x, unsigned int count) { return (x >> (-count & 15)) | (x << (count & 15)); }
681 ecb_inline uint16_t ecb_rotr16 (uint16_t x, unsigned int count) { return (x << (-count & 15)) | (x >> (count & 15)); }
682 ecb_inline uint32_t ecb_rotl32 (uint32_t x, unsigned int count) { return (x >> (-count & 31)) | (x << (count & 31)); }
683 ecb_inline uint32_t ecb_rotr32 (uint32_t x, unsigned int count) { return (x << (-count & 31)) | (x >> (count & 31)); }
684 ecb_inline uint64_t ecb_rotl64 (uint64_t x, unsigned int count) { return (x >> (-count & 63)) | (x << (count & 63)); }
685 ecb_inline uint64_t ecb_rotr64 (uint64_t x, unsigned int count) { return (x << (-count & 63)) | (x >> (count & 63)); }
686
687 #if ECB_CPP
688
689 inline uint8_t ecb_ctz (uint8_t v) { return ecb_ctz32 (v); }
690 inline uint16_t ecb_ctz (uint16_t v) { return ecb_ctz32 (v); }
691 inline uint32_t ecb_ctz (uint32_t v) { return ecb_ctz32 (v); }
692 inline uint64_t ecb_ctz (uint64_t v) { return ecb_ctz64 (v); }
693
694 inline bool ecb_is_pot (uint8_t v) { return ecb_is_pot32 (v); }
695 inline bool ecb_is_pot (uint16_t v) { return ecb_is_pot32 (v); }
696 inline bool ecb_is_pot (uint32_t v) { return ecb_is_pot32 (v); }
697 inline bool ecb_is_pot (uint64_t v) { return ecb_is_pot64 (v); }
698
699 inline int ecb_ld (uint8_t v) { return ecb_ld32 (v); }
700 inline int ecb_ld (uint16_t v) { return ecb_ld32 (v); }
701 inline int ecb_ld (uint32_t v) { return ecb_ld32 (v); }
702 inline int ecb_ld (uint64_t v) { return ecb_ld64 (v); }
703
704 inline int ecb_popcount (uint8_t v) { return ecb_popcount32 (v); }
705 inline int ecb_popcount (uint16_t v) { return ecb_popcount32 (v); }
706 inline int ecb_popcount (uint32_t v) { return ecb_popcount32 (v); }
707 inline int ecb_popcount (uint64_t v) { return ecb_popcount64 (v); }
708
709 inline uint8_t ecb_bitrev (uint8_t v) { return ecb_bitrev8 (v); }
710 inline uint16_t ecb_bitrev (uint16_t v) { return ecb_bitrev16 (v); }
711 inline uint32_t ecb_bitrev (uint32_t v) { return ecb_bitrev32 (v); }
712
713 inline uint8_t ecb_rotl (uint8_t v, unsigned int count) { return ecb_rotl8 (v, count); }
714 inline uint16_t ecb_rotl (uint16_t v, unsigned int count) { return ecb_rotl16 (v, count); }
715 inline uint32_t ecb_rotl (uint32_t v, unsigned int count) { return ecb_rotl32 (v, count); }
716 inline uint64_t ecb_rotl (uint64_t v, unsigned int count) { return ecb_rotl64 (v, count); }
717
718 inline uint8_t ecb_rotr (uint8_t v, unsigned int count) { return ecb_rotr8 (v, count); }
719 inline uint16_t ecb_rotr (uint16_t v, unsigned int count) { return ecb_rotr16 (v, count); }
720 inline uint32_t ecb_rotr (uint32_t v, unsigned int count) { return ecb_rotr32 (v, count); }
721 inline uint64_t ecb_rotr (uint64_t v, unsigned int count) { return ecb_rotr64 (v, count); }
722
723 #endif
724
725 #if ECB_GCC_VERSION(4,3) || (ECB_CLANG_BUILTIN(__builtin_bswap32) && ECB_CLANG_BUILTIN(__builtin_bswap64))
726 #if ECB_GCC_VERSION(4,8) || ECB_CLANG_BUILTIN(__builtin_bswap16)
727 #define ecb_bswap16(x) __builtin_bswap16 (x)
728 #else
729 #define ecb_bswap16(x) (__builtin_bswap32 (x) >> 16)
730 #endif
731 #define ecb_bswap32(x) __builtin_bswap32 (x)
732 #define ecb_bswap64(x) __builtin_bswap64 (x)
733 #elif _MSC_VER
734 #include <stdlib.h>
735 #define ecb_bswap16(x) ((uint16_t)_byteswap_ushort ((uint16_t)(x)))
736 #define ecb_bswap32(x) ((uint32_t)_byteswap_ulong ((uint32_t)(x)))
737 #define ecb_bswap64(x) ((uint64_t)_byteswap_uint64 ((uint64_t)(x)))
738 #else
739 ecb_function_ ecb_const uint16_t ecb_bswap16 (uint16_t x);
740 ecb_function_ ecb_const uint16_t ecb_bswap16 (uint16_t x)
741 {
742 return ecb_rotl16 (x, 8);
743 }
744
745 ecb_function_ ecb_const uint32_t ecb_bswap32 (uint32_t x);
746 ecb_function_ ecb_const uint32_t ecb_bswap32 (uint32_t x)
747 {
748 return (((uint32_t)ecb_bswap16 (x)) << 16) | ecb_bswap16 (x >> 16);
749 }
750
751 ecb_function_ ecb_const uint64_t ecb_bswap64 (uint64_t x);
752 ecb_function_ ecb_const uint64_t ecb_bswap64 (uint64_t x)
753 {
754 return (((uint64_t)ecb_bswap32 (x)) << 32) | ecb_bswap32 (x >> 32);
755 }
756 #endif
757
758 #if ECB_GCC_VERSION(4,5) || ECB_CLANG_BUILTIN(__builtin_unreachable)
759 #define ecb_unreachable() __builtin_unreachable ()
760 #else
761 /* this seems to work fine, but gcc always emits a warning for it :/ */
762 ecb_inline ecb_noreturn void ecb_unreachable (void);
763 ecb_inline ecb_noreturn void ecb_unreachable (void) { }
764 #endif
765
766 /* try to tell the compiler that some condition is definitely true */
767 #define ecb_assume(cond) if (!(cond)) ecb_unreachable (); else 0
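
/* Editorial sketch, not part of the upstream header: ecb_assume feeds the
 * optimiser a fact it cannot prove itself - here a non-negativity guarantee
 * that may turn the signed division into a plain shift. The function name is
 * illustrative only. */
ecb_inline int
ecb_example_div4 (int x)
{
  ecb_assume (x >= 0); /* the caller guarantees this */
  return x / 4;        /* may now be compiled as x >> 2 */
}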
768
769 ecb_inline uint32_t ecb_byteorder_helper (void);
770 ecb_inline uint32_t ecb_byteorder_helper (void)
771 {
772 /* the union code still generates code under pressure in gcc, */
773 /* but less than using pointers, and always seems to */
774 /* successfully return a constant. */
775 /* the reason why we have this horrible preprocessor mess */
776 /* is to avoid it in all cases, at least on common architectures */
777 /* or when using a recent enough gcc version (>= 4.6) */
778 #if (defined __BYTE_ORDER__ && __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__) \
779 || ((__i386 || __i386__ || _M_IX86 || ECB_GCC_AMD64 || ECB_MSVC_AMD64) && !__VOS__)
780 #define ECB_LITTLE_ENDIAN 1
781 return 0x44332211;
782 #elif (defined __BYTE_ORDER__ && __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__) \
783 || ((__AARCH64EB__ || __MIPSEB__ || __ARMEB__) && !__VOS__)
784 #define ECB_BIG_ENDIAN 1
785 return 0x11223344;
786 #else
787 union
788 {
789 uint8_t c[4];
790 uint32_t u;
791 } u = { 0x11, 0x22, 0x33, 0x44 };
792 return u.u;
793 #endif
794 }
795
796 ecb_inline ecb_const ecb_bool ecb_big_endian (void) { return ecb_byteorder_helper () == 0x11223344; }
797 ecb_inline ecb_const ecb_bool ecb_little_endian (void) { return ecb_byteorder_helper () == 0x44332211; }
798
799 /*****************************************************************************/
800 /* unaligned load/store */
801
802 ecb_inline uint_fast16_t ecb_be_u16_to_host (uint_fast16_t v) { return ecb_little_endian () ? ecb_bswap16 (v) : v; }
803 ecb_inline uint_fast32_t ecb_be_u32_to_host (uint_fast32_t v) { return ecb_little_endian () ? ecb_bswap32 (v) : v; }
804 ecb_inline uint_fast64_t ecb_be_u64_to_host (uint_fast64_t v) { return ecb_little_endian () ? ecb_bswap64 (v) : v; }
805
806 ecb_inline uint_fast16_t ecb_le_u16_to_host (uint_fast16_t v) { return ecb_big_endian () ? ecb_bswap16 (v) : v; }
807 ecb_inline uint_fast32_t ecb_le_u32_to_host (uint_fast32_t v) { return ecb_big_endian () ? ecb_bswap32 (v) : v; }
808 ecb_inline uint_fast64_t ecb_le_u64_to_host (uint_fast64_t v) { return ecb_big_endian () ? ecb_bswap64 (v) : v; }
809
810 ecb_inline uint_fast16_t ecb_peek_u16_u (const void *ptr) { uint16_t v; memcpy (&v, ptr, sizeof (v)); return v; }
811 ecb_inline uint_fast32_t ecb_peek_u32_u (const void *ptr) { uint32_t v; memcpy (&v, ptr, sizeof (v)); return v; }
812 ecb_inline uint_fast64_t ecb_peek_u64_u (const void *ptr) { uint64_t v; memcpy (&v, ptr, sizeof (v)); return v; }
813
814 ecb_inline uint_fast16_t ecb_peek_be_u16_u (const void *ptr) { return ecb_be_u16_to_host (ecb_peek_u16_u (ptr)); }
815 ecb_inline uint_fast32_t ecb_peek_be_u32_u (const void *ptr) { return ecb_be_u32_to_host (ecb_peek_u32_u (ptr)); }
816 ecb_inline uint_fast64_t ecb_peek_be_u64_u (const void *ptr) { return ecb_be_u64_to_host (ecb_peek_u64_u (ptr)); }
817
818 ecb_inline uint_fast16_t ecb_peek_le_u16_u (const void *ptr) { return ecb_le_u16_to_host (ecb_peek_u16_u (ptr)); }
819 ecb_inline uint_fast32_t ecb_peek_le_u32_u (const void *ptr) { return ecb_le_u32_to_host (ecb_peek_u32_u (ptr)); }
820 ecb_inline uint_fast64_t ecb_peek_le_u64_u (const void *ptr) { return ecb_le_u64_to_host (ecb_peek_u64_u (ptr)); }
821
822 ecb_inline uint_fast16_t ecb_host_to_be_u16 (uint_fast16_t v) { return ecb_little_endian () ? ecb_bswap16 (v) : v; }
823 ecb_inline uint_fast32_t ecb_host_to_be_u32 (uint_fast32_t v) { return ecb_little_endian () ? ecb_bswap32 (v) : v; }
824 ecb_inline uint_fast64_t ecb_host_to_be_u64 (uint_fast64_t v) { return ecb_little_endian () ? ecb_bswap64 (v) : v; }
825
826 ecb_inline uint_fast16_t ecb_host_to_le_u16 (uint_fast16_t v) { return ecb_big_endian () ? ecb_bswap16 (v) : v; }
827 ecb_inline uint_fast32_t ecb_host_to_le_u32 (uint_fast32_t v) { return ecb_big_endian () ? ecb_bswap32 (v) : v; }
828 ecb_inline uint_fast64_t ecb_host_to_le_u64 (uint_fast64_t v) { return ecb_big_endian () ? ecb_bswap64 (v) : v; }
829
830 ecb_inline void ecb_poke_u16_u (void *ptr, uint16_t v) { memcpy (ptr, &v, sizeof (v)); }
831 ecb_inline void ecb_poke_u32_u (void *ptr, uint32_t v) { memcpy (ptr, &v, sizeof (v)); }
832 ecb_inline void ecb_poke_u64_u (void *ptr, uint64_t v) { memcpy (ptr, &v, sizeof (v)); }
833
834 ecb_inline void ecb_poke_be_u16_u (void *ptr, uint_fast16_t v) { ecb_poke_u16_u (ptr, ecb_host_to_be_u16 (v)); }
835 ecb_inline void ecb_poke_be_u32_u (void *ptr, uint_fast32_t v) { ecb_poke_u32_u (ptr, ecb_host_to_be_u32 (v)); }
836 ecb_inline void ecb_poke_be_u64_u (void *ptr, uint_fast64_t v) { ecb_poke_u64_u (ptr, ecb_host_to_be_u64 (v)); }
837
838 ecb_inline void ecb_poke_le_u16_u (void *ptr, uint_fast16_t v) { ecb_poke_u16_u (ptr, ecb_host_to_le_u16 (v)); }
839 ecb_inline void ecb_poke_le_u32_u (void *ptr, uint_fast32_t v) { ecb_poke_u32_u (ptr, ecb_host_to_le_u32 (v)); }
840 ecb_inline void ecb_poke_le_u64_u (void *ptr, uint_fast64_t v) { ecb_poke_u64_u (ptr, ecb_host_to_le_u64 (v)); }
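
/* Editorial sketch, not part of the upstream header: storing and re-reading
 * an unaligned little-endian 32 bit value with the helpers above, as one
 * would for a wire format; buffer size and names are illustrative only. */
ecb_inline uint32_t
ecb_example_wire_roundtrip (uint32_t v)
{
  unsigned char buf[8];

  ecb_poke_le_u32_u (buf + 1, v);               /* unaligned little-endian store */
  return (uint32_t)ecb_peek_le_u32_u (buf + 1); /* same value back on any host */
}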
841
842 #if ECB_CPP
843
844 inline uint8_t ecb_bswap (uint8_t v) { return v; }
845 inline uint16_t ecb_bswap (uint16_t v) { return ecb_bswap16 (v); }
846 inline uint32_t ecb_bswap (uint32_t v) { return ecb_bswap32 (v); }
847 inline uint64_t ecb_bswap (uint64_t v) { return ecb_bswap64 (v); }
848
849 template<typename T> inline T ecb_be_to_host (T v) { return ecb_little_endian () ? ecb_bswap (v) : v; }
850 template<typename T> inline T ecb_le_to_host (T v) { return ecb_big_endian () ? ecb_bswap (v) : v; }
851 template<typename T> inline T ecb_peek (const void *ptr) { return *(const T *)ptr; }
852 template<typename T> inline T ecb_peek_be (const void *ptr) { return ecb_be_to_host (ecb_peek <T> (ptr)); }
853 template<typename T> inline T ecb_peek_le (const void *ptr) { return ecb_le_to_host (ecb_peek <T> (ptr)); }
854 template<typename T> inline T ecb_peek_u (const void *ptr) { T v; memcpy (&v, ptr, sizeof (v)); return v; }
855 template<typename T> inline T ecb_peek_be_u (const void *ptr) { return ecb_be_to_host (ecb_peek_u<T> (ptr)); }
856 template<typename T> inline T ecb_peek_le_u (const void *ptr) { return ecb_le_to_host (ecb_peek_u<T> (ptr)); }
857
858 template<typename T> inline T ecb_host_to_be (T v) { return ecb_little_endian () ? ecb_bswap (v) : v; }
859 template<typename T> inline T ecb_host_to_le (T v) { return ecb_big_endian () ? ecb_bswap (v) : v; }
860 template<typename T> inline void ecb_poke (void *ptr, T v) { *(T *)ptr = v; }
861 template<typename T> inline void ecb_poke_be (void *ptr, T v) { return ecb_poke <T> (ptr, ecb_host_to_be (v)); }
862 template<typename T> inline void ecb_poke_le (void *ptr, T v) { return ecb_poke <T> (ptr, ecb_host_to_le (v)); }
863 template<typename T> inline void ecb_poke_u (void *ptr, T v) { memcpy (ptr, &v, sizeof (v)); }
864 template<typename T> inline void ecb_poke_be_u (void *ptr, T v) { return ecb_poke_u<T> (ptr, ecb_host_to_be (v)); }
865 template<typename T> inline void ecb_poke_le_u (void *ptr, T v) { return ecb_poke_u<T> (ptr, ecb_host_to_le (v)); }
866
867 #endif
868
869 /*****************************************************************************/
870 /* pointer/integer hashing */
871
872 /* based on hash by Chris Wellons, https://nullprogram.com/blog/2018/07/31/ */
873 ecb_function_ ecb_const uint32_t ecb_mix32 (uint32_t v);
874 ecb_function_ ecb_const uint32_t ecb_mix32 (uint32_t v)
875 {
876 v ^= v >> 16; v *= 0x7feb352dU;
877 v ^= v >> 15; v *= 0x846ca68bU;
878 v ^= v >> 16;
879 return v;
880 }
881
882 ecb_function_ ecb_const uint32_t ecb_unmix32 (uint32_t v);
883 ecb_function_ ecb_const uint32_t ecb_unmix32 (uint32_t v)
884 {
885 v ^= v >> 16 ; v *= 0x43021123U;
886 v ^= v >> 15 ^ v >> 30; v *= 0x1d69e2a5U;
887 v ^= v >> 16 ;
888 return v;
889 }
890
891 /* based on splitmix64, by Sebastiano Vigna, https://prng.di.unimi.it/splitmix64.c */
892 ecb_function_ ecb_const uint64_t ecb_mix64 (uint64_t v);
893 ecb_function_ ecb_const uint64_t ecb_mix64 (uint64_t v)
894 {
895 v ^= v >> 30; v *= 0xbf58476d1ce4e5b9U;
896 v ^= v >> 27; v *= 0x94d049bb133111ebU;
897 v ^= v >> 31;
898 return v;
899 }
900
901 ecb_function_ ecb_const uint64_t ecb_unmix64 (uint64_t v);
902 ecb_function_ ecb_const uint64_t ecb_unmix64 (uint64_t v)
903 {
904 v ^= v >> 31 ^ v >> 62; v *= 0x319642b2d24d8ec3U;
905 v ^= v >> 27 ^ v >> 54; v *= 0x96de1b173f119089U;
906 v ^= v >> 30 ^ v >> 60;
907 return v;
908 }
909
910 ecb_function_ ecb_const uintptr_t ecb_ptrmix (void *p);
911 ecb_function_ ecb_const uintptr_t ecb_ptrmix (void *p)
912 {
913 #if ECB_PTRSIZE <= 4
914 return ecb_mix32 ((uint32_t)p);
915 #else
916 return ecb_mix64 ((uint64_t)p);
917 #endif
918 }
919
920 ecb_function_ ecb_const void *ecb_ptrunmix (uintptr_t v);
921 ecb_function_ ecb_const void *ecb_ptrunmix (uintptr_t v)
922 {
923 #if ECB_PTRSIZE <= 4
924 return (void *)ecb_unmix32 (v);
925 #else
926 return (void *)ecb_unmix64 (v);
927 #endif
928 }
929
930 #if ECB_CPP
931
932 template<typename T>
933 inline uintptr_t ecb_ptrmix (T *p)
934 {
935 return ecb_ptrmix (static_cast<void *>(p));
936 }
937
938 template<typename T>
939 inline T *ecb_ptrunmix (uintptr_t v)
940 {
941 return static_cast<T *>(ecb_ptrunmix (v));
942 }
943
944 #endif
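
/* Editorial sketch, not part of the upstream header: the mix functions are
 * bijections, so ecb_unmix32 recovers the original value, and ecb_ptrmix
 * spreads pointers over hash table slots; the table size is illustrative
 * only. */
ecb_inline ecb_bool
ecb_example_mix_roundtrip (uint32_t v)
{
  return ecb_unmix32 (ecb_mix32 (v)) == v; /* unmix inverts mix */
}

ecb_inline unsigned int
ecb_example_hash_slot (void *p)
{
  return ecb_ptrmix (p) & 1023; /* index into a 1024-entry table */
}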
945
946 /*****************************************************************************/
947 /* gray code */
948
949 ecb_inline uint_fast8_t ecb_gray_encode8 (uint_fast8_t b) { return b ^ (b >> 1); }
950 ecb_inline uint_fast16_t ecb_gray_encode16 (uint_fast16_t b) { return b ^ (b >> 1); }
951 ecb_inline uint_fast32_t ecb_gray_encode32 (uint_fast32_t b) { return b ^ (b >> 1); }
952 ecb_inline uint_fast64_t ecb_gray_encode64 (uint_fast64_t b) { return b ^ (b >> 1); }
953
954 ecb_function_ ecb_const uint8_t ecb_gray_decode8 (uint8_t g);
955 ecb_function_ ecb_const uint8_t ecb_gray_decode8 (uint8_t g)
956 {
957 g ^= g >> 1;
958 g ^= g >> 2;
959 g ^= g >> 4;
960
961 return g;
962 }
963
964 ecb_function_ ecb_const uint16_t ecb_gray_decode16 (uint16_t g);
965 ecb_function_ ecb_const uint16_t ecb_gray_decode16 (uint16_t g)
966 {
967 g ^= g >> 1;
968 g ^= g >> 2;
969 g ^= g >> 4;
970 g ^= g >> 8;
971
972 return g;
973 }
974
975 ecb_function_ ecb_const uint32_t ecb_gray_decode32 (uint32_t g);
976 ecb_function_ ecb_const uint32_t ecb_gray_decode32 (uint32_t g)
977 {
978 g ^= g >> 1;
979 g ^= g >> 2;
980 g ^= g >> 4;
981 g ^= g >> 8;
982 g ^= g >> 16;
983
984 return g;
985 }
986
987 ecb_function_ ecb_const uint64_t ecb_gray_decode64 (uint64_t g);
988 ecb_function_ ecb_const uint64_t ecb_gray_decode64 (uint64_t g)
989 {
990 g ^= g >> 1;
991 g ^= g >> 2;
992 g ^= g >> 4;
993 g ^= g >> 8;
994 g ^= g >> 16;
995 g ^= g >> 32;
996
997 return g;
998 }
999
1000 #if ECB_CPP
1001
1002 ecb_inline uint8_t ecb_gray_encode (uint8_t b) { return ecb_gray_encode8 (b); }
1003 ecb_inline uint16_t ecb_gray_encode (uint16_t b) { return ecb_gray_encode16 (b); }
1004 ecb_inline uint32_t ecb_gray_encode (uint32_t b) { return ecb_gray_encode32 (b); }
1005 ecb_inline uint64_t ecb_gray_encode (uint64_t b) { return ecb_gray_encode64 (b); }
1006
1007 ecb_inline uint8_t ecb_gray_decode (uint8_t g) { return ecb_gray_decode8 (g); }
1008 ecb_inline uint16_t ecb_gray_decode (uint16_t g) { return ecb_gray_decode16 (g); }
1009 ecb_inline uint32_t ecb_gray_decode (uint32_t g) { return ecb_gray_decode32 (g); }
1010 ecb_inline uint64_t ecb_gray_decode (uint64_t g) { return ecb_gray_decode64 (g); }
1011
1012 #endif
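
/* Editorial sketch, not part of the upstream header: round trip through the
 * gray code helpers above, plus the defining property that consecutive codes
 * differ in exactly one bit; the function name is illustrative only. */
ecb_inline ecb_bool
ecb_example_gray_ok (uint32_t b)
{
  uint32_t g = (uint32_t)ecb_gray_encode32 (b);

  return ecb_gray_decode32 (g) == b
      && ecb_popcount32 (g ^ (uint32_t)ecb_gray_encode32 (b + 1)) == 1;
}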
1013
1014 /*****************************************************************************/
1015 /* 2d hilbert curves */
1016
1017 /* algorithm from the book Hacker's Delight, modified to not */
1018 /* run into undefined behaviour for n==16 */
1019 ecb_function_ ecb_const uint32_t ecb_hilbert2d_index_to_coord32 (int n, uint32_t s);
1020 ecb_function_ ecb_const uint32_t ecb_hilbert2d_index_to_coord32 (int n, uint32_t s)
1021 {
1022 uint32_t comp, swap, cs, t, sr;
1023
1024 /* pad s on the left (unused) bits with 01 (no change groups) */
1025 s |= 0x55555555U << n << n;
1026 /* "s shift right" */
1027 sr = (s >> 1) & 0x55555555U;
1028 /* compute complement and swap info in two-bit groups */
1029 cs = ((s & 0x55555555U) + sr) ^ 0x55555555U;
1030
1031 /* parallel prefix xor op to propagate both complement
1032 * and swap info together from left to right (there is
1033 * no step "cs ^= cs >> 1", so in effect it computes
1034 * two independent parallel prefix operations on two
1035 * interleaved sets of sixteen bits).
1036 */
1037 cs ^= cs >> 2;
1038 cs ^= cs >> 4;
1039 cs ^= cs >> 8;
1040 cs ^= cs >> 16;
1041
1042 /* separate swap and complement bits */
1043 swap = cs & 0x55555555U;
1044 comp = (cs >> 1) & 0x55555555U;
1045
1046 /* calculate coordinates in odd and even bit positions */
1047 t = (s & swap) ^ comp;
1048 s = s ^ sr ^ t ^ (t << 1);
1049
1050 /* unpad/clear out any junk on the left */
1051 s = s & (((uint32_t)1 << n << n) - 1);
1052
1053 /* Now "unshuffle" to separate the x and y bits. */
1054 t = (s ^ (s >> 1)) & 0x22222222U; s ^= t ^ (t << 1);
1055 t = (s ^ (s >> 2)) & 0x0c0c0c0cU; s ^= t ^ (t << 2);
1056 t = (s ^ (s >> 4)) & 0x00f000f0U; s ^= t ^ (t << 4);
1057 t = (s ^ (s >> 8)) & 0x0000ff00U; s ^= t ^ (t << 8);
1058
1059 /* now s contains two 16-bit coordinates */
1060 return s;
1061 }
1062
1063 /* 64 bit, a straightforward extension to the 32 bit case */
1064 ecb_function_ ecb_const uint64_t ecb_hilbert2d_index_to_coord64 (int n, uint64_t s);
1065 ecb_function_ ecb_const uint64_t ecb_hilbert2d_index_to_coord64 (int n, uint64_t s)
1066 {
1067 uint64_t comp, swap, cs, t, sr;
1068
1069 /* pad s on the left (unused) bits with 01 (no change groups) */
1070 s |= 0x5555555555555555U << n << n;
1071 /* "s shift right" */
1072 sr = (s >> 1) & 0x5555555555555555U;
1073 /* compute complement and swap info in two-bit groups */
1074 cs = ((s & 0x5555555555555555U) + sr) ^ 0x5555555555555555U;
1075
1076 /* parallel prefix xor op to propagate both complement
1077 * and swap info together from left to right (there is
1078 * no step "cs ^= cs >> 1", so in effect it computes
1079 * two independent parallel prefix operations on two
1080 * interleaved sets of thirty-two bits).
1081 */
1082 cs ^= cs >> 2;
1083 cs ^= cs >> 4;
1084 cs ^= cs >> 8;
1085 cs ^= cs >> 16;
1086 cs ^= cs >> 32;
1087
1088 /* separate swap and complement bits */
1089 swap = cs & 0x5555555555555555U;
1090 comp = (cs >> 1) & 0x5555555555555555U;
1091
1092 /* calculate coordinates in odd and even bit positions */
1093 t = (s & swap) ^ comp;
1094 s = s ^ sr ^ t ^ (t << 1);
1095
1096 /* unpad/clear out any junk on the left */
1097 s = s & (((uint64_t)1 << n << n) - 1);
1098
1099 /* Now "unshuffle" to separate the x and y bits. */
1100 t = (s ^ (s >> 1)) & 0x2222222222222222U; s ^= t ^ (t << 1);
1101 t = (s ^ (s >> 2)) & 0x0c0c0c0c0c0c0c0cU; s ^= t ^ (t << 2);
1102 t = (s ^ (s >> 4)) & 0x00f000f000f000f0U; s ^= t ^ (t << 4);
1103 t = (s ^ (s >> 8)) & 0x0000ff000000ff00U; s ^= t ^ (t << 8);
1104 t = (s ^ (s >> 16)) & 0x00000000ffff0000U; s ^= t ^ (t << 16);
1105
1106 /* now s contains two 32-bit coordinates */
1107 return s;
1108 }
1109
1110 /* algorithm from the book Hacker's Delight, but a similar algorithm */
1111 /* is given in https://doi.org/10.1002/spe.4380160103 */
1112 /* this has been slightly improved over the original version */
1113 ecb_function_ ecb_const uint32_t ecb_hilbert2d_coord_to_index32 (int n, uint32_t xy);
1114 ecb_function_ ecb_const uint32_t ecb_hilbert2d_coord_to_index32 (int n, uint32_t xy)
1115 {
1116 uint32_t row;
1117 uint32_t state = 0;
1118 uint32_t s = 0;
1119
1120 do
1121 {
1122 --n;
1123
1124 row = 4 * state
1125 | (2 & (xy >> n >> 15))
1126 | (1 & (xy >> n ));
1127
1128 /* these funky constants are lookup tables for two-bit values */
1129 s = (s << 2) | (0x361e9cb4U >> 2 * row) & 3;
1130 state = (0x8fe65831U >> 2 * row) & 3;
1131 }
1132 while (n > 0);
1133
1134 return s;
1135 }
1136
1137 /* 64 bit, essentially the same as 32 bit */
1138 ecb_function_ ecb_const uint64_t ecb_hilbert2d_coord_to_index64 (int n, uint64_t xy);
1139 ecb_function_ ecb_const uint64_t ecb_hilbert2d_coord_to_index64 (int n, uint64_t xy)
1140 {
1141 uint32_t row;
1142 uint32_t state = 0;
1143 uint64_t s = 0;
1144
1145 do
1146 {
1147 --n;
1148
1149 row = 4 * state
1150 | (2 & (xy >> n >> 31))
1151 | (1 & (xy >> n ));
1152
1153 /* these funky constants are lookup tables for two-bit values */
1154 s = (s << 2) | (0x361e9cb4U >> 2 * row) & 3;
1155 state = (0x8fe65831U >> 2 * row) & 3;
1156 }
1157 while (n > 0);
1158
1159 return s;
1160 }
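
/* Editorial sketch, not part of the upstream header: mapping a position along
 * an order-n (2**n by 2**n) hilbert curve to packed x/y coordinates and back;
 * the values of n and s are illustrative only. */
ecb_inline ecb_bool
ecb_example_hilbert_roundtrip (void)
{
  int n = 8;            /* 256 x 256 grid, indices 0 .. 2**16 - 1 */
  uint32_t s = 12345;
  uint32_t xy = ecb_hilbert2d_index_to_coord32 (n, s); /* one coordinate per 16 bit half */

  return ecb_hilbert2d_coord_to_index32 (n, xy) == s;
}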
1161
1162 /*****************************************************************************/
1163 /* division */
1164
1165 #if ECB_GCC_VERSION(3,0) || ECB_C99
1166 /* C99 tightened the definition of %, so we can use a more efficient version */
1167 #define ecb_mod(m,n) ((m) % (n) + ((m) % (n) < 0 ? (n) : 0))
1168 #else
1169 #define ecb_mod(m,n) ((m) < 0 ? ((n) - 1 - ((-1 - (m)) % (n))) : ((m) % (n)))
1170 #endif
1171
1172 #if ECB_CPP
1173 template<typename T>
1174 static inline T ecb_div_rd (T val, T div)
1175 {
1176 return val < 0 ? - ((-val + div - 1) / div) : (val ) / div;
1177 }
1178 template<typename T>
1179 static inline T ecb_div_ru (T val, T div)
1180 {
1181 return val < 0 ? - ((-val ) / div) : (val + div - 1) / div;
1182 }
1183 #else
1184 #define ecb_div_rd(val,div) ((val) < 0 ? - ((-(val) + (div) - 1) / (div)) : ((val) ) / (div))
1185 #define ecb_div_ru(val,div) ((val) < 0 ? - ((-(val) ) / (div)) : ((val) + (div) - 1) / (div))
1186 #endif
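
/* Editorial sketch, not part of the upstream header: unlike the C "%" and "/"
 * operators, ecb_mod wraps into [0, n) and ecb_div_rd rounds towards negative
 * infinity for negative operands; the function name is illustrative only. */
ecb_inline int
ecb_example_wrapping (void)
{
  int a = ecb_mod (-1, 7);    /* 6, while -1 % 7 is -1 in C99 */
  int b = ecb_div_rd (-7, 2); /* -4, while -7 / 2 is -3 in C99 */

  return a + b;
}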
1187
1188 /*****************************************************************************/
1189 /* array length */
1190
1191 #if ecb_cplusplus_does_not_suck
1192 /* does not work for local types (http://www.open-std.org/jtc1/sc22/wg21/docs/papers/2008/n2657.htm) */
1193 template<typename T, int N>
1194 static inline int ecb_array_length (const T (&arr)[N])
1195 {
1196 return N;
1197 }
1198 #else
1199 #define ecb_array_length(name) (sizeof (name) / sizeof (name [0]))
1200 #endif
1201
1202 /*****************************************************************************/
1203 /* IEEE 754-2008 half float conversions */
1204
1205 ecb_function_ ecb_const uint32_t ecb_binary16_to_binary32 (uint32_t x);
1206 ecb_function_ ecb_const uint32_t ecb_binary16_to_binary32 (uint32_t x)
1207 {
1208 unsigned int s = (x & 0x8000) << (31 - 15);
1209 int e = (x >> 10) & 0x001f;
1210 unsigned int m = x & 0x03ff;
1211
1212 if (ecb_expect_false (e == 31))
1213 /* infinity or NaN */
1214 e = 255 - (127 - 15);
1215 else if (ecb_expect_false (!e))
1216 {
1217 if (ecb_expect_true (!m))
1218 /* zero, handled by code below by forcing e to 0 */
1219 e = 0 - (127 - 15);
1220 else
1221 {
1222 /* subnormal, renormalise */
1223 unsigned int s = 10 - ecb_ld32 (m);
1224
1225 m = (m << s) & 0x3ff; /* mask implicit bit */
1226 e -= s - 1;
1227 }
1228 }
1229
1230 /* e and m now are normalised, or zero, (or inf or nan) */
1231 e += 127 - 15;
1232
1233 return s | (e << 23) | (m << (23 - 10));
1234 }
1235
1236 ecb_function_ ecb_const uint16_t ecb_binary32_to_binary16 (uint32_t x);
1237 ecb_function_ ecb_const uint16_t ecb_binary32_to_binary16 (uint32_t x)
1238 {
1239 unsigned int s = (x >> 16) & 0x00008000; /* sign bit, the easy part */
1240 int e = ((x >> 23) & 0x000000ff) - (127 - 15); /* the desired exponent */
1241 unsigned int m = x & 0x007fffff;
1242
1243 x &= 0x7fffffff;
1244
1245 /* if it's within range of binary16 normals, use fast path */
1246 if (ecb_expect_true (0x38800000 <= x && x <= 0x477fefff))
1247 {
1248 /* mantissa round-to-even */
1249 m += 0x00000fff + ((m >> (23 - 10)) & 1);
1250
1251 /* handle overflow */
1252 if (ecb_expect_false (m >= 0x00800000))
1253 {
1254 m >>= 1;
1255 e += 1;
1256 }
1257
1258 return s | (e << 10) | (m >> (23 - 10));
1259 }
1260
1261 /* handle large numbers and infinity */
1262 if (ecb_expect_true (0x477fefff < x && x <= 0x7f800000))
1263 return s | 0x7c00;
1264
1265 /* handle zero, subnormals and small numbers */
1266 if (ecb_expect_true (x < 0x38800000))
1267 {
1268 /* zero */
1269 if (ecb_expect_true (!x))
1270 return s;
1271
1272 /* handle subnormals */
1273
1274 /* too small, will be zero */
1275 if (e < (14 - 24)) /* might not be sharp, but is good enough */
1276 return s;
1277
1278 m |= 0x00800000; /* make implicit bit explicit */
1279
1280 /* very tricky - we need to round to the nearest e (+10) bit value */
1281 {
1282 unsigned int bits = 14 - e;
1283 unsigned int half = (1 << (bits - 1)) - 1;
1284 unsigned int even = (m >> bits) & 1;
1285
1286 /* if this overflows, we will end up with a normalised number */
1287 m = (m + half + even) >> bits;
1288 }
1289
1290 return s | m;
1291 }
1292
1293 /* handle NaNs, preserve leftmost nan bits, but make sure we don't turn them into infinities */
1294 m >>= 13;
1295
1296 return s | 0x7c00 | m | !m;
1297 }
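
/* Editorial sketch, not part of the upstream header: rounding a float to IEEE
 * binary16 and widening it back. This assumes float is an IEEE binary32 and
 * uses memcpy (string.h is already included) to access the raw bits; the
 * function name is illustrative only. */
ecb_inline float
ecb_example_to_half_and_back (float f)
{
  uint32_t bits;
  uint16_t half;

  memcpy (&bits, &f, sizeof (bits));      /* float -> raw binary32 bits */
  half = ecb_binary32_to_binary16 (bits); /* round to nearest binary16 */
  bits = ecb_binary16_to_binary32 (half); /* widen back exactly */
  memcpy (&f, &bits, sizeof (f));

  return f;
}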
1298
1299 /*******************************************************************************/
1300 /* fast integer to ascii */
1301
1302 /*
1303 * This code is pretty complicated because it is general. The idea behind it,
1304 * however, is pretty simple: first, the number is multiplied with a scaling
1305 * factor (2**bits / 10**(digits-1)) to convert the integer into a fixed-point
1306 * number with the first digit in the upper bits.
1307 * Then this digit is converted to text and masked out. The resulting number
1308 * is then multiplied by 10, by multiplying the fixed point representation
1309 * by 5 and shifting the (binary) decimal point one to the right, so a 4.28
1310 * format becomes 5.27, 6.26 and so on.
1311 * The rest involves only advancing the pointer if we already generated a
1312 * non-zero digit, so leading zeroes are overwritten.
1313 */
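
/* Editorial illustration, not part of the upstream header: for ecb_i2a_4
 * (uint32_t, bits = 26, digitmask = 1000) and u = 123, the scale factor is
 * (2**26 - 1 + 1000) / 1000 = 67109, so x = 123 * 67109 = 8254407; the first
 * digit x >> 26 = 0 is a suppressed leading zero, and the mask-and-multiply-
 * by-5 steps then extract 1, 2 and 3 from the top bits in turn. */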
1314
1315 /* simply return a mask with "bits" bits set */
1316 #define ecb_i2a_mask(type,bits) ((((type)1) << (bits)) - 1)
1317
1318 /* output a single digit. maskvalue is 10**digitidx */
1319 #define ecb_i2a_digit(type,bits,digitmask,maskvalue,digitidx) \
1320 if (digitmask >= maskvalue) /* constant, used to decide how many digits to generate */ \
1321 { \
1322 char digit = x >> (bits - digitidx); /* calculate the topmost digit */ \
1323 *ptr = digit + '0'; /* output it */ \
1324 nz = (digitmask == maskvalue) || nz || digit; /* first term == always output last digit */ \
1325 ptr += nz; /* output digit only if non-zero digit seen */ \
1326 x = (x & ecb_i2a_mask (type, bits - digitidx)) * 5; /* *10, but shift decimal point right */ \
1327 }
1328
1329 /* convert integer to fixed point format and multiply out digits, highest first */
1330 /* requires magic constants: max. digits and number of bits after the decimal point */
1331 #define ecb_i2a_def(suffix,ptr,v,type,bits,digitmask,lz) \
1332 ecb_inline char *ecb_i2a_ ## suffix (char *ptr, uint32_t u) \
1333 { \
1334 char nz = lz; /* non-zero digit seen? */ \
1335 /* convert to x.bits fixed-point */ \
1336 type x = u * ((ecb_i2a_mask (type, bits) + digitmask) / digitmask); \
1337 /* output up to 10 digits */ \
1338 ecb_i2a_digit (type,bits,digitmask, 1, 0); \
1339 ecb_i2a_digit (type,bits,digitmask, 10, 1); \
1340 ecb_i2a_digit (type,bits,digitmask, 100, 2); \
1341 ecb_i2a_digit (type,bits,digitmask, 1000, 3); \
1342 ecb_i2a_digit (type,bits,digitmask, 10000, 4); \
1343 ecb_i2a_digit (type,bits,digitmask, 100000, 5); \
1344 ecb_i2a_digit (type,bits,digitmask, 1000000, 6); \
1345 ecb_i2a_digit (type,bits,digitmask, 10000000, 7); \
1346 ecb_i2a_digit (type,bits,digitmask, 100000000, 8); \
1347 ecb_i2a_digit (type,bits,digitmask, 1000000000, 9); \
1348 return ptr; \
1349 }
1350
1351 /* predefined versions of the above, for various digits */
1352 /* ecb_i2a_xN = almost N digits, limit defined by macro */
1353 /* ecb_i2a_N = up to N digits, leading zeroes suppressed */
1354 /* ecb_i2a_0N = exactly N digits, including leading zeroes */
1355
1356 /* non-leading-zero versions, limited range */
1357 #define ECB_I2A_MAX_X5 59074 /* limit for ecb_i2a_x5 */
1358 #define ECB_I2A_MAX_X10 2932500665 /* limit for ecb_i2a_x10 */
1359 ecb_i2a_def ( x5, ptr, v, uint32_t, 26, 10000, 0)
1360 ecb_i2a_def (x10, ptr, v, uint64_t, 60, 1000000000, 0)
1361
1362 /* non-leading zero versions, all digits, 4 and 9 are optimal for 32/64 bit */
1363 ecb_i2a_def ( 2, ptr, v, uint32_t, 10, 10, 0)
1364 ecb_i2a_def ( 3, ptr, v, uint32_t, 12, 100, 0)
1365 ecb_i2a_def ( 4, ptr, v, uint32_t, 26, 1000, 0)
1366 ecb_i2a_def ( 5, ptr, v, uint64_t, 30, 10000, 0)
1367 ecb_i2a_def ( 6, ptr, v, uint64_t, 36, 100000, 0)
1368 ecb_i2a_def ( 7, ptr, v, uint64_t, 44, 1000000, 0)
1369 ecb_i2a_def ( 8, ptr, v, uint64_t, 50, 10000000, 0)
1370 ecb_i2a_def ( 9, ptr, v, uint64_t, 56, 100000000, 0)
1371
1372 /* leading-zero versions, all digits, 04 and 09 are optimal for 32/64 bit */
1373 ecb_i2a_def (02, ptr, v, uint32_t, 10, 10, 1)
1374 ecb_i2a_def (03, ptr, v, uint32_t, 12, 100, 1)
1375 ecb_i2a_def (04, ptr, v, uint32_t, 26, 1000, 1)
1376 ecb_i2a_def (05, ptr, v, uint64_t, 30, 10000, 1)
1377 ecb_i2a_def (06, ptr, v, uint64_t, 36, 100000, 1)
1378 ecb_i2a_def (07, ptr, v, uint64_t, 44, 1000000, 1)
1379 ecb_i2a_def (08, ptr, v, uint64_t, 50, 10000000, 1)
1380 ecb_i2a_def (09, ptr, v, uint64_t, 56, 100000000, 1)
1381
1382 #define ECB_I2A_I32_DIGITS 11
1383 #define ECB_I2A_U32_DIGITS 10
1384 #define ECB_I2A_I64_DIGITS 20
1385 #define ECB_I2A_U64_DIGITS 21
1386 #define ECB_I2A_MAX_DIGITS 21
1387
1388 ecb_function_ char * ecb_i2a_u32 (char *ptr, uint32_t u);
1389 ecb_function_ char * ecb_i2a_u32 (char *ptr, uint32_t u)
1390 {
1391 #if ECB_64BIT_NATIVE
1392 if (ecb_expect_true (u <= ECB_I2A_MAX_X10))
1393 ptr = ecb_i2a_x10 (ptr, u);
1394 else /* x10 almost, but not fully, covers 32 bit */
1395 {
1396 uint32_t u1 = u % 1000000000;
1397 uint32_t u2 = u / 1000000000;
1398
1399 *ptr++ = u2 + '0';
1400 ptr = ecb_i2a_09 (ptr, u1);
1401 }
1402 #else
1403 if (ecb_expect_true (u <= ECB_I2A_MAX_X5))
1404 ptr = ecb_i2a_x5 (ptr, u);
1405 else if (ecb_expect_true (u <= ECB_I2A_MAX_X5 * 10000))
1406 {
1407 uint32_t u1 = u % 10000;
1408 uint32_t u2 = u / 10000;
1409
1410 ptr = ecb_i2a_x5 (ptr, u2);
1411 ptr = ecb_i2a_04 (ptr, u1);
1412 }
1413 else
1414 {
1415 uint32_t u1 = u % 10000;
1416 uint32_t ua = u / 10000;
1417 uint32_t u2 = ua % 10000;
1418 uint32_t u3 = ua / 10000;
1419
1420 ptr = ecb_i2a_2 (ptr, u3);
1421 ptr = ecb_i2a_04 (ptr, u2);
1422 ptr = ecb_i2a_04 (ptr, u1);
1423 }
1424 #endif
1425
1426 return ptr;
1427 }
1428
1429 ecb_function_ char * ecb_i2a_i32 (char *ptr, int32_t v);
1430 ecb_function_ char * ecb_i2a_i32 (char *ptr, int32_t v)
1431 {
1432 *ptr = '-'; ptr += v < 0; /* write the sign unconditionally, but advance only if negative */
1433 uint32_t u = v < 0 ? -(uint32_t)v : v; /* negate in unsigned arithmetic so INT32_MIN stays well-defined */
1434
1435 #if ECB_64BIT_NATIVE
1436 ptr = ecb_i2a_x10 (ptr, u); /* x10 fully covers 31 bit */
1437 #else
1438 ptr = ecb_i2a_u32 (ptr, u);
1439 #endif
1440
1441 return ptr;
1442 }
1443
1444 ecb_function_ char * ecb_i2a_u64 (char *ptr, uint64_t u);
1445 ecb_function_ char * ecb_i2a_u64 (char *ptr, uint64_t u)
1446 {
1447 #if ECB_64BIT_NATIVE
1448 if (ecb_expect_true (u <= ECB_I2A_MAX_X10))
1449 ptr = ecb_i2a_x10 (ptr, u);
1450 else if (ecb_expect_false (u <= ECB_I2A_MAX_X10 * 1000000000))
1451 {
1452 uint64_t u1 = u % 1000000000;
1453 uint64_t u2 = u / 1000000000;
1454
1455 ptr = ecb_i2a_x10 (ptr, u2);
1456 ptr = ecb_i2a_09 (ptr, u1);
1457 }
1458 else
1459 {
1460 uint64_t u1 = u % 1000000000;
1461 uint64_t ua = u / 1000000000;
1462 uint64_t u2 = ua % 1000000000;
1463 uint64_t u3 = ua / 1000000000;
1464
1465 ptr = ecb_i2a_2 (ptr, u3);
1466 ptr = ecb_i2a_09 (ptr, u2);
1467 ptr = ecb_i2a_09 (ptr, u1);
1468 }
1469 #else
1470 if (ecb_expect_true (u <= ECB_I2A_MAX_X5))
1471 ptr = ecb_i2a_x5 (ptr, u);
1472 else
1473 {
1474 uint64_t u1 = u % 10000;
1475 uint64_t u2 = u / 10000;
1476
1477 ptr = ecb_i2a_u64 (ptr, u2);
1478 ptr = ecb_i2a_04 (ptr, u1);
1479 }
1480 #endif
1481
1482 return ptr;
1483 }
1484
1485 ecb_function_ char * ecb_i2a_i64 (char *ptr, int64_t v);
1486 ecb_function_ char * ecb_i2a_i64 (char *ptr, int64_t v)
1487 {
1488 *ptr = '-'; ptr += v < 0;
1489 uint64_t u = v < 0 ? -(uint64_t)v : v;
1490
1491 #if ECB_64BIT_NATIVE
1492 if (ecb_expect_true (u <= ECB_I2A_MAX_X10))
1493 ptr = ecb_i2a_x10 (ptr, u);
1494 else if (ecb_expect_false (u <= ECB_I2A_MAX_X10 * 1000000000))
1495 {
1496 uint64_t u1 = u % 1000000000;
1497 uint64_t u2 = u / 1000000000;
1498
1499 ptr = ecb_i2a_x10 (ptr, u2);
1500 ptr = ecb_i2a_09 (ptr, u1);
1501 }
1502 else
1503 {
1504 uint64_t u1 = u % 1000000000;
1505 uint64_t ua = u / 1000000000;
1506 uint64_t u2 = ua % 1000000000;
1507 uint64_t u3 = ua / 1000000000;
1508
1509 /* 2**63 is 19 digits, so the top is exactly one digit */
1510 *ptr++ = u3 + '0';
1511 ptr = ecb_i2a_09 (ptr, u2);
1512 ptr = ecb_i2a_09 (ptr, u1);
1513 }
1514 #else
1515 ptr = ecb_i2a_u64 (ptr, u);
1516 #endif
1517
1518 return ptr;
1519 }
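#if 0
/* illustrative usage sketch, not part of libecb: the ecb_i2a_* functions do
 * not write a terminating NUL and return a pointer just past the last
 * character written, so callers terminate the buffer themselves */
static void
ecb_i2a_example (void) /* hypothetical helper, for illustration only */
{
  char buf[ECB_I2A_MAX_DIGITS + 1];
  char *end = ecb_i2a_i64 (buf, -1234567890123456789LL);
  *end = 0; /* buf now holds "-1234567890123456789" */
}
#endif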
1520
1521 /*******************************************************************************/
1522 /* floating point stuff, can be disabled by defining ECB_NO_LIBM */
1523
1524 /* basically, everything uses "ieee pure-endian" floating point numbers */
1525 /* the only noteworthy exception is ancient armle, which uses order 43218765 */
1526 #if 0 \
1527 || __i386 || __i386__ \
1528 || ECB_GCC_AMD64 \
1529 || __powerpc__ || __ppc__ || __powerpc64__ || __ppc64__ \
1530 || defined __s390__ || defined __s390x__ \
1531 || defined __mips__ \
1532 || defined __alpha__ \
1533 || defined __hppa__ \
1534 || defined __ia64__ \
1535 || defined __m68k__ \
1536 || defined __m88k__ \
1537 || defined __sh__ \
1538 || defined _M_IX86 || defined ECB_MSVC_AMD64 || defined _M_IA64 \
1539 || (defined __arm__ && (defined __ARM_EABI__ || defined __EABI__ || defined __VFP_FP__ || defined _WIN32_WCE || defined __ANDROID__)) \
1540 || defined __aarch64__
1541 #define ECB_STDFP 1
1542 #else
1543 #define ECB_STDFP 0
1544 #endif
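/* when ECB_STDFP is set, floats and doubles share the byte (and word) order
 * of same-sized integers, so the conversions below can simply memcpy the
 * ieee bit pattern; otherwise they fall back to slower, less complete
 * emulation */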
1545
1546 #ifndef ECB_NO_LIBM
1547
1548 #include <math.h> /* for frexp*, ldexp*, INFINITY, NAN */
1549
1550 /* only the oldest of the old doesn't have this one. solaris. */
1551 #ifdef INFINITY
1552 #define ECB_INFINITY INFINITY
1553 #else
1554 #define ECB_INFINITY HUGE_VAL
1555 #endif
1556
1557 #ifdef NAN
1558 #define ECB_NAN NAN
1559 #else
1560 #define ECB_NAN ECB_INFINITY
1561 #endif
1562
1563 #if ECB_C99 || _XOPEN_VERSION >= 600 || _POSIX_VERSION >= 200112L
1564 #define ecb_ldexpf(x,e) ldexpf ((x), (e))
1565 #define ecb_frexpf(x,e) frexpf ((x), (e))
1566 #else
1567 #define ecb_ldexpf(x,e) (float) ldexp ((double) (x), (e))
1568 #define ecb_frexpf(x,e) (float) frexp ((double) (x), (e))
1569 #endif
1570
1571 /* convert a float to ieee single/binary32 */
1572 ecb_function_ ecb_const uint32_t ecb_float_to_binary32 (float x);
1573 ecb_function_ ecb_const uint32_t ecb_float_to_binary32 (float x)
1574 {
1575 uint32_t r;
1576
1577 #if ECB_STDFP
1578 memcpy (&r, &x, 4);
1579 #else
1580 /* slow emulation, works for anything but -0 */
1581 uint32_t m;
1582 int e;
1583
1584 if (x == 0e0f ) return 0x00000000U;
1585 if (x > +3.40282346638528860e+38f) return 0x7f800000U;
1586 if (x < -3.40282346638528860e+38f) return 0xff800000U;
1587 if (x != x ) return 0x7fbfffffU;
1588
1589 m = ecb_frexpf (x, &e) * 0x1000000U;
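/* frexpf returns the mantissa in [0.5, 1), one binary place to the right
 * of ieee's implicit 1.f form, hence the exponent bias of 126 (not 127)
 * below; scaling by 2**24 puts the implicit bit at bit 23, where it is
 * masked off again */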
1590
1591 r = m & 0x80000000U;
1592
1593 if (r)
1594 m = -m;
1595
1596 if (e <= -126)
1597 {
1598 m &= 0xffffffU;
1599 m >>= (-125 - e);
1600 e = -126;
1601 }
1602
1603 r |= (e + 126) << 23;
1604 r |= m & 0x7fffffU;
1605 #endif
1606
1607 return r;
1608 }
1609
1610 /* converts an ieee single/binary32 to a float */
1611 ecb_function_ ecb_const float ecb_binary32_to_float (uint32_t x);
1612 ecb_function_ ecb_const float ecb_binary32_to_float (uint32_t x)
1613 {
1614 float r;
1615
1616 #if ECB_STDFP
1617 memcpy (&r, &x, 4);
1618 #else
1619 /* emulation, only works for normals and subnormals and +0 */
1620 int neg = x >> 31;
1621 int e = (x >> 23) & 0xffU;
1622
1623 x &= 0x7fffffU;
1624
1625 if (e)
1626 x |= 0x800000U;
1627 else
1628 e = 1;
1629
1630 /* we distrust ldexpf a bit and do the 2**-24 scaling by an extra multiply */
1631 r = ecb_ldexpf (x * (0.5f / 0x800000U), e - 126);
1632
1633 r = neg ? -r : r;
1634 #endif
1635
1636 return r;
1637 }
1638
1639 /* convert a double to ieee double/binary64 */
1640 ecb_function_ ecb_const uint64_t ecb_double_to_binary64 (double x);
1641 ecb_function_ ecb_const uint64_t ecb_double_to_binary64 (double x)
1642 {
1643 uint64_t r;
1644
1645 #if ECB_STDFP
1646 memcpy (&r, &x, 8);
1647 #else
1648 /* slow emulation, works for anything but -0 */
1649 uint64_t m;
1650 int e;
1651
1652 if (x == 0e0 ) return 0x0000000000000000U;
1653 if (x > +1.79769313486231470e+308) return 0x7ff0000000000000U;
1654 if (x < -1.79769313486231470e+308) return 0xfff0000000000000U;
1655 if (x != x ) return 0x7ff7ffffffffffffU;
1656
1657 m = frexp (x, &e) * 0x20000000000000U;
1658
1659 r = m & 0x8000000000000000U;
1660
1661 if (r)
1662 m = -m;
1663
1664 if (e <= -1022)
1665 {
1666 m &= 0x1fffffffffffffU;
1667 m >>= (-1021 - e);
1668 e = -1022;
1669 }
1670
1671 r |= ((uint64_t)(e + 1022)) << 52;
1672 r |= m & 0xfffffffffffffU;
1673 #endif
1674
1675 return r;
1676 }
1677
1678 /* converts an ieee double/binary64 to a double */
1679 ecb_function_ ecb_const double ecb_binary64_to_double (uint64_t x);
1680 ecb_function_ ecb_const double ecb_binary64_to_double (uint64_t x)
1681 {
1682 double r;
1683
1684 #if ECB_STDFP
1685 memcpy (&r, &x, 8);
1686 #else
1687 /* emulation, only works for normals and subnormals and +0 */
1688 int neg = x >> 63;
1689 int e = (x >> 52) & 0x7ffU;
1690
1691 x &= 0xfffffffffffffU;
1692
1693 if (e)
1694 x |= 0x10000000000000U;
1695 else
1696 e = 1;
1697
1698 /* we distrust ldexp a bit and do the 2**-53 scaling by an extra multiply */
1699 r = ldexp (x * (0.5 / 0x10000000000000U), e - 1022);
1700
1701 r = neg ? -r : r;
1702 #endif
1703
1704 return r;
1705 }
1706
1707 /* convert a float to ieee half/binary16 */
1708 ecb_function_ ecb_const uint16_t ecb_float_to_binary16 (float x);
1709 ecb_function_ ecb_const uint16_t ecb_float_to_binary16 (float x)
1710 {
1711 return ecb_binary32_to_binary16 (ecb_float_to_binary32 (x));
1712 }
1713
1714 /* convert an ieee half/binary16 to float */
1715 ecb_function_ ecb_const float ecb_binary16_to_float (uint16_t x);
1716 ecb_function_ ecb_const float ecb_binary16_to_float (uint16_t x)
1717 {
1718 return ecb_binary32_to_float (ecb_binary16_to_binary32 (x));
1719 }
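#if 0
/* illustrative usage sketch, not part of libecb: the binary<->float helpers
 * yield the ieee bit patterns as plain integer values regardless of how the
 * host stores its floats, which is handy for portable serialisation */
static void
ecb_fp_example (void) /* hypothetical helper, for illustration only */
{
  uint32_t bits = ecb_float_to_binary32 (1.5f); /* 0x3fc00000 */
  float f = ecb_binary32_to_float (bits); /* 1.5f again */
  uint16_t h = ecb_float_to_binary16 (1.5f); /* 0x3e00, ieee half */

  (void)f; (void)h;
}
#endif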
1720
1721 #endif
1722
1723 #endif
1724