Comparing libecb/ecb.h (file contents):
Revision 1.165 by sf-exg, Fri May 1 12:02:18 2015 UTC vs.
Revision 1.214 by root, Fri Mar 25 15:36:36 2022 UTC

1/* 1/*
2 * libecb - http://software.schmorp.de/pkg/libecb 2 * libecb - http://software.schmorp.de/pkg/libecb
3 * 3 *
4 * Copyright (©) 2009-2015 Marc Alexander Lehmann <libecb@schmorp.de> 4 * Copyright (©) 2009-2015,2018-2021 Marc Alexander Lehmann <libecb@schmorp.de>
5 * Copyright (©) 2011 Emanuele Giaquinta 5 * Copyright (©) 2011 Emanuele Giaquinta
6 * All rights reserved. 6 * All rights reserved.
7 * 7 *
8 * Redistribution and use in source and binary forms, with or without modifica- 8 * Redistribution and use in source and binary forms, with or without modifica-
9 * tion, are permitted provided that the following conditions are met: 9 * tion, are permitted provided that the following conditions are met:
40 40
41#ifndef ECB_H 41#ifndef ECB_H
42#define ECB_H 42#define ECB_H
43 43
44/* 16 bits major, 16 bits minor */ 44/* 16 bits major, 16 bits minor */
45#define ECB_VERSION 0x00010004 45#define ECB_VERSION 0x0001000c
46 46
47#ifdef _WIN32 47#include <string.h> /* for memcpy */
48
49#if defined (_WIN32) && !defined (__MINGW32__)
48 typedef signed char int8_t; 50 typedef signed char int8_t;
49 typedef unsigned char uint8_t; 51 typedef unsigned char uint8_t;
52 typedef signed char int_fast8_t;
53 typedef unsigned char uint_fast8_t;
50 typedef signed short int16_t; 54 typedef signed short int16_t;
51 typedef unsigned short uint16_t; 55 typedef unsigned short uint16_t;
56 typedef signed int int_fast16_t;
57 typedef unsigned int uint_fast16_t;
52 typedef signed int int32_t; 58 typedef signed int int32_t;
53 typedef unsigned int uint32_t; 59 typedef unsigned int uint32_t;
60 typedef signed int int_fast32_t;
61 typedef unsigned int uint_fast32_t;
54 #if __GNUC__ 62 #if __GNUC__
55 typedef signed long long int64_t; 63 typedef signed long long int64_t;
56 typedef unsigned long long uint64_t; 64 typedef unsigned long long uint64_t;
57 #else /* _MSC_VER || __BORLANDC__ */ 65 #else /* _MSC_VER || __BORLANDC__ */
58 typedef signed __int64 int64_t; 66 typedef signed __int64 int64_t;
59 typedef unsigned __int64 uint64_t; 67 typedef unsigned __int64 uint64_t;
60 #endif 68 #endif
69 typedef int64_t int_fast64_t;
70 typedef uint64_t uint_fast64_t;
61 #ifdef _WIN64 71 #ifdef _WIN64
62 #define ECB_PTRSIZE 8 72 #define ECB_PTRSIZE 8
63 typedef uint64_t uintptr_t; 73 typedef uint64_t uintptr_t;
64 typedef int64_t intptr_t; 74 typedef int64_t intptr_t;
65 #else 75 #else
67 typedef uint32_t uintptr_t; 77 typedef uint32_t uintptr_t;
68 typedef int32_t intptr_t; 78 typedef int32_t intptr_t;
69 #endif 79 #endif
70#else 80#else
71 #include <inttypes.h> 81 #include <inttypes.h>
72 #if UINTMAX_MAX > 0xffffffffU 82 #if (defined INTPTR_MAX ? INTPTR_MAX : ULONG_MAX) > 0xffffffffU
73 #define ECB_PTRSIZE 8 83 #define ECB_PTRSIZE 8
74 #else 84 #else
75 #define ECB_PTRSIZE 4 85 #define ECB_PTRSIZE 4
76 #endif 86 #endif
77#endif 87#endif
78 88
79#define ECB_GCC_AMD64 (__amd64 || __amd64__ || __x86_64 || __x86_64__) 89#define ECB_GCC_AMD64 (__amd64 || __amd64__ || __x86_64 || __x86_64__)
80#define ECB_MSVC_AMD64 (_M_AMD64 || _M_X64) 90#define ECB_MSVC_AMD64 (_M_AMD64 || _M_X64)
91
92#ifndef ECB_OPTIMIZE_SIZE
93 #if __OPTIMIZE_SIZE__
94 #define ECB_OPTIMIZE_SIZE 1
95 #else
96 #define ECB_OPTIMIZE_SIZE 0
97 #endif
98#endif
81 99
82/* work around x32 idiocy by defining proper macros */ 100/* work around x32 idiocy by defining proper macros */
83#if ECB_GCC_AMD64 || ECB_MSVC_AMD64 101#if ECB_GCC_AMD64 || ECB_MSVC_AMD64
84 #if _ILP32 102 #if _ILP32
85 #define ECB_AMD64_X32 1 103 #define ECB_AMD64_X32 1
86 #else 104 #else
87 #define ECB_AMD64 1 105 #define ECB_AMD64 1
88 #endif 106 #endif
107#endif
108
109#if ECB_PTRSIZE >= 8 || ECB_AMD64_X32
110 #define ECB_64BIT_NATIVE 1
111#else
112 #define ECB_64BIT_NATIVE 0
89#endif 113#endif
90 114
91/* many compilers define _GNUC_ to some versions but then only implement 115/* many compilers define _GNUC_ to some versions but then only implement
92 * what their idiot authors think are the "more important" extensions, 116 * what their idiot authors think are the "more important" extensions,
93 * causing enormous grief in return for some better fake benchmark numbers. 117 * causing enormous grief in return for some better fake benchmark numbers.
115 #define ECB_CLANG_EXTENSION(x) 0 139 #define ECB_CLANG_EXTENSION(x) 0
116#endif 140#endif
117 141
118#define ECB_CPP (__cplusplus+0) 142#define ECB_CPP (__cplusplus+0)
119#define ECB_CPP11 (__cplusplus >= 201103L) 143#define ECB_CPP11 (__cplusplus >= 201103L)
144#define ECB_CPP14 (__cplusplus >= 201402L)
145#define ECB_CPP17 (__cplusplus >= 201703L)
120 146
121#if ECB_CPP 147#if ECB_CPP
122 #define ECB_C 0 148 #define ECB_C 0
123 #define ECB_STDC_VERSION 0 149 #define ECB_STDC_VERSION 0
124#else 150#else
126 #define ECB_STDC_VERSION __STDC_VERSION__ 152 #define ECB_STDC_VERSION __STDC_VERSION__
127#endif 153#endif
128 154
129#define ECB_C99 (ECB_STDC_VERSION >= 199901L) 155#define ECB_C99 (ECB_STDC_VERSION >= 199901L)
130#define ECB_C11 (ECB_STDC_VERSION >= 201112L) 156#define ECB_C11 (ECB_STDC_VERSION >= 201112L)
157#define ECB_C17 (ECB_STDC_VERSION >= 201710L)
131 158
132#if ECB_CPP 159#if ECB_CPP
133 #define ECB_EXTERN_C extern "C" 160 #define ECB_EXTERN_C extern "C"
134 #define ECB_EXTERN_C_BEG ECB_EXTERN_C { 161 #define ECB_EXTERN_C_BEG ECB_EXTERN_C {
135 #define ECB_EXTERN_C_END } 162 #define ECB_EXTERN_C_END }
155/* http://www-01.ibm.com/support/knowledgecenter/SSGH3R_13.1.0/com.ibm.xlcpp131.aix.doc/compiler_ref/compiler_builtins.html */ 182/* http://www-01.ibm.com/support/knowledgecenter/SSGH3R_13.1.0/com.ibm.xlcpp131.aix.doc/compiler_ref/compiler_builtins.html */
156#if __xlC__ && ECB_CPP 183#if __xlC__ && ECB_CPP
157 #include <builtins.h> 184 #include <builtins.h>
158#endif 185#endif
159 186
187#if 1400 <= _MSC_VER
188 #include <intrin.h> /* fence functions _ReadBarrier, also bit search functions _BitScanReverse */
189#endif
190
160#ifndef ECB_MEMORY_FENCE 191#ifndef ECB_MEMORY_FENCE
161 #if ECB_GCC_VERSION(2,5) || defined __INTEL_COMPILER || (__llvm__ && __GNUC__) || __SUNPRO_C >= 0x5110 || __SUNPRO_CC >= 0x5110 192 #if ECB_GCC_VERSION(2,5) || defined __INTEL_COMPILER || (__llvm__ && __GNUC__) || __SUNPRO_C >= 0x5110 || __SUNPRO_CC >= 0x5110
193 #define ECB_MEMORY_FENCE_RELAXED __asm__ __volatile__ ("" : : : "memory")
162 #if __i386 || __i386__ 194 #if __i386 || __i386__
163 #define ECB_MEMORY_FENCE __asm__ __volatile__ ("lock; orb $0, -1(%%esp)" : : : "memory") 195 #define ECB_MEMORY_FENCE __asm__ __volatile__ ("lock; orb $0, -1(%%esp)" : : : "memory")
164 #define ECB_MEMORY_FENCE_ACQUIRE __asm__ __volatile__ ("" : : : "memory") 196 #define ECB_MEMORY_FENCE_ACQUIRE __asm__ __volatile__ ("" : : : "memory")
165 #define ECB_MEMORY_FENCE_RELEASE __asm__ __volatile__ ("") 197 #define ECB_MEMORY_FENCE_RELEASE __asm__ __volatile__ ("" : : : "memory")
166 #elif ECB_GCC_AMD64 198 #elif ECB_GCC_AMD64
167 #define ECB_MEMORY_FENCE __asm__ __volatile__ ("mfence" : : : "memory") 199 #define ECB_MEMORY_FENCE __asm__ __volatile__ ("mfence" : : : "memory")
168 #define ECB_MEMORY_FENCE_ACQUIRE __asm__ __volatile__ ("" : : : "memory") 200 #define ECB_MEMORY_FENCE_ACQUIRE __asm__ __volatile__ ("" : : : "memory")
169 #define ECB_MEMORY_FENCE_RELEASE __asm__ __volatile__ ("") 201 #define ECB_MEMORY_FENCE_RELEASE __asm__ __volatile__ ("" : : : "memory")
170 #elif __powerpc__ || __ppc__ || __powerpc64__ || __ppc64__ 202 #elif __powerpc__ || __ppc__ || __powerpc64__ || __ppc64__
171 #define ECB_MEMORY_FENCE __asm__ __volatile__ ("sync" : : : "memory") 203 #define ECB_MEMORY_FENCE __asm__ __volatile__ ("sync" : : : "memory")
204 #elif defined __ARM_ARCH_2__ \
205 || defined __ARM_ARCH_3__ || defined __ARM_ARCH_3M__ \
206 || defined __ARM_ARCH_4__ || defined __ARM_ARCH_4T__ \
207 || defined __ARM_ARCH_5__ || defined __ARM_ARCH_5E__ \
208 || defined __ARM_ARCH_5T__ || defined __ARM_ARCH_5TE__ \
209 || defined __ARM_ARCH_5TEJ__
210 /* should not need any, unless running old code on newer cpu - arm doesn't support that */
172 #elif defined __ARM_ARCH_6__ || defined __ARM_ARCH_6J__ \ 211 #elif defined __ARM_ARCH_6__ || defined __ARM_ARCH_6J__ \
173 || defined __ARM_ARCH_6K__ || defined __ARM_ARCH_6ZK__ 212 || defined __ARM_ARCH_6K__ || defined __ARM_ARCH_6ZK__ \
213 || defined __ARM_ARCH_6T2__
174 #define ECB_MEMORY_FENCE __asm__ __volatile__ ("mcr p15,0,%0,c7,c10,5" : : "r" (0) : "memory") 214 #define ECB_MEMORY_FENCE __asm__ __volatile__ ("mcr p15,0,%0,c7,c10,5" : : "r" (0) : "memory")
175 #elif defined __ARM_ARCH_7__ || defined __ARM_ARCH_7A__ \ 215 #elif defined __ARM_ARCH_7__ || defined __ARM_ARCH_7A__ \
176 || defined __ARM_ARCH_7M__ || defined __ARM_ARCH_7R__ 216 || defined __ARM_ARCH_7R__ || defined __ARM_ARCH_7M__
177 #define ECB_MEMORY_FENCE __asm__ __volatile__ ("dmb" : : : "memory") 217 #define ECB_MEMORY_FENCE __asm__ __volatile__ ("dmb" : : : "memory")
178 #elif __aarch64__ 218 #elif __aarch64__
179 #define ECB_MEMORY_FENCE __asm__ __volatile__ ("dmb ish" : : : "memory") 219 #define ECB_MEMORY_FENCE __asm__ __volatile__ ("dmb ish" : : : "memory")
180 #elif (__sparc || __sparc__) && !__sparcv8 220 #elif (__sparc || __sparc__) && !(__sparc_v8__ || defined __sparcv8)
181 #define ECB_MEMORY_FENCE __asm__ __volatile__ ("membar #LoadStore | #LoadLoad | #StoreStore | #StoreLoad" : : : "memory") 221 #define ECB_MEMORY_FENCE __asm__ __volatile__ ("membar #LoadStore | #LoadLoad | #StoreStore | #StoreLoad" : : : "memory")
182 #define ECB_MEMORY_FENCE_ACQUIRE __asm__ __volatile__ ("membar #LoadStore | #LoadLoad" : : : "memory") 222 #define ECB_MEMORY_FENCE_ACQUIRE __asm__ __volatile__ ("membar #LoadStore | #LoadLoad" : : : "memory")
183 #define ECB_MEMORY_FENCE_RELEASE __asm__ __volatile__ ("membar #LoadStore | #StoreStore") 223 #define ECB_MEMORY_FENCE_RELEASE __asm__ __volatile__ ("membar #LoadStore | #StoreStore")
184 #elif defined __s390__ || defined __s390x__ 224 #elif defined __s390__ || defined __s390x__
185 #define ECB_MEMORY_FENCE __asm__ __volatile__ ("bcr 15,0" : : : "memory") 225 #define ECB_MEMORY_FENCE __asm__ __volatile__ ("bcr 15,0" : : : "memory")
208 #if ECB_GCC_VERSION(4,7) 248 #if ECB_GCC_VERSION(4,7)
209 /* see comment below (stdatomic.h) about the C11 memory model. */ 249 /* see comment below (stdatomic.h) about the C11 memory model. */
210 #define ECB_MEMORY_FENCE __atomic_thread_fence (__ATOMIC_SEQ_CST) 250 #define ECB_MEMORY_FENCE __atomic_thread_fence (__ATOMIC_SEQ_CST)
211 #define ECB_MEMORY_FENCE_ACQUIRE __atomic_thread_fence (__ATOMIC_ACQUIRE) 251 #define ECB_MEMORY_FENCE_ACQUIRE __atomic_thread_fence (__ATOMIC_ACQUIRE)
212 #define ECB_MEMORY_FENCE_RELEASE __atomic_thread_fence (__ATOMIC_RELEASE) 252 #define ECB_MEMORY_FENCE_RELEASE __atomic_thread_fence (__ATOMIC_RELEASE)
253 #undef ECB_MEMORY_FENCE_RELAXED
254 #define ECB_MEMORY_FENCE_RELAXED __atomic_thread_fence (__ATOMIC_RELAXED)
213 255
214 #elif ECB_CLANG_EXTENSION(c_atomic) 256 #elif ECB_CLANG_EXTENSION(c_atomic)
215 /* see comment below (stdatomic.h) about the C11 memory model. */ 257 /* see comment below (stdatomic.h) about the C11 memory model. */
216 #define ECB_MEMORY_FENCE __c11_atomic_thread_fence (__ATOMIC_SEQ_CST) 258 #define ECB_MEMORY_FENCE __c11_atomic_thread_fence (__ATOMIC_SEQ_CST)
217 #define ECB_MEMORY_FENCE_ACQUIRE __c11_atomic_thread_fence (__ATOMIC_ACQUIRE) 259 #define ECB_MEMORY_FENCE_ACQUIRE __c11_atomic_thread_fence (__ATOMIC_ACQUIRE)
218 #define ECB_MEMORY_FENCE_RELEASE __c11_atomic_thread_fence (__ATOMIC_RELEASE) 260 #define ECB_MEMORY_FENCE_RELEASE __c11_atomic_thread_fence (__ATOMIC_RELEASE)
261 #undef ECB_MEMORY_FENCE_RELAXED
262 #define ECB_MEMORY_FENCE_RELAXED __c11_atomic_thread_fence (__ATOMIC_RELAXED)
219 263
220 #elif ECB_GCC_VERSION(4,4) || defined __INTEL_COMPILER || defined __clang__ 264 #elif ECB_GCC_VERSION(4,4) || defined __INTEL_COMPILER || defined __clang__
221 #define ECB_MEMORY_FENCE __sync_synchronize () 265 #define ECB_MEMORY_FENCE __sync_synchronize ()
222 #elif _MSC_VER >= 1500 /* VC++ 2008 */ 266 #elif _MSC_VER >= 1500 /* VC++ 2008 */
223 /* apparently, microsoft broke all the memory barrier stuff in Visual Studio 2008... */ 267 /* apparently, microsoft broke all the memory barrier stuff in Visual Studio 2008... */
233 #elif defined _WIN32 277 #elif defined _WIN32
234 #include <WinNT.h> 278 #include <WinNT.h>
235 #define ECB_MEMORY_FENCE MemoryBarrier () /* actually just xchg on x86... scary */ 279 #define ECB_MEMORY_FENCE MemoryBarrier () /* actually just xchg on x86... scary */
236 #elif __SUNPRO_C >= 0x5110 || __SUNPRO_CC >= 0x5110 280 #elif __SUNPRO_C >= 0x5110 || __SUNPRO_CC >= 0x5110
237 #include <mbarrier.h> 281 #include <mbarrier.h>
238 #define ECB_MEMORY_FENCE __machine_rw_barrier () 282 #define ECB_MEMORY_FENCE __machine_rw_barrier ()
239 #define ECB_MEMORY_FENCE_ACQUIRE __machine_r_barrier () 283 #define ECB_MEMORY_FENCE_ACQUIRE __machine_acq_barrier ()
240 #define ECB_MEMORY_FENCE_RELEASE __machine_w_barrier () 284 #define ECB_MEMORY_FENCE_RELEASE __machine_rel_barrier ()
285 #define ECB_MEMORY_FENCE_RELAXED __compiler_barrier ()
241 #elif __xlC__ 286 #elif __xlC__
242 #define ECB_MEMORY_FENCE __sync () 287 #define ECB_MEMORY_FENCE __sync ()
243 #endif 288 #endif
244#endif 289#endif
245 290
246#ifndef ECB_MEMORY_FENCE 291#ifndef ECB_MEMORY_FENCE
247 #if ECB_C11 && !defined __STDC_NO_ATOMICS__ 292 #if ECB_C11 && !defined __STDC_NO_ATOMICS__
248 /* we assume that these memory fences work on all variables/all memory accesses, */ 293 /* we assume that these memory fences work on all variables/all memory accesses, */
249 /* not just C11 atomics and atomic accesses */ 294 /* not just C11 atomics and atomic accesses */
250 #include <stdatomic.h> 295 #include <stdatomic.h>
251 /* Unfortunately, neither gcc 4.7 nor clang 3.1 generate any instructions for */
252 /* any fence other than seq_cst, which isn't very efficient for us. */
253 /* Why that is, we don't know - either the C11 memory model is quite useless */
254 /* for most usages, or gcc and clang have a bug */
255 /* I *currently* lean towards the latter, and inefficiently implement */
256 /* all three of ecb's fences as a seq_cst fence */
257 /* Update, gcc-4.8 generates mfence for all c++ fences, but nothing */
258 /* for all __atomic_thread_fence's except seq_cst */
259 #define ECB_MEMORY_FENCE atomic_thread_fence (memory_order_seq_cst) 296 #define ECB_MEMORY_FENCE atomic_thread_fence (memory_order_seq_cst)
297 #define ECB_MEMORY_FENCE_ACQUIRE atomic_thread_fence (memory_order_acquire)
298 #define ECB_MEMORY_FENCE_RELEASE atomic_thread_fence (memory_order_release)
260 #endif 299 #endif
261#endif 300#endif
262 301
263#ifndef ECB_MEMORY_FENCE 302#ifndef ECB_MEMORY_FENCE
264 #if !ECB_AVOID_PTHREADS 303 #if !ECB_AVOID_PTHREADS
284 323
285#if !defined ECB_MEMORY_FENCE_RELEASE && defined ECB_MEMORY_FENCE 324#if !defined ECB_MEMORY_FENCE_RELEASE && defined ECB_MEMORY_FENCE
286 #define ECB_MEMORY_FENCE_RELEASE ECB_MEMORY_FENCE 325 #define ECB_MEMORY_FENCE_RELEASE ECB_MEMORY_FENCE
287#endif 326#endif
288 327
328#if !defined ECB_MEMORY_FENCE_RELAXED && defined ECB_MEMORY_FENCE
329 #define ECB_MEMORY_FENCE_RELAXED ECB_MEMORY_FENCE /* very heavy-handed */
330#endif
331
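
For illustration, a minimal sketch of the publish/consume pattern these fences are meant for (per the comments above they are assumed to order all memory accesses, not just atomics). The function and variable names here are hypothetical examples, not part of the header; on compilers where only the full ECB_MEMORY_FENCE is available the acquire/release/relaxed names fall back to it as defined above.

#include "ecb.h"

static int shared_data;
static volatile int shared_ready;

static void producer (void)
{
  shared_data = 42;          /* write the payload first */
  ECB_MEMORY_FENCE_RELEASE;  /* make the payload visible before the flag */
  shared_ready = 1;
}

static int consumer (void)
{
  while (!shared_ready)
    ;                        /* spin until the flag is set */

  ECB_MEMORY_FENCE_ACQUIRE;  /* order the flag read before the payload read */
  return shared_data;
}
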
289/*****************************************************************************/ 332/*****************************************************************************/
290 333
291#if ECB_CPP 334#if ECB_CPP
292 #define ecb_inline static inline 335 #define ecb_inline static inline
293#elif ECB_GCC_VERSION(2,5) 336#elif ECB_GCC_VERSION(2,5)
312#define ECB_CONCAT(a, b) ECB_CONCAT_(a, b) 355#define ECB_CONCAT(a, b) ECB_CONCAT_(a, b)
313#define ECB_STRINGIFY_(a) # a 356#define ECB_STRINGIFY_(a) # a
314#define ECB_STRINGIFY(a) ECB_STRINGIFY_(a) 357#define ECB_STRINGIFY(a) ECB_STRINGIFY_(a)
315#define ECB_STRINGIFY_EXPR(expr) ((expr), ECB_STRINGIFY_ (expr)) 358#define ECB_STRINGIFY_EXPR(expr) ((expr), ECB_STRINGIFY_ (expr))
316 359
 360/* This marks larger functions that do not necessarily need to be inlined */
361/* The idea is to possibly compile the header twice, */
362/* once exposing only the declarations, another time to define external functions */
363/* TODO: possibly static would be best for these at the moment? */
317#define ecb_function_ ecb_inline 364#define ecb_function_ ecb_inline
318 365
319#if ECB_GCC_VERSION(3,1) || ECB_CLANG_VERSION(2,8) 366#if ECB_GCC_VERSION(3,1) || ECB_CLANG_VERSION(2,8)
320 #define ecb_attribute(attrlist) __attribute__ (attrlist) 367 #define ecb_attribute(attrlist) __attribute__ (attrlist)
321#else 368#else
411/* count trailing zero bits and count # of one bits */ 458/* count trailing zero bits and count # of one bits */
412#if ECB_GCC_VERSION(3,4) \ 459#if ECB_GCC_VERSION(3,4) \
413 || (ECB_CLANG_BUILTIN(__builtin_clz) && ECB_CLANG_BUILTIN(__builtin_clzll) \ 460 || (ECB_CLANG_BUILTIN(__builtin_clz) && ECB_CLANG_BUILTIN(__builtin_clzll) \
414 && ECB_CLANG_BUILTIN(__builtin_ctz) && ECB_CLANG_BUILTIN(__builtin_ctzll) \ 461 && ECB_CLANG_BUILTIN(__builtin_ctz) && ECB_CLANG_BUILTIN(__builtin_ctzll) \
415 && ECB_CLANG_BUILTIN(__builtin_popcount)) 462 && ECB_CLANG_BUILTIN(__builtin_popcount))
416 /* we assume int == 32 bit, long == 32 or 64 bit and long long == 64 bit */
417 #define ecb_ld32(x) (__builtin_clz (x) ^ 31)
418 #define ecb_ld64(x) (__builtin_clzll (x) ^ 63)
419 #define ecb_ctz32(x) __builtin_ctz (x) 463 #define ecb_ctz32(x) __builtin_ctz (x)
464 #define ecb_ctz64(x) (__SIZEOF_LONG__ == 64 ? __builtin_ctzl (x) : __builtin_ctzll (x))
420 #define ecb_ctz64(x) __builtin_ctzll (x) 465 #define ecb_clz32(x) __builtin_clz (x)
466 #define ecb_clz64(x) (__SIZEOF_LONG__ == 64 ? __builtin_clzl (x) : __builtin_clzll (x))
467 #define ecb_ld32(x) (ecb_clz32 (x) ^ 31)
468 #define ecb_ld64(x) (ecb_clz64 (x) ^ 63)
421 #define ecb_popcount32(x) __builtin_popcount (x) 469 #define ecb_popcount32(x) __builtin_popcount (x)
422 /* no popcountll */ 470 /* ecb_popcount64 is more difficult, see below */
423#else 471#else
424 ecb_function_ ecb_const int ecb_ctz32 (uint32_t x); 472 ecb_function_ ecb_const int ecb_ctz32 (uint32_t x);
425 ecb_function_ ecb_const int 473 ecb_function_ ecb_const int ecb_ctz32 (uint32_t x)
426 ecb_ctz32 (uint32_t x)
427 { 474 {
475#if 1400 <= _MSC_VER && (_M_IX86 || _M_X64 || _M_IA64 || _M_ARM)
476 unsigned long r;
477 _BitScanForward (&r, x);
478 return (int)r;
479#else
428 int r = 0; 480 int r;
429 481
430 x &= ~x + 1; /* this isolates the lowest bit */ 482 x &= ~x + 1; /* this isolates the lowest bit */
431 483
432#if ECB_branchless_on_i386 484 #if 1
485 /* David Seal's algorithm, Message-ID: <32975@armltd.uucp> from 1994 */
486 /* This happens to return 32 for x == 0, but the API does not support this */
487
488 /* -0 marks unused entries */
489 static unsigned char table[64] =
490 {
491 32, 0, 1, 12, 2, 6, -0, 13, 3, -0, 7, -0, -0, -0, -0, 14,
492 10, 4, -0, -0, 8, -0, -0, 25, -0, -0, -0, -0, -0, 21, 27, 15,
493 31, 11, 5, -0, -0, -0, -0, -0, 9, -0, -0, 24, -0, -0, 20, 26,
494 30, -0, -0, -0, -0, 23, -0, 19, 29, -0, 22, 18, 28, 17, 16, -0
495 };
496
497 /* magic constant results in 33 unique values in the upper 6 bits */
498 x *= 0x0450fbafU; /* == 17 * 65 * 65535 */
499
500 r = table [x >> 26];
501 #elif 0 /* branchless on i386, typically */
502 r = 0;
433 r += !!(x & 0xaaaaaaaa) << 0; 503 r += !!(x & 0xaaaaaaaa) << 0;
434 r += !!(x & 0xcccccccc) << 1; 504 r += !!(x & 0xcccccccc) << 1;
435 r += !!(x & 0xf0f0f0f0) << 2; 505 r += !!(x & 0xf0f0f0f0) << 2;
436 r += !!(x & 0xff00ff00) << 3; 506 r += !!(x & 0xff00ff00) << 3;
437 r += !!(x & 0xffff0000) << 4; 507 r += !!(x & 0xffff0000) << 4;
438#else 508 #else /* branchless on modern compilers, typically */
509 r = 0;
439 if (x & 0xaaaaaaaa) r += 1; 510 if (x & 0xaaaaaaaa) r += 1;
440 if (x & 0xcccccccc) r += 2; 511 if (x & 0xcccccccc) r += 2;
441 if (x & 0xf0f0f0f0) r += 4; 512 if (x & 0xf0f0f0f0) r += 4;
442 if (x & 0xff00ff00) r += 8; 513 if (x & 0xff00ff00) r += 8;
443 if (x & 0xffff0000) r += 16; 514 if (x & 0xffff0000) r += 16;
444#endif 515#endif
445 516
446 return r; 517 return r;
518#endif
447 } 519 }
448 520
449 ecb_function_ ecb_const int ecb_ctz64 (uint64_t x); 521 ecb_function_ ecb_const int ecb_ctz64 (uint64_t x);
450 ecb_function_ ecb_const int 522 ecb_function_ ecb_const int ecb_ctz64 (uint64_t x)
451 ecb_ctz64 (uint64_t x)
452 { 523 {
524#if 1400 <= _MSC_VER && (_M_X64 || _M_IA64 || _M_ARM)
525 unsigned long r;
526 _BitScanForward64 (&r, x);
527 return (int)r;
528#else
453 int shift = x & 0xffffffffU ? 0 : 32; 529 int shift = x & 0xffffffff ? 0 : 32;
454 return ecb_ctz32 (x >> shift) + shift; 530 return ecb_ctz32 (x >> shift) + shift;
531#endif
532 }
533
534 ecb_function_ ecb_const int ecb_clz32 (uint32_t x);
535 ecb_function_ ecb_const int ecb_clz32 (uint32_t x)
536 {
537#if 1400 <= _MSC_VER && (_M_IX86 || _M_X64 || _M_IA64 || _M_ARM)
538 unsigned long r;
539 _BitScanReverse (&r, x);
540 return (int)r;
541#else
542
543 /* Robert Harley's algorithm from comp.arch 1996-12-07 */
544 /* This happens to return 32 for x == 0, but the API does not support this */
545
546 /* -0 marks unused table elements */
547 static unsigned char table[64] =
548 {
549 32, 31, -0, 16, -0, 30, 3, -0, 15, -0, -0, -0, 29, 10, 2, -0,
550 -0, -0, 12, 14, 21, -0, 19, -0, -0, 28, -0, 25, -0, 9, 1, -0,
551 17, -0, 4, -0, -0, -0, 11, -0, 13, 22, 20, -0, 26, -0, -0, 18,
552 5, -0, -0, 23, -0, 27, -0, 6, -0, 24, 7, -0, 8, -0, 0, -0
553 };
554
555 /* propagate leftmost 1 bit to the right */
556 x |= x >> 1;
557 x |= x >> 2;
558 x |= x >> 4;
559 x |= x >> 8;
560 x |= x >> 16;
561
562 /* magic constant results in 33 unique values in the upper 6 bits */
563 x *= 0x06EB14F9U; /* == 7 * 255 * 255 * 255 */
564
565 return table [x >> 26];
566#endif
567 }
568
569 ecb_function_ ecb_const int ecb_clz64 (uint64_t x);
570 ecb_function_ ecb_const int ecb_clz64 (uint64_t x)
571 {
572#if 1400 <= _MSC_VER && (_M_X64 || _M_IA64 || _M_ARM)
573 unsigned long r;
574 _BitScanReverse64 (&r, x);
575 return (int)r;
576#else
577 uint32_t l = x >> 32;
578 int shift = l ? 0 : 32;
579 return ecb_clz32 (l ? l : x) + shift;
580#endif
455 } 581 }
456 582
457 ecb_function_ ecb_const int ecb_popcount32 (uint32_t x); 583 ecb_function_ ecb_const int ecb_popcount32 (uint32_t x);
458 ecb_function_ ecb_const int 584 ecb_function_ ecb_const int
459 ecb_popcount32 (uint32_t x) 585 ecb_popcount32 (uint32_t x)
467 } 593 }
468 594
469 ecb_function_ ecb_const int ecb_ld32 (uint32_t x); 595 ecb_function_ ecb_const int ecb_ld32 (uint32_t x);
470 ecb_function_ ecb_const int ecb_ld32 (uint32_t x) 596 ecb_function_ ecb_const int ecb_ld32 (uint32_t x)
471 { 597 {
598#if 1400 <= _MSC_VER && (_M_IX86 || _M_X64 || _M_IA64 || _M_ARM)
599 unsigned long r;
600 _BitScanReverse (&r, x);
601 return (int)r;
602#else
472 int r = 0; 603 int r = 0;
473 604
474 if (x >> 16) { x >>= 16; r += 16; } 605 if (x >> 16) { x >>= 16; r += 16; }
475 if (x >> 8) { x >>= 8; r += 8; } 606 if (x >> 8) { x >>= 8; r += 8; }
476 if (x >> 4) { x >>= 4; r += 4; } 607 if (x >> 4) { x >>= 4; r += 4; }
477 if (x >> 2) { x >>= 2; r += 2; } 608 if (x >> 2) { x >>= 2; r += 2; }
478 if (x >> 1) { r += 1; } 609 if (x >> 1) { r += 1; }
479 610
480 return r; 611 return r;
612#endif
481 } 613 }
482 614
483 ecb_function_ ecb_const int ecb_ld64 (uint64_t x); 615 ecb_function_ ecb_const int ecb_ld64 (uint64_t x);
484 ecb_function_ ecb_const int ecb_ld64 (uint64_t x) 616 ecb_function_ ecb_const int ecb_ld64 (uint64_t x)
485 { 617 {
618#if 1400 <= _MSC_VER && (_M_X64 || _M_IA64 || _M_ARM)
619 unsigned long r;
620 _BitScanReverse64 (&r, x);
621 return (int)r;
622#else
486 int r = 0; 623 int r = 0;
487 624
488 if (x >> 32) { x >>= 32; r += 32; } 625 if (x >> 32) { x >>= 32; r += 32; }
489 626
490 return r + ecb_ld32 (x); 627 return r + ecb_ld32 (x);
628#endif
491 } 629 }
492#endif 630#endif
493 631
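
As a quick illustration of the bit-scan and popcount helpers above (whichever branch ends up defining them), a few expected values, written here as assertions in a hypothetical example function:

#include <assert.h>
#include "ecb.h"

static void ecb_bitop_examples (void)
{
  assert (ecb_ctz32 (0x00000080) ==  7); /* 7 trailing zero bits */
  assert (ecb_ld32  (0x00000080) ==  7); /* floor (log2 (x)) */
  assert (ecb_ctz64 (1ULL << 40) == 40);
  assert (ecb_ld64  (1ULL << 40) == 40);
  assert (ecb_popcount32 (0xff00ff00) == 16);
}
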
494ecb_function_ ecb_const ecb_bool ecb_is_pot32 (uint32_t x); 632ecb_function_ ecb_const ecb_bool ecb_is_pot32 (uint32_t x);
495ecb_function_ ecb_const ecb_bool ecb_is_pot32 (uint32_t x) { return !(x & (x - 1)); } 633ecb_function_ ecb_const ecb_bool ecb_is_pot32 (uint32_t x) { return !(x & (x - 1)); }
524 x = ( x >> 16 ) | ( x << 16); 662 x = ( x >> 16 ) | ( x << 16);
525 663
526 return x; 664 return x;
527} 665}
528 666
529/* popcount64 is only available on 64 bit cpus as gcc builtin */
530/* so for this version we are lazy */
531ecb_function_ ecb_const int ecb_popcount64 (uint64_t x); 667ecb_function_ ecb_const int ecb_popcount64 (uint64_t x);
532ecb_function_ ecb_const int 668ecb_function_ ecb_const int ecb_popcount64 (uint64_t x)
533ecb_popcount64 (uint64_t x)
534{ 669{
670 /* popcount64 is only available on 64 bit cpus as gcc builtin. */
671 /* also, gcc/clang make this surprisingly difficult to use */
672#if (__SIZEOF_LONG__ == 8) && (ECB_GCC_VERSION(3,4) || ECB_CLANG_BUILTIN (__builtin_popcountl))
673 return __builtin_popcountl (x);
674#else
535 return ecb_popcount32 (x) + ecb_popcount32 (x >> 32); 675 return ecb_popcount32 (x) + ecb_popcount32 (x >> 32);
676#endif
536} 677}
537 678
538ecb_inline ecb_const uint8_t ecb_rotl8 (uint8_t x, unsigned int count); 679ecb_inline ecb_const uint8_t ecb_rotl8 (uint8_t x, unsigned int count);
539ecb_inline ecb_const uint8_t ecb_rotr8 (uint8_t x, unsigned int count); 680ecb_inline ecb_const uint8_t ecb_rotr8 (uint8_t x, unsigned int count);
540ecb_inline ecb_const uint16_t ecb_rotl16 (uint16_t x, unsigned int count); 681ecb_inline ecb_const uint16_t ecb_rotl16 (uint16_t x, unsigned int count);
542ecb_inline ecb_const uint32_t ecb_rotl32 (uint32_t x, unsigned int count); 683ecb_inline ecb_const uint32_t ecb_rotl32 (uint32_t x, unsigned int count);
543ecb_inline ecb_const uint32_t ecb_rotr32 (uint32_t x, unsigned int count); 684ecb_inline ecb_const uint32_t ecb_rotr32 (uint32_t x, unsigned int count);
544ecb_inline ecb_const uint64_t ecb_rotl64 (uint64_t x, unsigned int count); 685ecb_inline ecb_const uint64_t ecb_rotl64 (uint64_t x, unsigned int count);
545ecb_inline ecb_const uint64_t ecb_rotr64 (uint64_t x, unsigned int count); 686ecb_inline ecb_const uint64_t ecb_rotr64 (uint64_t x, unsigned int count);
546 687
547ecb_inline ecb_const uint8_t ecb_rotl8 (uint8_t x, unsigned int count) { return (x >> ( 8 - count)) | (x << count); } 688ecb_inline ecb_const uint8_t ecb_rotl8 (uint8_t x, unsigned int count) { return (x >> (-count & 7)) | (x << (count & 7)); }
548ecb_inline ecb_const uint8_t ecb_rotr8 (uint8_t x, unsigned int count) { return (x << ( 8 - count)) | (x >> count); } 689ecb_inline ecb_const uint8_t ecb_rotr8 (uint8_t x, unsigned int count) { return (x << (-count & 7)) | (x >> (count & 7)); }
549ecb_inline ecb_const uint16_t ecb_rotl16 (uint16_t x, unsigned int count) { return (x >> (16 - count)) | (x << count); } 690ecb_inline ecb_const uint16_t ecb_rotl16 (uint16_t x, unsigned int count) { return (x >> (-count & 15)) | (x << (count & 15)); }
550ecb_inline ecb_const uint16_t ecb_rotr16 (uint16_t x, unsigned int count) { return (x << (16 - count)) | (x >> count); } 691ecb_inline ecb_const uint16_t ecb_rotr16 (uint16_t x, unsigned int count) { return (x << (-count & 15)) | (x >> (count & 15)); }
551ecb_inline ecb_const uint32_t ecb_rotl32 (uint32_t x, unsigned int count) { return (x >> (32 - count)) | (x << count); } 692ecb_inline ecb_const uint32_t ecb_rotl32 (uint32_t x, unsigned int count) { return (x >> (-count & 31)) | (x << (count & 31)); }
552ecb_inline ecb_const uint32_t ecb_rotr32 (uint32_t x, unsigned int count) { return (x << (32 - count)) | (x >> count); } 693ecb_inline ecb_const uint32_t ecb_rotr32 (uint32_t x, unsigned int count) { return (x << (-count & 31)) | (x >> (count & 31)); }
553ecb_inline ecb_const uint64_t ecb_rotl64 (uint64_t x, unsigned int count) { return (x >> (64 - count)) | (x << count); } 694ecb_inline ecb_const uint64_t ecb_rotl64 (uint64_t x, unsigned int count) { return (x >> (-count & 63)) | (x << (count & 63)); }
554ecb_inline ecb_const uint64_t ecb_rotr64 (uint64_t x, unsigned int count) { return (x << (64 - count)) | (x >> count); } 695ecb_inline ecb_const uint64_t ecb_rotr64 (uint64_t x, unsigned int count) { return (x << (-count & 63)) | (x >> (count & 63)); }
696
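
For illustration, the rotate helpers above mask the shift count, so a count of 0 (or of the full width) is well-defined; a couple of sample values in a hypothetical example function:

#include <assert.h>
#include "ecb.h"

static void ecb_rot_examples (void)
{
  assert (ecb_rotl8  (0x81, 1)        == 0x03);
  assert (ecb_rotr8  (0x03, 1)        == 0x81);
  assert (ecb_rotl32 (0x80000001U, 4) == 0x00000018U);
  assert (ecb_rotl32 (0x12345678U, 0) == 0x12345678U); /* count 0 no longer shifts by the full width */
}
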
697#if ECB_CPP
698
699inline uint8_t ecb_ctz (uint8_t v) { return ecb_ctz32 (v); }
700inline uint16_t ecb_ctz (uint16_t v) { return ecb_ctz32 (v); }
701inline uint32_t ecb_ctz (uint32_t v) { return ecb_ctz32 (v); }
702inline uint64_t ecb_ctz (uint64_t v) { return ecb_ctz64 (v); }
703
704inline bool ecb_is_pot (uint8_t v) { return ecb_is_pot32 (v); }
705inline bool ecb_is_pot (uint16_t v) { return ecb_is_pot32 (v); }
706inline bool ecb_is_pot (uint32_t v) { return ecb_is_pot32 (v); }
707inline bool ecb_is_pot (uint64_t v) { return ecb_is_pot64 (v); }
708
709inline int ecb_ld (uint8_t v) { return ecb_ld32 (v); }
710inline int ecb_ld (uint16_t v) { return ecb_ld32 (v); }
711inline int ecb_ld (uint32_t v) { return ecb_ld32 (v); }
712inline int ecb_ld (uint64_t v) { return ecb_ld64 (v); }
713
714inline int ecb_popcount (uint8_t v) { return ecb_popcount32 (v); }
715inline int ecb_popcount (uint16_t v) { return ecb_popcount32 (v); }
716inline int ecb_popcount (uint32_t v) { return ecb_popcount32 (v); }
717inline int ecb_popcount (uint64_t v) { return ecb_popcount64 (v); }
718
719inline uint8_t ecb_bitrev (uint8_t v) { return ecb_bitrev8 (v); }
720inline uint16_t ecb_bitrev (uint16_t v) { return ecb_bitrev16 (v); }
721inline uint32_t ecb_bitrev (uint32_t v) { return ecb_bitrev32 (v); }
722
723inline uint8_t ecb_rotl (uint8_t v, unsigned int count) { return ecb_rotl8 (v, count); }
724inline uint16_t ecb_rotl (uint16_t v, unsigned int count) { return ecb_rotl16 (v, count); }
725inline uint32_t ecb_rotl (uint32_t v, unsigned int count) { return ecb_rotl32 (v, count); }
726inline uint64_t ecb_rotl (uint64_t v, unsigned int count) { return ecb_rotl64 (v, count); }
727
728inline uint8_t ecb_rotr (uint8_t v, unsigned int count) { return ecb_rotr8 (v, count); }
729inline uint16_t ecb_rotr (uint16_t v, unsigned int count) { return ecb_rotr16 (v, count); }
730inline uint32_t ecb_rotr (uint32_t v, unsigned int count) { return ecb_rotr32 (v, count); }
731inline uint64_t ecb_rotr (uint64_t v, unsigned int count) { return ecb_rotr64 (v, count); }
732
733#endif
555 734
556#if ECB_GCC_VERSION(4,3) || (ECB_CLANG_BUILTIN(__builtin_bswap32) && ECB_CLANG_BUILTIN(__builtin_bswap64)) 735#if ECB_GCC_VERSION(4,3) || (ECB_CLANG_BUILTIN(__builtin_bswap32) && ECB_CLANG_BUILTIN(__builtin_bswap64))
557 #if ECB_GCC_VERSION(4,8) || ECB_CLANG_BUILTIN(__builtin_bswap16) 736 #if ECB_GCC_VERSION(4,8) || ECB_CLANG_BUILTIN(__builtin_bswap16)
558 #define ecb_bswap16(x) __builtin_bswap16 (x) 737 #define ecb_bswap16(x) __builtin_bswap16 (x)
559 #else 738 #else
566 #define ecb_bswap16(x) ((uint16_t)_byteswap_ushort ((uint16_t)(x))) 745 #define ecb_bswap16(x) ((uint16_t)_byteswap_ushort ((uint16_t)(x)))
567 #define ecb_bswap32(x) ((uint32_t)_byteswap_ulong ((uint32_t)(x))) 746 #define ecb_bswap32(x) ((uint32_t)_byteswap_ulong ((uint32_t)(x)))
568 #define ecb_bswap64(x) ((uint64_t)_byteswap_uint64 ((uint64_t)(x))) 747 #define ecb_bswap64(x) ((uint64_t)_byteswap_uint64 ((uint64_t)(x)))
569#else 748#else
570 ecb_function_ ecb_const uint16_t ecb_bswap16 (uint16_t x); 749 ecb_function_ ecb_const uint16_t ecb_bswap16 (uint16_t x);
571 ecb_function_ ecb_const uint16_t 750 ecb_function_ ecb_const uint16_t ecb_bswap16 (uint16_t x)
572 ecb_bswap16 (uint16_t x)
573 { 751 {
574 return ecb_rotl16 (x, 8); 752 return ecb_rotl16 (x, 8);
575 } 753 }
576 754
577 ecb_function_ ecb_const uint32_t ecb_bswap32 (uint32_t x); 755 ecb_function_ ecb_const uint32_t ecb_bswap32 (uint32_t x);
578 ecb_function_ ecb_const uint32_t 756 ecb_function_ ecb_const uint32_t ecb_bswap32 (uint32_t x)
579 ecb_bswap32 (uint32_t x)
580 { 757 {
581 return (((uint32_t)ecb_bswap16 (x)) << 16) | ecb_bswap16 (x >> 16); 758 return (((uint32_t)ecb_bswap16 (x)) << 16) | ecb_bswap16 (x >> 16);
582 } 759 }
583 760
584 ecb_function_ ecb_const uint64_t ecb_bswap64 (uint64_t x); 761 ecb_function_ ecb_const uint64_t ecb_bswap64 (uint64_t x);
585 ecb_function_ ecb_const uint64_t 762 ecb_function_ ecb_const uint64_t ecb_bswap64 (uint64_t x)
586 ecb_bswap64 (uint64_t x)
587 { 763 {
588 return (((uint64_t)ecb_bswap32 (x)) << 32) | ecb_bswap32 (x >> 32); 764 return (((uint64_t)ecb_bswap32 (x)) << 32) | ecb_bswap32 (x >> 32);
589 } 765 }
590#endif 766#endif
591 767
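
A short illustration of the byte-swap helpers (builtin, intrinsic or rotate-based fallback, whichever branch applies), again as a hypothetical example function:

#include <assert.h>
#include "ecb.h"

static void ecb_bswap_examples (void)
{
  assert (ecb_bswap16 (0x1122)                == 0x2211);
  assert (ecb_bswap32 (0x11223344U)           == 0x44332211U);
  assert (ecb_bswap64 (0x1122334455667788ULL) == 0x8877665544332211ULL);
}
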
598#endif 774#endif
599 775
600/* try to tell the compiler that some condition is definitely true */ 776/* try to tell the compiler that some condition is definitely true */
601#define ecb_assume(cond) if (!(cond)) ecb_unreachable (); else 0 777#define ecb_assume(cond) if (!(cond)) ecb_unreachable (); else 0
602 778
603ecb_inline ecb_const unsigned char ecb_byteorder_helper (void); 779ecb_inline ecb_const uint32_t ecb_byteorder_helper (void);
604ecb_inline ecb_const unsigned char 780ecb_inline ecb_const uint32_t ecb_byteorder_helper (void)
605ecb_byteorder_helper (void)
606{ 781{
607 /* the union code still generates code under pressure in gcc, */ 782 /* the union code still generates code under pressure in gcc, */
608 /* but less than using pointers, and always seems to */ 783 /* but less than using pointers, and always seems to */
609 /* successfully return a constant. */ 784 /* successfully return a constant. */
610 /* the reason why we have this horrible preprocessor mess */ 785 /* the reason why we have this horrible preprocessor mess */
611 /* is to avoid it in all cases, at least on common architectures */ 786 /* is to avoid it in all cases, at least on common architectures */
612 /* or when using a recent enough gcc version (>= 4.6) */ 787 /* or when using a recent enough gcc version (>= 4.6) */
613#if ((__i386 || __i386__) && !__VOS__) || _M_IX86 || ECB_GCC_AMD64 || ECB_MSVC_AMD64
614 return 0x44;
615#elif __BYTE_ORDER__ && __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__ 788#if (defined __BYTE_ORDER__ && __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__) \
789 || ((__i386 || __i386__ || _M_IX86 || ECB_GCC_AMD64 || ECB_MSVC_AMD64) && !__VOS__)
790 #define ECB_LITTLE_ENDIAN 1
616 return 0x44; 791 return 0x44332211;
617#elif __BYTE_ORDER__ && __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__ 792#elif (defined __BYTE_ORDER__ && __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__) \
793 || ((__AARCH64EB__ || __MIPSEB__ || __ARMEB__) && !__VOS__)
794 #define ECB_BIG_ENDIAN 1
618 return 0x11; 795 return 0x11223344;
619#else 796#else
620 union 797 union
621 { 798 {
799 uint8_t c[4];
622 uint32_t i; 800 uint32_t u;
623 uint8_t c;
624 } u = { 0x11223344 }; 801 } u = { 0x11, 0x22, 0x33, 0x44 };
625 return u.c; 802 return u.u;
626#endif 803#endif
627} 804}
628 805
629ecb_inline ecb_const ecb_bool ecb_big_endian (void); 806ecb_inline ecb_const ecb_bool ecb_big_endian (void);
630ecb_inline ecb_const ecb_bool ecb_big_endian (void) { return ecb_byteorder_helper () == 0x11; } 807ecb_inline ecb_const ecb_bool ecb_big_endian (void) { return ecb_byteorder_helper () == 0x11223344; }
631ecb_inline ecb_const ecb_bool ecb_little_endian (void); 808ecb_inline ecb_const ecb_bool ecb_little_endian (void);
632ecb_inline ecb_const ecb_bool ecb_little_endian (void) { return ecb_byteorder_helper () == 0x44; } 809ecb_inline ecb_const ecb_bool ecb_little_endian (void) { return ecb_byteorder_helper () == 0x44332211; }
810
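
For illustration, how the endianness predicates above might be used to pick a conversion at runtime (the helper returns a compile-time constant on common targets, so the branch can usually be folded away). The function below is a hypothetical sketch; it is essentially what the ecb_host_to_be_u32 helper defined in the next section does.

#include "ecb.h"

/* convert a host-order 32 bit value to big-endian (network) order */
static uint32_t example_to_big_endian (uint32_t v)
{
  return ecb_big_endian () ? v : ecb_bswap32 (v);
}
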
811/*****************************************************************************/
812/* unaligned load/store */
813
814ecb_inline uint_fast16_t ecb_be_u16_to_host (uint_fast16_t v) { return ecb_little_endian () ? ecb_bswap16 (v) : v; }
815ecb_inline uint_fast32_t ecb_be_u32_to_host (uint_fast32_t v) { return ecb_little_endian () ? ecb_bswap32 (v) : v; }
816ecb_inline uint_fast64_t ecb_be_u64_to_host (uint_fast64_t v) { return ecb_little_endian () ? ecb_bswap64 (v) : v; }
817
818ecb_inline uint_fast16_t ecb_le_u16_to_host (uint_fast16_t v) { return ecb_big_endian () ? ecb_bswap16 (v) : v; }
819ecb_inline uint_fast32_t ecb_le_u32_to_host (uint_fast32_t v) { return ecb_big_endian () ? ecb_bswap32 (v) : v; }
820ecb_inline uint_fast64_t ecb_le_u64_to_host (uint_fast64_t v) { return ecb_big_endian () ? ecb_bswap64 (v) : v; }
821
822ecb_inline uint_fast16_t ecb_peek_u16_u (const void *ptr) { uint16_t v; memcpy (&v, ptr, sizeof (v)); return v; }
823ecb_inline uint_fast32_t ecb_peek_u32_u (const void *ptr) { uint32_t v; memcpy (&v, ptr, sizeof (v)); return v; }
824ecb_inline uint_fast64_t ecb_peek_u64_u (const void *ptr) { uint64_t v; memcpy (&v, ptr, sizeof (v)); return v; }
825
826ecb_inline uint_fast16_t ecb_peek_be_u16_u (const void *ptr) { return ecb_be_u16_to_host (ecb_peek_u16_u (ptr)); }
827ecb_inline uint_fast32_t ecb_peek_be_u32_u (const void *ptr) { return ecb_be_u32_to_host (ecb_peek_u32_u (ptr)); }
828ecb_inline uint_fast64_t ecb_peek_be_u64_u (const void *ptr) { return ecb_be_u64_to_host (ecb_peek_u64_u (ptr)); }
829
830ecb_inline uint_fast16_t ecb_peek_le_u16_u (const void *ptr) { return ecb_le_u16_to_host (ecb_peek_u16_u (ptr)); }
831ecb_inline uint_fast32_t ecb_peek_le_u32_u (const void *ptr) { return ecb_le_u32_to_host (ecb_peek_u32_u (ptr)); }
832ecb_inline uint_fast64_t ecb_peek_le_u64_u (const void *ptr) { return ecb_le_u64_to_host (ecb_peek_u64_u (ptr)); }
833
834ecb_inline uint_fast16_t ecb_host_to_be_u16 (uint_fast16_t v) { return ecb_little_endian () ? ecb_bswap16 (v) : v; }
835ecb_inline uint_fast32_t ecb_host_to_be_u32 (uint_fast32_t v) { return ecb_little_endian () ? ecb_bswap32 (v) : v; }
836ecb_inline uint_fast64_t ecb_host_to_be_u64 (uint_fast64_t v) { return ecb_little_endian () ? ecb_bswap64 (v) : v; }
837
838ecb_inline uint_fast16_t ecb_host_to_le_u16 (uint_fast16_t v) { return ecb_big_endian () ? ecb_bswap16 (v) : v; }
839ecb_inline uint_fast32_t ecb_host_to_le_u32 (uint_fast32_t v) { return ecb_big_endian () ? ecb_bswap32 (v) : v; }
840ecb_inline uint_fast64_t ecb_host_to_le_u64 (uint_fast64_t v) { return ecb_big_endian () ? ecb_bswap64 (v) : v; }
841
842ecb_inline void ecb_poke_u16_u (void *ptr, uint16_t v) { memcpy (ptr, &v, sizeof (v)); }
843ecb_inline void ecb_poke_u32_u (void *ptr, uint32_t v) { memcpy (ptr, &v, sizeof (v)); }
844ecb_inline void ecb_poke_u64_u (void *ptr, uint64_t v) { memcpy (ptr, &v, sizeof (v)); }
845
846ecb_inline void ecb_poke_be_u16_u (void *ptr, uint_fast16_t v) { ecb_poke_u16_u (ptr, ecb_host_to_be_u16 (v)); }
847ecb_inline void ecb_poke_be_u32_u (void *ptr, uint_fast32_t v) { ecb_poke_u32_u (ptr, ecb_host_to_be_u32 (v)); }
848ecb_inline void ecb_poke_be_u64_u (void *ptr, uint_fast64_t v) { ecb_poke_u64_u (ptr, ecb_host_to_be_u64 (v)); }
849
850ecb_inline void ecb_poke_le_u16_u (void *ptr, uint_fast16_t v) { ecb_poke_u16_u (ptr, ecb_host_to_le_u16 (v)); }
851ecb_inline void ecb_poke_le_u32_u (void *ptr, uint_fast32_t v) { ecb_poke_u32_u (ptr, ecb_host_to_le_u32 (v)); }
852ecb_inline void ecb_poke_le_u64_u (void *ptr, uint_fast64_t v) { ecb_poke_u64_u (ptr, ecb_host_to_le_u64 (v)); }
853
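
A minimal sketch of how the unaligned peek/poke helpers above might be used, e.g. to parse and emit fixed-endian fields at an arbitrary (possibly misaligned) buffer offset; the buffer and values are made up for the example:

#include <assert.h>
#include "ecb.h"

static void ecb_peek_poke_example (void)
{
  unsigned char buf[6] = { 0xff, 0xde, 0xad, 0xbe, 0xef, 0xff };

  /* read a 32 bit big-endian value starting at the (misaligned) offset 1 */
  assert (ecb_peek_be_u32_u (buf + 1) == 0xdeadbeefU);

  /* overwrite it with a 16 bit little-endian value */
  ecb_poke_le_u16_u (buf + 1, 0x1234);
  assert (buf[1] == 0x34 && buf[2] == 0x12);
}
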
854#if ECB_CPP
855
856inline uint8_t ecb_bswap (uint8_t v) { return v; }
857inline uint16_t ecb_bswap (uint16_t v) { return ecb_bswap16 (v); }
858inline uint32_t ecb_bswap (uint32_t v) { return ecb_bswap32 (v); }
859inline uint64_t ecb_bswap (uint64_t v) { return ecb_bswap64 (v); }
860
861template<typename T> inline T ecb_be_to_host (T v) { return ecb_little_endian () ? ecb_bswap (v) : v; }
862template<typename T> inline T ecb_le_to_host (T v) { return ecb_big_endian () ? ecb_bswap (v) : v; }
863template<typename T> inline T ecb_peek (const void *ptr) { return *(const T *)ptr; }
864template<typename T> inline T ecb_peek_be (const void *ptr) { return ecb_be_to_host (ecb_peek <T> (ptr)); }
865template<typename T> inline T ecb_peek_le (const void *ptr) { return ecb_le_to_host (ecb_peek <T> (ptr)); }
866template<typename T> inline T ecb_peek_u (const void *ptr) { T v; memcpy (&v, ptr, sizeof (v)); return v; }
867template<typename T> inline T ecb_peek_be_u (const void *ptr) { return ecb_be_to_host (ecb_peek_u<T> (ptr)); }
868template<typename T> inline T ecb_peek_le_u (const void *ptr) { return ecb_le_to_host (ecb_peek_u<T> (ptr)); }
869
870template<typename T> inline T ecb_host_to_be (T v) { return ecb_little_endian () ? ecb_bswap (v) : v; }
871template<typename T> inline T ecb_host_to_le (T v) { return ecb_big_endian () ? ecb_bswap (v) : v; }
872template<typename T> inline void ecb_poke (void *ptr, T v) { *(T *)ptr = v; }
873template<typename T> inline void ecb_poke_be (void *ptr, T v) { return ecb_poke <T> (ptr, ecb_host_to_be (v)); }
874template<typename T> inline void ecb_poke_le (void *ptr, T v) { return ecb_poke <T> (ptr, ecb_host_to_le (v)); }
875template<typename T> inline void ecb_poke_u (void *ptr, T v) { memcpy (ptr, &v, sizeof (v)); }
876template<typename T> inline void ecb_poke_be_u (void *ptr, T v) { return ecb_poke_u<T> (ptr, ecb_host_to_be (v)); }
877template<typename T> inline void ecb_poke_le_u (void *ptr, T v) { return ecb_poke_u<T> (ptr, ecb_host_to_le (v)); }
878
879#endif
880
881/*****************************************************************************/
882/* pointer/integer hashing */
883
884/* based on hash by Chris Wellons, https://nullprogram.com/blog/2018/07/31/ */
885ecb_function_ uint32_t ecb_mix32 (uint32_t v);
886ecb_function_ uint32_t ecb_mix32 (uint32_t v)
887{
888 v ^= v >> 16; v *= 0x7feb352dU;
889 v ^= v >> 15; v *= 0x846ca68bU;
890 v ^= v >> 16;
891 return v;
892}
893
894ecb_function_ uint32_t ecb_unmix32 (uint32_t v);
895ecb_function_ uint32_t ecb_unmix32 (uint32_t v)
896{
897 v ^= v >> 16 ; v *= 0x43021123U;
898 v ^= v >> 15 ^ v >> 30; v *= 0x1d69e2a5U;
899 v ^= v >> 16 ;
900 return v;
901}
902
 903/* based on splitmix64, by Sebastiano Vigna, https://prng.di.unimi.it/splitmix64.c */
904ecb_function_ uint64_t ecb_mix64 (uint64_t v);
905ecb_function_ uint64_t ecb_mix64 (uint64_t v)
906{
907 v ^= v >> 30; v *= 0xbf58476d1ce4e5b9U;
908 v ^= v >> 27; v *= 0x94d049bb133111ebU;
909 v ^= v >> 31;
910 return v;
911}
912
913ecb_function_ uint64_t ecb_unmix64 (uint64_t v);
914ecb_function_ uint64_t ecb_unmix64 (uint64_t v)
915{
916 v ^= v >> 31 ^ v >> 62; v *= 0x319642b2d24d8ec3U;
917 v ^= v >> 27 ^ v >> 54; v *= 0x96de1b173f119089U;
918 v ^= v >> 30 ^ v >> 60;
919 return v;
920}
921
922ecb_function_ uintptr_t ecb_ptrmix (void *p);
923ecb_function_ uintptr_t ecb_ptrmix (void *p)
924{
925 #if ECB_PTRSIZE <= 4
926 return ecb_mix32 ((uint32_t)p);
927 #else
928 return ecb_mix64 ((uint64_t)p);
929 #endif
930}
931
932ecb_function_ void *ecb_ptrunmix (uintptr_t v);
933ecb_function_ void *ecb_ptrunmix (uintptr_t v)
934{
935 #if ECB_PTRSIZE <= 4
936 return (void *)ecb_unmix32 (v);
937 #else
938 return (void *)ecb_unmix64 (v);
939 #endif
940}
941
942#if ECB_CPP
943
944template<typename T>
945inline uintptr_t ecb_ptrmix (T *p)
946{
947 return ecb_ptrmix (static_cast<void *>(p));
948}
949
950template<typename T>
951inline T *ecb_ptrunmix (uintptr_t v)
952{
953 return static_cast<T *>(ecb_ptrunmix (v));
954}
955
956#endif
957
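
For illustration, ecb_unmix32/ecb_unmix64 are intended as the inverses of ecb_mix32/ecb_mix64, and ecb_ptrunmix undoes ecb_ptrmix, so a mixed value (e.g. used as a hash table key) can be recovered again; a small sketch with made-up values:

#include <assert.h>
#include "ecb.h"

static void ecb_mix_examples (void)
{
  uint32_t v = 12345;
  int dummy;
  void *p = &dummy;

  assert (ecb_unmix32 (ecb_mix32 (v)) == v);   /* invertible on uint32_t */
  assert (ecb_unmix64 (ecb_mix64 (v)) == v);
  assert (ecb_ptrunmix (ecb_ptrmix (p)) == p); /* round-trips pointers, too */
}
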
958/*****************************************************************************/
959/* gray code */
960
961ecb_inline uint_fast8_t ecb_gray_encode8 (uint_fast8_t b) { return b ^ (b >> 1); }
962ecb_inline uint_fast16_t ecb_gray_encode16 (uint_fast16_t b) { return b ^ (b >> 1); }
963ecb_inline uint_fast32_t ecb_gray_encode32 (uint_fast32_t b) { return b ^ (b >> 1); }
964ecb_inline uint_fast64_t ecb_gray_encode64 (uint_fast64_t b) { return b ^ (b >> 1); }
965
966ecb_function_ uint8_t ecb_gray_decode8 (uint8_t g);
967ecb_function_ uint8_t ecb_gray_decode8 (uint8_t g)
968{
969 g ^= g >> 1;
970 g ^= g >> 2;
971 g ^= g >> 4;
972
973 return g;
974}
975
976ecb_function_ uint16_t ecb_gray_decode16 (uint16_t g);
977ecb_function_ uint16_t ecb_gray_decode16 (uint16_t g)
978{
979 g ^= g >> 1;
980 g ^= g >> 2;
981 g ^= g >> 4;
982 g ^= g >> 8;
983
984 return g;
985}
986
987ecb_function_ uint32_t ecb_gray_decode32 (uint32_t g);
988ecb_function_ uint32_t ecb_gray_decode32 (uint32_t g)
989{
990 g ^= g >> 1;
991 g ^= g >> 2;
992 g ^= g >> 4;
993 g ^= g >> 8;
994 g ^= g >> 16;
995
996 return g;
997}
998
999ecb_function_ uint64_t ecb_gray_decode64 (uint64_t g);
1000ecb_function_ uint64_t ecb_gray_decode64 (uint64_t g)
1001{
1002 g ^= g >> 1;
1003 g ^= g >> 2;
1004 g ^= g >> 4;
1005 g ^= g >> 8;
1006 g ^= g >> 16;
1007 g ^= g >> 32;
1008
1009 return g;
1010}
1011
1012#if ECB_CPP
1013
1014ecb_inline uint8_t ecb_gray_encode (uint8_t b) { return ecb_gray_encode8 (b); }
1015ecb_inline uint16_t ecb_gray_encode (uint16_t b) { return ecb_gray_encode16 (b); }
1016ecb_inline uint32_t ecb_gray_encode (uint32_t b) { return ecb_gray_encode32 (b); }
1017ecb_inline uint64_t ecb_gray_encode (uint64_t b) { return ecb_gray_encode64 (b); }
1018
1019ecb_inline uint8_t ecb_gray_decode (uint8_t g) { return ecb_gray_decode8 (g); }
1020ecb_inline uint16_t ecb_gray_decode (uint16_t g) { return ecb_gray_decode16 (g); }
1021ecb_inline uint32_t ecb_gray_decode (uint32_t g) { return ecb_gray_decode32 (g); }
1022ecb_inline uint64_t ecb_gray_decode (uint64_t g) { return ecb_gray_decode64 (g); }
1023
1024#endif
1025
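
For illustration, gray encode/decode round-trip, and consecutive values differ in exactly one bit after encoding; a hypothetical example function:

#include <assert.h>
#include "ecb.h"

static void ecb_gray_examples (void)
{
  assert (ecb_gray_encode8 (5) == 7); /* 101 -> 111 */
  assert (ecb_gray_decode8 (7) == 5);

  /* adjacent values map to codes one bit apart */
  assert (ecb_popcount32 (ecb_gray_encode32 (41) ^ ecb_gray_encode32 (42)) == 1);
}
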
1026/*****************************************************************************/
1027/* 2d hilbert curves */
1028
1029/* algorithm from the book Hacker's Delight, modified to not */
1030/* run into undefined behaviour for n==16 */
1031static uint32_t ecb_hilbert2d_index_to_coord32 (int n, uint32_t s);
1032static uint32_t ecb_hilbert2d_index_to_coord32 (int n, uint32_t s)
1033{
1034 uint32_t comp, swap, cs, t, sr;
1035
1036 /* pad s on the left (unused) bits with 01 (no change groups) */
1037 s |= 0x55555555U << n << n;
1038 /* "s shift right" */
1039 sr = (s >> 1) & 0x55555555U;
1040 /* compute complement and swap info in two-bit groups */
1041 cs = ((s & 0x55555555U) + sr) ^ 0x55555555U;
1042
1043 /* parallel prefix xor op to propagate both complement
1044 * and swap info together from left to right (there is
1045 * no step "cs ^= cs >> 1", so in effect it computes
1046 * two independent parallel prefix operations on two
1047 * interleaved sets of sixteen bits).
1048 */
1049 cs ^= cs >> 2;
1050 cs ^= cs >> 4;
1051 cs ^= cs >> 8;
1052 cs ^= cs >> 16;
1053
1054 /* separate swap and complement bits */
1055 swap = cs & 0x55555555U;
1056 comp = (cs >> 1) & 0x55555555U;
1057
1058 /* calculate coordinates in odd and even bit positions */
1059 t = (s & swap) ^ comp;
1060 s = s ^ sr ^ t ^ (t << 1);
1061
1062 /* unpad/clear out any junk on the left */
1063 s = s & ((1 << n << n) - 1);
1064
1065 /* Now "unshuffle" to separate the x and y bits. */
1066 t = (s ^ (s >> 1)) & 0x22222222U; s ^= t ^ (t << 1);
1067 t = (s ^ (s >> 2)) & 0x0c0c0c0cU; s ^= t ^ (t << 2);
1068 t = (s ^ (s >> 4)) & 0x00f000f0U; s ^= t ^ (t << 4);
1069 t = (s ^ (s >> 8)) & 0x0000ff00U; s ^= t ^ (t << 8);
1070
1071 /* now s contains two 16-bit coordinates */
1072 return s;
1073}
1074
1075/* 64 bit, a straightforward extension to the 32 bit case */
1076static uint64_t ecb_hilbert2d_index_to_coord64 (int n, uint64_t s);
1077static uint64_t ecb_hilbert2d_index_to_coord64 (int n, uint64_t s)
1078{
1079 uint64_t comp, swap, cs, t, sr;
1080
1081 /* pad s on the left (unused) bits with 01 (no change groups) */
1082 s |= 0x5555555555555555U << n << n;
1083 /* "s shift right" */
1084 sr = (s >> 1) & 0x5555555555555555U;
1085 /* compute complement and swap info in two-bit groups */
1086 cs = ((s & 0x5555555555555555U) + sr) ^ 0x5555555555555555U;
1087
1088 /* parallel prefix xor op to propagate both complement
1089 * and swap info together from left to right (there is
1090 * no step "cs ^= cs >> 1", so in effect it computes
1091 * two independent parallel prefix operations on two
1092 * interleaved sets of thirty-two bits).
1093 */
1094 cs ^= cs >> 2;
1095 cs ^= cs >> 4;
1096 cs ^= cs >> 8;
1097 cs ^= cs >> 16;
1098 cs ^= cs >> 32;
1099
1100 /* separate swap and complement bits */
1101 swap = cs & 0x5555555555555555U;
1102 comp = (cs >> 1) & 0x5555555555555555U;
1103
1104 /* calculate coordinates in odd and even bit positions */
1105 t = (s & swap) ^ comp;
1106 s = s ^ sr ^ t ^ (t << 1);
1107
1108 /* unpad/clear out any junk on the left */
1109 s = s & ((1 << n << n) - 1);
1110
1111 /* Now "unshuffle" to separate the x and y bits. */
1112 t = (s ^ (s >> 1)) & 0x2222222222222222U; s ^= t ^ (t << 1);
1113 t = (s ^ (s >> 2)) & 0x0c0c0c0c0c0c0c0cU; s ^= t ^ (t << 2);
1114 t = (s ^ (s >> 4)) & 0x00f000f000f000f0U; s ^= t ^ (t << 4);
1115 t = (s ^ (s >> 8)) & 0x0000ff000000ff00U; s ^= t ^ (t << 8);
1116 t = (s ^ (s >> 16)) & 0x00000000ffff0000U; s ^= t ^ (t << 16);
1117
1118 /* now s contains two 32-bit coordinates */
1119 return s;
1120}
1121
 1122/* algorithm from the book Hacker's Delight, but a similar algorithm */

1123/* is given in https://doi.org/10.1002/spe.4380160103 */
1124/* this has been slightly improved over the original version */
1125ecb_function_ uint32_t ecb_hilbert2d_coord_to_index32 (int n, uint32_t xy);
1126ecb_function_ uint32_t ecb_hilbert2d_coord_to_index32 (int n, uint32_t xy)
1127{
1128 uint32_t row;
1129 uint32_t state = 0;
1130 uint32_t s = 0;
1131
1132 do
1133 {
1134 --n;
1135
1136 row = 4 * state
1137 | (2 & (xy >> n >> 15))
1138 | (1 & (xy >> n ));
1139
1140 /* these funky constants are lookup tables for two-bit values */
1141 s = (s << 2) | (0x361e9cb4U >> 2 * row) & 3;
1142 state = (0x8fe65831U >> 2 * row) & 3;
1143 }
1144 while (n > 0);
1145
1146 return s;
1147}
1148
1149/* 64 bit, essentially the same as 32 bit */
1150ecb_function_ uint64_t ecb_hilbert2d_coord_to_index64 (int n, uint64_t xy);
1151ecb_function_ uint64_t ecb_hilbert2d_coord_to_index64 (int n, uint64_t xy)
1152{
1153 uint32_t row;
1154 uint32_t state = 0;
1155 uint64_t s = 0;
1156
1157 do
1158 {
1159 --n;
1160
1161 row = 4 * state
1162 | (2 & (xy >> n >> 31))
1163 | (1 & (xy >> n ));
1164
1165 /* these funky constants are lookup tables for two-bit values */
1166 s = (s << 2) | (0x361e9cb4U >> 2 * row) & 3;
1167 state = (0x8fe65831U >> 2 * row) & 3;
1168 }
1169 while (n > 0);
1170
1171 return s;
1172}
1173
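
A short sketch of how the two directions fit together: for an order-n curve (n bits per coordinate, packed into one word as the functions above expect) coord_to_index and index_to_coord are intended to be inverses of each other. The loop below just illustrates the calling convention and the expected round-trip for an order-8 curve; it is an example, not part of the header.

#include <assert.h>
#include "ecb.h"

static void ecb_hilbert_example (void)
{
  int n = 8; /* order-8 curve: 8 bits per coordinate, 2**16 cells */
  uint32_t s;

  for (s = 0; s < ((uint32_t)1 << (2 * n)); ++s)
    {
      uint32_t xy = ecb_hilbert2d_index_to_coord32 (n, s);
      assert (ecb_hilbert2d_coord_to_index32 (n, xy) == s);
    }
}
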
1174/*****************************************************************************/
1175/* division */
633 1176
634#if ECB_GCC_VERSION(3,0) || ECB_C99 1177#if ECB_GCC_VERSION(3,0) || ECB_C99
1178 /* C99 tightened the definition of %, so we can use a more efficient version */
635 #define ecb_mod(m,n) ((m) % (n) + ((m) % (n) < 0 ? (n) : 0)) 1179 #define ecb_mod(m,n) ((m) % (n) + ((m) % (n) < 0 ? (n) : 0))
636#else 1180#else
637 #define ecb_mod(m,n) ((m) < 0 ? ((n) - 1 - ((-1 - (m)) % (n))) : ((m) % (n))) 1181 #define ecb_mod(m,n) ((m) < 0 ? ((n) - 1 - ((-1 - (m)) % (n))) : ((m) % (n)))
638#endif 1182#endif
639 1183
650 } 1194 }
651#else 1195#else
652 #define ecb_div_rd(val,div) ((val) < 0 ? - ((-(val) + (div) - 1) / (div)) : ((val) ) / (div)) 1196 #define ecb_div_rd(val,div) ((val) < 0 ? - ((-(val) + (div) - 1) / (div)) : ((val) ) / (div))
653 #define ecb_div_ru(val,div) ((val) < 0 ? - ((-(val) ) / (div)) : ((val) + (div) - 1) / (div)) 1197 #define ecb_div_ru(val,div) ((val) < 0 ? - ((-(val) ) / (div)) : ((val) + (div) - 1) / (div))
654#endif 1198#endif
1199
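
For illustration, ecb_mod always yields a result in [0, n) for positive n, and ecb_div_rd/ecb_div_ru round towards negative/positive infinity respectively, unlike plain C division, which truncates towards zero; a few sample values:

#include <assert.h>
#include "ecb.h"

static void ecb_div_examples (void)
{
  assert (ecb_mod (-7, 5) == 3);     /* plain -7 % 5 would be -2 in C99 */
  assert (ecb_mod ( 7, 5) == 2);

  assert (ecb_div_rd (-7, 2) == -4); /* round down; -7 / 2 truncates to -3 */
  assert (ecb_div_ru (-7, 2) == -3); /* round up */
  assert (ecb_div_ru ( 7, 2) ==  4);
}
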
1200/*****************************************************************************/
1201/* array length */
655 1202
656#if ecb_cplusplus_does_not_suck 1203#if ecb_cplusplus_does_not_suck
657 /* does not work for local types (http://www.open-std.org/jtc1/sc22/wg21/docs/papers/2008/n2657.htm) */ 1204 /* does not work for local types (http://www.open-std.org/jtc1/sc22/wg21/docs/papers/2008/n2657.htm) */
658 template<typename T, int N> 1205 template<typename T, int N>
659 static inline int ecb_array_length (const T (&arr)[N]) 1206 static inline int ecb_array_length (const T (&arr)[N])
661 return N; 1208 return N;
662 } 1209 }
663#else 1210#else
664 #define ecb_array_length(name) (sizeof (name) / sizeof (name [0])) 1211 #define ecb_array_length(name) (sizeof (name) / sizeof (name [0]))
665#endif 1212#endif
1213
1214/*****************************************************************************/
1215/* IEEE 754-2008 half float conversions */
1216
1217ecb_function_ ecb_const uint32_t ecb_binary16_to_binary32 (uint32_t x);
1218ecb_function_ ecb_const uint32_t ecb_binary16_to_binary32 (uint32_t x)
1219{
1220 unsigned int s = (x & 0x8000) << (31 - 15);
1221 int e = (x >> 10) & 0x001f;
1222 unsigned int m = x & 0x03ff;
1223
1224 if (ecb_expect_false (e == 31))
1225 /* infinity or NaN */
1226 e = 255 - (127 - 15);
1227 else if (ecb_expect_false (!e))
1228 {
1229 if (ecb_expect_true (!m))
1230 /* zero, handled by code below by forcing e to 0 */
1231 e = 0 - (127 - 15);
1232 else
1233 {
1234 /* subnormal, renormalise */
1235 unsigned int s = 10 - ecb_ld32 (m);
1236
1237 m = (m << s) & 0x3ff; /* mask implicit bit */
1238 e -= s - 1;
1239 }
1240 }
1241
1242 /* e and m now are normalised, or zero, (or inf or nan) */
1243 e += 127 - 15;
1244
1245 return s | (e << 23) | (m << (23 - 10));
1246}
1247
1248ecb_function_ ecb_const uint16_t ecb_binary32_to_binary16 (uint32_t x);
1249ecb_function_ ecb_const uint16_t ecb_binary32_to_binary16 (uint32_t x)
1250{
1251 unsigned int s = (x >> 16) & 0x00008000; /* sign bit, the easy part */
1252 int e = ((x >> 23) & 0x000000ff) - (127 - 15); /* the desired exponent */
1253 unsigned int m = x & 0x007fffff;
1254
1255 x &= 0x7fffffff;
1256
1257 /* if it's within range of binary16 normals, use fast path */
1258 if (ecb_expect_true (0x38800000 <= x && x <= 0x477fefff))
1259 {
1260 /* mantissa round-to-even */
1261 m += 0x00000fff + ((m >> (23 - 10)) & 1);
1262
1263 /* handle overflow */
1264 if (ecb_expect_false (m >= 0x00800000))
1265 {
1266 m >>= 1;
1267 e += 1;
1268 }
1269
1270 return s | (e << 10) | (m >> (23 - 10));
1271 }
1272
1273 /* handle large numbers and infinity */
1274 if (ecb_expect_true (0x477fefff < x && x <= 0x7f800000))
1275 return s | 0x7c00;
1276
1277 /* handle zero, subnormals and small numbers */
1278 if (ecb_expect_true (x < 0x38800000))
1279 {
1280 /* zero */
1281 if (ecb_expect_true (!x))
1282 return s;
1283
1284 /* handle subnormals */
1285
1286 /* too small, will be zero */
1287 if (e < (14 - 24)) /* might not be sharp, but is good enough */
1288 return s;
1289
1290 m |= 0x00800000; /* make implicit bit explicit */
1291
1292 /* very tricky - we need to round to the nearest e (+10) bit value */
1293 {
1294 unsigned int bits = 14 - e;
1295 unsigned int half = (1 << (bits - 1)) - 1;
1296 unsigned int even = (m >> bits) & 1;
1297
1298 /* if this overflows, we will end up with a normalised number */
1299 m = (m + half + even) >> bits;
1300 }
1301
1302 return s | m;
1303 }
1304
1305 /* handle NaNs, preserve leftmost nan bits, but make sure we don't turn them into infinities */
1306 m >>= 13;
1307
1308 return s | 0x7c00 | m | !m;
1309}
1310
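
A small sketch of the binary16 conversions above, operating on the raw bit patterns (0x3c00 is 1.0 in binary16, 0x3f800000 is 1.0f in binary32); the example function is hypothetical:

#include <assert.h>
#include "ecb.h"

static void ecb_binary16_examples (void)
{
  assert (ecb_binary16_to_binary32 (0x3c00)     == 0x3f800000); /*  1.0h ->  1.0f */
  assert (ecb_binary32_to_binary16 (0x3f800000) == 0x3c00);     /*  1.0f ->  1.0h */
  assert (ecb_binary16_to_binary32 (0xc000)     == 0xc0000000); /* -2.0h -> -2.0f */
}
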
1311/*******************************************************************************/
1312/* fast integer to ascii */
1313
1314/*
1315 * This code is pretty complicated because it is general. The idea behind it,
1316 * however, is pretty simple: first, the number is multiplied with a scaling
1317 * factor (2**bits / 10**(digits-1)) to convert the integer into a fixed-point
1318 * number with the first digit in the upper bits.
1319 * Then this digit is converted to text and masked out. The resulting number
1320 * is then multiplied by 10, by multiplying the fixed point representation
1321 * by 5 and shifting the (binary) decimal point one to the right, so a 4.28
1322 * format becomes 5.27, 6.26 and so on.
1323 * The rest involves only advancing the pointer if we already generated a
1324 * non-zero digit, so leading zeroes are overwritten.
1325 */
1326
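
As a worked example of the fixed-point idea, using the parameters of the two-digit variant ecb_i2a_2 defined below (uint32_t, bits = 10, digitmask = 10): for u = 57 the scaling factor is (2**10 - 1 + 10) / 10 = 103, so x = 57 * 103 = 5871, a 10-bit fixed-point number with the top digit above the binary point. The first digit is 5871 >> 10 = 5; masking to 10 bits and multiplying by 5 gives (5871 & 1023) * 5 = 3755 (now a 9-bit fixed-point number), and the second digit is 3755 >> 9 = 7, reproducing "57".
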
1327/* simply return a mask with "bits" bits set */
1328#define ecb_i2a_mask(type,bits) ((((type)1) << (bits)) - 1)
1329
 1330/* output a single digit. maskvalue is 10**digitidx */
1331#define ecb_i2a_digit(type,bits,digitmask,maskvalue,digitidx) \
1332 if (digitmask >= maskvalue) /* constant, used to decide how many digits to generate */ \
1333 { \
1334 char digit = x >> (bits - digitidx); /* calculate the topmost digit */ \
1335 *ptr = digit + '0'; /* output it */ \
1336 nz = (digitmask == maskvalue) || nz || digit; /* first term == always output last digit */ \
1337 ptr += nz; /* output digit only if non-zero digit seen */ \
1338 x = (x & ecb_i2a_mask (type, bits - digitidx)) * 5; /* *10, but shift decimal point right */ \
1339 }
1340
1341/* convert integer to fixed point format and multiply out digits, highest first */
1342/* requires magic constants: max. digits and number of bits after the decimal point */
1343#define ecb_i2a_def(suffix,ptr,v,type,bits,digitmask,lz) \
1344ecb_inline char *ecb_i2a_ ## suffix (char *ptr, uint32_t u) \
1345{ \
1346 char nz = lz; /* non-zero digit seen? */ \
1347 /* convert to x.bits fixed-point */ \
1348 type x = u * ((ecb_i2a_mask (type, bits) + digitmask) / digitmask); \
1349 /* output up to 10 digits */ \
1350 ecb_i2a_digit (type,bits,digitmask, 1, 0); \
1351 ecb_i2a_digit (type,bits,digitmask, 10, 1); \
1352 ecb_i2a_digit (type,bits,digitmask, 100, 2); \
1353 ecb_i2a_digit (type,bits,digitmask, 1000, 3); \
1354 ecb_i2a_digit (type,bits,digitmask, 10000, 4); \
1355 ecb_i2a_digit (type,bits,digitmask, 100000, 5); \
1356 ecb_i2a_digit (type,bits,digitmask, 1000000, 6); \
1357 ecb_i2a_digit (type,bits,digitmask, 10000000, 7); \
1358 ecb_i2a_digit (type,bits,digitmask, 100000000, 8); \
1359 ecb_i2a_digit (type,bits,digitmask, 1000000000, 9); \
1360 return ptr; \
1361}
1362
1363/* predefined versions of the above, for various digits */
1364/* ecb_i2a_xN = almost N digits, limit defined by macro */
1365/* ecb_i2a_N = up to N digits, leading zeroes suppressed */
1366/* ecb_i2a_0N = exactly N digits, including leading zeroes */
1367
1368/* non-leading-zero versions, limited range */
1369#define ECB_I2A_MAX_X5 59074 /* limit for ecb_i2a_x5 */
1370#define ECB_I2A_MAX_X10 2932500665 /* limit for ecb_i2a_x10 */
1371ecb_i2a_def ( x5, ptr, v, uint32_t, 26, 10000, 0)
1372ecb_i2a_def (x10, ptr, v, uint64_t, 60, 1000000000, 0)
1373
 1374/* non-leading-zero versions, all digits, 4 and 9 are optimal for 32/64 bit */
1375ecb_i2a_def ( 2, ptr, v, uint32_t, 10, 10, 0)
1376ecb_i2a_def ( 3, ptr, v, uint32_t, 12, 100, 0)
1377ecb_i2a_def ( 4, ptr, v, uint32_t, 26, 1000, 0)
1378ecb_i2a_def ( 5, ptr, v, uint64_t, 30, 10000, 0)
1379ecb_i2a_def ( 6, ptr, v, uint64_t, 36, 100000, 0)
1380ecb_i2a_def ( 7, ptr, v, uint64_t, 44, 1000000, 0)
1381ecb_i2a_def ( 8, ptr, v, uint64_t, 50, 10000000, 0)
1382ecb_i2a_def ( 9, ptr, v, uint64_t, 56, 100000000, 0)
1383
1384/* leading-zero versions, all digits, 04 and 09 are optimal for 32/64 bit */
1385ecb_i2a_def (02, ptr, v, uint32_t, 10, 10, 1)
1386ecb_i2a_def (03, ptr, v, uint32_t, 12, 100, 1)
1387ecb_i2a_def (04, ptr, v, uint32_t, 26, 1000, 1)
1388ecb_i2a_def (05, ptr, v, uint64_t, 30, 10000, 1)
1389ecb_i2a_def (06, ptr, v, uint64_t, 36, 100000, 1)
1390ecb_i2a_def (07, ptr, v, uint64_t, 44, 1000000, 1)
1391ecb_i2a_def (08, ptr, v, uint64_t, 50, 10000000, 1)
1392ecb_i2a_def (09, ptr, v, uint64_t, 56, 100000000, 1)
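/* Editor's illustration (not part of ecb.h): a small sketch contrasting the
 * leading-zero-suppressing and fixed-width variants defined above. The name
 * ecb_i2a_variants_example is hypothetical. */
#include <stdio.h>

static void
ecb_i2a_variants_example (void)
{
  char buf[16];
  char *end;

  end = ecb_i2a_4 (buf, 42);  /* up to 4 digits, leading zeroes suppressed */
  *end = 0;                   /* the ecb_i2a_* converters do not NUL-terminate */
  printf ("%s\n", buf);       /* prints "42" */

  end = ecb_i2a_04 (buf, 42); /* exactly 4 digits, zero-padded */
  *end = 0;
  printf ("%s\n", buf);       /* prints "0042" */
}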
1393
1394#define ECB_I2A_I32_DIGITS 11
1395#define ECB_I2A_U32_DIGITS 10
1396#define ECB_I2A_I64_DIGITS 20
1397#define ECB_I2A_U64_DIGITS 21
1398#define ECB_I2A_MAX_DIGITS 21
1399
1400ecb_function_ char * ecb_i2a_u32 (char *ptr, uint32_t u);
1401ecb_function_ char * ecb_i2a_u32 (char *ptr, uint32_t u)
1402{
1403 #if ECB_64BIT_NATIVE
1404 if (ecb_expect_true (u <= ECB_I2A_MAX_X10))
1405 ptr = ecb_i2a_x10 (ptr, u);
1406 else /* x10 almost, but not fully, covers 32 bit */
1407 {
1408 uint32_t u1 = u % 1000000000;
1409 uint32_t u2 = u / 1000000000;
1410
1411 *ptr++ = u2 + '0';
1412 ptr = ecb_i2a_09 (ptr, u1);
1413 }
1414 #else
1415 if (ecb_expect_true (u <= ECB_I2A_MAX_X5))
 1416 ptr = ecb_i2a_x5 (ptr, u);
1417 else if (ecb_expect_true (u <= ECB_I2A_MAX_X5 * 10000))
1418 {
1419 uint32_t u1 = u % 10000;
1420 uint32_t u2 = u / 10000;
1421
1422 ptr = ecb_i2a_x5 (ptr, u2);
1423 ptr = ecb_i2a_04 (ptr, u1);
1424 }
1425 else
1426 {
1427 uint32_t u1 = u % 10000;
1428 uint32_t ua = u / 10000;
1429 uint32_t u2 = ua % 10000;
1430 uint32_t u3 = ua / 10000;
1431
1432 ptr = ecb_i2a_2 (ptr, u3);
1433 ptr = ecb_i2a_04 (ptr, u2);
1434 ptr = ecb_i2a_04 (ptr, u1);
1435 }
1436 #endif
1437
1438 return ptr;
1439}
1440
1441ecb_function_ char * ecb_i2a_i32 (char *ptr, int32_t v);
1442ecb_function_ char * ecb_i2a_i32 (char *ptr, int32_t v)
1443{
1444 *ptr = '-'; ptr += v < 0;
1445 uint32_t u = v < 0 ? -(uint32_t)v : v;
1446
1447 #if ECB_64BIT_NATIVE
1448 ptr = ecb_i2a_x10 (ptr, u); /* x10 fully covers 31 bit */
1449 #else
1450 ptr = ecb_i2a_u32 (ptr, u);
1451 #endif
1452
1453 return ptr;
1454}
1455
1456ecb_function_ char * ecb_i2a_u64 (char *ptr, uint64_t u);
1457ecb_function_ char * ecb_i2a_u64 (char *ptr, uint64_t u)
1458{
1459 #if ECB_64BIT_NATIVE
1460 if (ecb_expect_true (u <= ECB_I2A_MAX_X10))
1461 ptr = ecb_i2a_x10 (ptr, u);
1462 else if (ecb_expect_false (u <= ECB_I2A_MAX_X10 * 1000000000))
1463 {
1464 uint64_t u1 = u % 1000000000;
1465 uint64_t u2 = u / 1000000000;
1466
1467 ptr = ecb_i2a_x10 (ptr, u2);
1468 ptr = ecb_i2a_09 (ptr, u1);
1469 }
1470 else
1471 {
1472 uint64_t u1 = u % 1000000000;
1473 uint64_t ua = u / 1000000000;
1474 uint64_t u2 = ua % 1000000000;
1475 uint64_t u3 = ua / 1000000000;
1476
1477 ptr = ecb_i2a_2 (ptr, u3);
1478 ptr = ecb_i2a_09 (ptr, u2);
1479 ptr = ecb_i2a_09 (ptr, u1);
1480 }
1481 #else
1482 if (ecb_expect_true (u <= ECB_I2A_MAX_X5))
1483 ptr = ecb_i2a_x5 (ptr, u);
1484 else
1485 {
1486 uint64_t u1 = u % 10000;
1487 uint64_t u2 = u / 10000;
1488
1489 ptr = ecb_i2a_u64 (ptr, u2);
1490 ptr = ecb_i2a_04 (ptr, u1);
1491 }
1492 #endif
1493
1494 return ptr;
1495}
1496
1497ecb_function_ char * ecb_i2a_i64 (char *ptr, int64_t v);
1498ecb_function_ char * ecb_i2a_i64 (char *ptr, int64_t v)
1499{
1500 *ptr = '-'; ptr += v < 0;
1501 uint64_t u = v < 0 ? -(uint64_t)v : v;
1502
1503 #if ECB_64BIT_NATIVE
1504 if (ecb_expect_true (u <= ECB_I2A_MAX_X10))
1505 ptr = ecb_i2a_x10 (ptr, u);
1506 else if (ecb_expect_false (u <= ECB_I2A_MAX_X10 * 1000000000))
1507 {
1508 uint64_t u1 = u % 1000000000;
1509 uint64_t u2 = u / 1000000000;
1510
1511 ptr = ecb_i2a_x10 (ptr, u2);
1512 ptr = ecb_i2a_09 (ptr, u1);
1513 }
1514 else
1515 {
1516 uint64_t u1 = u % 1000000000;
1517 uint64_t ua = u / 1000000000;
1518 uint64_t u2 = ua % 1000000000;
1519 uint64_t u3 = ua / 1000000000;
1520
 1521 /* 2**63 is 19 digits, so the top is exactly one digit */
1522 *ptr++ = u3 + '0';
1523 ptr = ecb_i2a_09 (ptr, u2);
1524 ptr = ecb_i2a_09 (ptr, u1);
1525 }
1526 #else
1527 ptr = ecb_i2a_u64 (ptr, u);
1528 #endif
1529
1530 return ptr;
1531}
666 1532
667/*******************************************************************************/ 1533/*******************************************************************************/
668/* floating point stuff, can be disabled by defining ECB_NO_LIBM */ 1534/* floating point stuff, can be disabled by defining ECB_NO_LIBM */
669 1535
670/* basically, everything uses "ieee pure-endian" floating point numbers */ 1536/* basically, everything uses "ieee pure-endian" floating point numbers */
683 || defined __sh__ \ 1549 || defined __sh__ \
684 || defined _M_IX86 || defined ECB_MSVC_AMD64 || defined _M_IA64 \ 1550 || defined _M_IX86 || defined ECB_MSVC_AMD64 || defined _M_IA64 \
685 || (defined __arm__ && (defined __ARM_EABI__ || defined __EABI__ || defined __VFP_FP__ || defined _WIN32_WCE || defined __ANDROID__)) \ 1551 || (defined __arm__ && (defined __ARM_EABI__ || defined __EABI__ || defined __VFP_FP__ || defined _WIN32_WCE || defined __ANDROID__)) \
686 || defined __aarch64__ 1552 || defined __aarch64__
687 #define ECB_STDFP 1 1553 #define ECB_STDFP 1
688 #include <string.h> /* for memcpy */
689#else 1554#else
690 #define ECB_STDFP 0 1555 #define ECB_STDFP 0
691#endif 1556#endif
692 1557
693#ifndef ECB_NO_LIBM 1558#ifndef ECB_NO_LIBM
713 #else 1578 #else
714 #define ecb_ldexpf(x,e) (float) ldexp ((double) (x), (e)) 1579 #define ecb_ldexpf(x,e) (float) ldexp ((double) (x), (e))
715 #define ecb_frexpf(x,e) (float) frexp ((double) (x), (e)) 1580 #define ecb_frexpf(x,e) (float) frexp ((double) (x), (e))
716 #endif 1581 #endif
717 1582
718 /* converts an ieee half/binary16 to a float */
719 ecb_function_ ecb_const float ecb_binary16_to_float (uint16_t x);
720 ecb_function_ ecb_const float
721 ecb_binary16_to_float (uint16_t x)
722 {
723 int e = (x >> 10) & 0x1f;
724 int m = x & 0x3ff;
725 float r;
726
727 if (!e ) r = ecb_ldexpf (m , -24);
728 else if (e != 31) r = ecb_ldexpf (m + 0x400, e - 25);
729 else if (m ) r = ECB_NAN;
730 else r = ECB_INFINITY;
731
732 return x & 0x8000 ? -r : r;
733 }
734
735 /* convert a float to ieee single/binary32 */ 1583 /* convert a float to ieee single/binary32 */
736 ecb_function_ ecb_const uint32_t ecb_float_to_binary32 (float x); 1584 ecb_function_ ecb_const uint32_t ecb_float_to_binary32 (float x);
737 ecb_function_ ecb_const uint32_t 1585 ecb_function_ ecb_const uint32_t ecb_float_to_binary32 (float x)
738 ecb_float_to_binary32 (float x)
739 { 1586 {
740 uint32_t r; 1587 uint32_t r;
741 1588
742 #if ECB_STDFP 1589 #if ECB_STDFP
743 memcpy (&r, &x, 4); 1590 memcpy (&r, &x, 4);
772 return r; 1619 return r;
773 } 1620 }
774 1621
775 /* converts an ieee single/binary32 to a float */ 1622 /* converts an ieee single/binary32 to a float */
776 ecb_function_ ecb_const float ecb_binary32_to_float (uint32_t x); 1623 ecb_function_ ecb_const float ecb_binary32_to_float (uint32_t x);
777 ecb_function_ ecb_const float 1624 ecb_function_ ecb_const float ecb_binary32_to_float (uint32_t x)
778 ecb_binary32_to_float (uint32_t x)
779 { 1625 {
780 float r; 1626 float r;
781 1627
782 #if ECB_STDFP 1628 #if ECB_STDFP
783 memcpy (&r, &x, 4); 1629 memcpy (&r, &x, 4);
802 return r; 1648 return r;
803 } 1649 }
804 1650
805 /* convert a double to ieee double/binary64 */ 1651 /* convert a double to ieee double/binary64 */
806 ecb_function_ ecb_const uint64_t ecb_double_to_binary64 (double x); 1652 ecb_function_ ecb_const uint64_t ecb_double_to_binary64 (double x);
807 ecb_function_ ecb_const uint64_t 1653 ecb_function_ ecb_const uint64_t ecb_double_to_binary64 (double x)
808 ecb_double_to_binary64 (double x)
809 { 1654 {
810 uint64_t r; 1655 uint64_t r;
811 1656
812 #if ECB_STDFP 1657 #if ECB_STDFP
813 memcpy (&r, &x, 8); 1658 memcpy (&r, &x, 8);
842 return r; 1687 return r;
843 } 1688 }
844 1689
845 /* converts an ieee double/binary64 to a double */ 1690 /* converts an ieee double/binary64 to a double */
846 ecb_function_ ecb_const double ecb_binary64_to_double (uint64_t x); 1691 ecb_function_ ecb_const double ecb_binary64_to_double (uint64_t x);
847 ecb_function_ ecb_const double 1692 ecb_function_ ecb_const double ecb_binary64_to_double (uint64_t x)
848 ecb_binary64_to_double (uint64_t x)
849 { 1693 {
850 double r; 1694 double r;
851 1695
852 #if ECB_STDFP 1696 #if ECB_STDFP
853 memcpy (&r, &x, 8); 1697 memcpy (&r, &x, 8);
870 #endif 1714 #endif
871 1715
872 return r; 1716 return r;
873 } 1717 }
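  /* Editor's illustration (not part of ecb.h): a round-trip sketch for the
   * binary64 pair above, using well-known IEEE bit patterns. The name
   * ecb_binary64_example is hypothetical. */
  #include <assert.h>

  static void
  ecb_binary64_example (void)
  {
    assert (ecb_double_to_binary64 ( 1.0) == 0x3FF0000000000000ULL);
    assert (ecb_double_to_binary64 (-2.5) == 0xC004000000000000ULL);
    assert (ecb_binary64_to_double (0x3FF0000000000000ULL) == 1.0);
  }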
874 1718
875#endif 1719 /* convert a float to ieee half/binary16 */
1720 ecb_function_ ecb_const uint16_t ecb_float_to_binary16 (float x);
1721 ecb_function_ ecb_const uint16_t ecb_float_to_binary16 (float x)
1722 {
1723 return ecb_binary32_to_binary16 (ecb_float_to_binary32 (x));
1724 }
876 1725
877#endif 1726 /* convert an ieee half/binary16 to float */
1727 ecb_function_ ecb_const float ecb_binary16_to_float (uint16_t x);
1728 ecb_function_ ecb_const float ecb_binary16_to_float (uint16_t x)
1729 {
1730 return ecb_binary32_to_float (ecb_binary16_to_binary32 (x));
1731 }
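  /* Editor's illustration (not part of ecb.h): exactly representable values
   * such as 1.5 survive the float -> binary16 -> float round trip through the
   * two wrappers above unchanged. The name ecb_half_example is hypothetical. */
  #include <assert.h>

  static void
  ecb_half_example (void)
  {
    uint16_t h = ecb_float_to_binary16 (1.5f);

    assert (h == 0x3E00);                       /* 1.5 in binary16 */
    assert (ecb_binary16_to_float (h) == 1.5f); /* exact, since 1.5 is representable in binary16 */
  }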
878 1732
1733#endif
1734
1735#endif
1736
