/cvs/libecb/ecb.h
Revision: 1.210
Committed: Fri Mar 25 15:28:08 2022 UTC (2 years, 4 months ago) by root
Content type: text/plain
Branch: MAIN
Changes since 1.209: +23 -17 lines
Log Message:
*** empty log message ***

File Contents

# User Rev Content
1 root 1.1 /*
2 root 1.17 * libecb - http://software.schmorp.de/pkg/libecb
3 root 1.1 *
4 root 1.189 * Copyright (©) 2009-2015,2018-2021 Marc Alexander Lehmann <libecb@schmorp.de>
5 root 1.7 * Copyright (©) 2011 Emanuele Giaquinta
6 root 1.1 * All rights reserved.
7     *
8     * Redistribution and use in source and binary forms, with or without modifica-
9     * tion, are permitted provided that the following conditions are met:
10     *
11     * 1. Redistributions of source code must retain the above copyright notice,
12     * this list of conditions and the following disclaimer.
13     *
14     * 2. Redistributions in binary form must reproduce the above copyright
15     * notice, this list of conditions and the following disclaimer in the
16     * documentation and/or other materials provided with the distribution.
17     *
18     * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
19     * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MER-
20     * CHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
21     * EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPE-
22     * CIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
23     * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
24     * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
25     * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTH-
26     * ERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
27     * OF THE POSSIBILITY OF SUCH DAMAGE.
28 root 1.133 *
29     * Alternatively, the contents of this file may be used under the terms of
30     * the GNU General Public License ("GPL") version 2 or any later version,
31     * in which case the provisions of the GPL are applicable instead of
32     * the above. If you wish to allow the use of your version of this file
33     * only under the terms of the GPL and not to allow others to use your
34     * version of this file under the BSD license, indicate your decision
35     * by deleting the provisions above and replace them with the notice
36     * and other provisions required by the GPL. If you do not delete the
37     * provisions above, a recipient may use your version of this file under
38     * either the BSD or the GPL.
39 root 1.1 */
40    
41     #ifndef ECB_H
42     #define ECB_H
43    
44 root 1.87 /* 16 bits major, 16 bits minor */
45 root 1.204 #define ECB_VERSION 0x0001000c
46 root 1.87
47 root 1.184 #include <string.h> /* for memcpy */
48    
49 root 1.187 #if defined (_WIN32) && !defined (__MINGW32__)
50 root 1.44 typedef signed char int8_t;
51     typedef unsigned char uint8_t;
52 root 1.180 typedef signed char int_fast8_t;
53     typedef unsigned char uint_fast8_t;
54 root 1.44 typedef signed short int16_t;
55     typedef unsigned short uint16_t;
56 root 1.180 typedef signed int int_fast16_t;
57     typedef unsigned int uint_fast16_t;
58 root 1.44 typedef signed int int32_t;
59     typedef unsigned int uint32_t;
60 root 1.180 typedef signed int int_fast32_t;
61     typedef unsigned int uint_fast32_t;
62 root 1.44 #if __GNUC__
63     typedef signed long long int64_t;
64     typedef unsigned long long uint64_t;
65 root 1.51 #else /* _MSC_VER || __BORLANDC__ */
66 root 1.44 typedef signed __int64 int64_t;
67     typedef unsigned __int64 uint64_t;
68     #endif
69 root 1.180 typedef int64_t int_fast64_t;
70     typedef uint64_t uint_fast64_t;
71 root 1.87 #ifdef _WIN64
72     #define ECB_PTRSIZE 8
73     typedef uint64_t uintptr_t;
74     typedef int64_t intptr_t;
75     #else
76     #define ECB_PTRSIZE 4
77     typedef uint32_t uintptr_t;
78     typedef int32_t intptr_t;
79     #endif
80 root 1.44 #else
81     #include <inttypes.h>
82 root 1.173 #if (defined INTPTR_MAX ? INTPTR_MAX : ULONG_MAX) > 0xffffffffU
83 root 1.87 #define ECB_PTRSIZE 8
84     #else
85     #define ECB_PTRSIZE 4
86     #endif
87 root 1.44 #endif
88 root 1.6
89 sf-exg 1.159 #define ECB_GCC_AMD64 (__amd64 || __amd64__ || __x86_64 || __x86_64__)
90     #define ECB_MSVC_AMD64 (_M_AMD64 || _M_X64)
91    
92 root 1.179 #ifndef ECB_OPTIMIZE_SIZE
93     #if __OPTIMIZE_SIZE__
94     #define ECB_OPTIMIZE_SIZE 1
95     #else
96     #define ECB_OPTIMIZE_SIZE 0
97     #endif
98     #endif
99    
100 root 1.114 /* work around x32 idiocy by defining proper macros */
101 sf-exg 1.159 #if ECB_GCC_AMD64 || ECB_MSVC_AMD64
102 root 1.119 #if _ILP32
103 root 1.115 #define ECB_AMD64_X32 1
104 root 1.114 #else
105 root 1.115 #define ECB_AMD64 1
106 root 1.114 #endif
107     #endif
108    
109 root 1.189 #if ECB_PTRSIZE >= 8 || ECB_AMD64_X32
110     #define ECB_64BIT_NATIVE 1
111     #else
112     #define ECB_64BIT_NATIVE 0
113     #endif
114    
115 root 1.12 /* many compilers define __GNUC__ to some version but then only implement
116     * what their idiot authors think are the "more important" extensions,
117 sf-exg 1.59 * causing enormous grief in return for some better fake benchmark numbers.
118 root 1.18 * or so.
119 root 1.12 * we try to detect these and simply assume they are not gcc - if they have
120     * an issue with that they should have done it right in the first place.
121     */
122 root 1.137 #if !defined __GNUC_MINOR__ || defined __INTEL_COMPILER || defined __SUNPRO_C || defined __SUNPRO_CC || defined __llvm__ || defined __clang__
123     #define ECB_GCC_VERSION(major,minor) 0
124     #else
125     #define ECB_GCC_VERSION(major,minor) (__GNUC__ > (major) || (__GNUC__ == (major) && __GNUC_MINOR__ >= (minor)))
126 root 1.12 #endif
127 root 1.1
128 sf-exg 1.138 #define ECB_CLANG_VERSION(major,minor) (__clang_major__ > (major) || (__clang_major__ == (major) && __clang_minor__ >= (minor)))
129    
130 root 1.147 #if __clang__ && defined __has_builtin
131     #define ECB_CLANG_BUILTIN(x) __has_builtin (x)
132 sf-exg 1.138 #else
133     #define ECB_CLANG_BUILTIN(x) 0
134     #endif
135    
136 root 1.147 #if __clang__ && defined __has_extension
137     #define ECB_CLANG_EXTENSION(x) __has_extension (x)
138 sf-exg 1.140 #else
139     #define ECB_CLANG_EXTENSION(x) 0
140     #endif
141    
142 root 1.91 #define ECB_CPP (__cplusplus+0)
143     #define ECB_CPP11 (__cplusplus >= 201103L)
144 root 1.177 #define ECB_CPP14 (__cplusplus >= 201402L)
145     #define ECB_CPP17 (__cplusplus >= 201703L)
146 root 1.90
147 root 1.102 #if ECB_CPP
148 root 1.127 #define ECB_C 0
149     #define ECB_STDC_VERSION 0
150     #else
151     #define ECB_C 1
152     #define ECB_STDC_VERSION __STDC_VERSION__
153     #endif
154    
155     #define ECB_C99 (ECB_STDC_VERSION >= 199901L)
156     #define ECB_C11 (ECB_STDC_VERSION >= 201112L)
157 root 1.177 #define ECB_C17 (ECB_STDC_VERSION >= 201710L)
158 root 1.127
159     #if ECB_CPP
160 root 1.102 #define ECB_EXTERN_C extern "C"
161     #define ECB_EXTERN_C_BEG ECB_EXTERN_C {
162     #define ECB_EXTERN_C_END }
163     #else
164     #define ECB_EXTERN_C extern
165     #define ECB_EXTERN_C_BEG
166     #define ECB_EXTERN_C_END
167     #endif
168    
169 root 1.52 /*****************************************************************************/
170    
171 root 1.58 /* ECB_NO_THREADS - ecb is not used by multiple threads, ever */
172     /* ECB_NO_SMP - ecb might be used in multiple threads, but only on a single cpu */
173    
174 root 1.79 #if ECB_NO_THREADS
175 root 1.95 #define ECB_NO_SMP 1
176 root 1.79 #endif
177    
178 root 1.93 #if ECB_NO_SMP
179 root 1.64 #define ECB_MEMORY_FENCE do { } while (0)
180 root 1.58 #endif
181    
182 sf-exg 1.165 /* http://www-01.ibm.com/support/knowledgecenter/SSGH3R_13.1.0/com.ibm.xlcpp131.aix.doc/compiler_ref/compiler_builtins.html */
183     #if __xlC__ && ECB_CPP
184     #include <builtins.h>
185     #endif
186    
187 root 1.171 #if 1400 <= _MSC_VER
188     #include <intrin.h> /* fence functions _ReadBarrier, also bit search functions _BitScanReverse */
189     #endif
190    
191 root 1.52 #ifndef ECB_MEMORY_FENCE
192 root 1.85 #if ECB_GCC_VERSION(2,5) || defined __INTEL_COMPILER || (__llvm__ && __GNUC__) || __SUNPRO_C >= 0x5110 || __SUNPRO_CC >= 0x5110
193 root 1.178 #define ECB_MEMORY_FENCE_RELAXED __asm__ __volatile__ ("" : : : "memory")
194 root 1.73 #if __i386 || __i386__
195 root 1.54 #define ECB_MEMORY_FENCE __asm__ __volatile__ ("lock; orb $0, -1(%%esp)" : : : "memory")
196 root 1.94 #define ECB_MEMORY_FENCE_ACQUIRE __asm__ __volatile__ ("" : : : "memory")
197 root 1.176 #define ECB_MEMORY_FENCE_RELEASE __asm__ __volatile__ ("" : : : "memory")
198 sf-exg 1.159 #elif ECB_GCC_AMD64
199 root 1.94 #define ECB_MEMORY_FENCE __asm__ __volatile__ ("mfence" : : : "memory")
200     #define ECB_MEMORY_FENCE_ACQUIRE __asm__ __volatile__ ("" : : : "memory")
201 root 1.176 #define ECB_MEMORY_FENCE_RELEASE __asm__ __volatile__ ("" : : : "memory")
202 root 1.63 #elif __powerpc__ || __ppc__ || __powerpc64__ || __ppc64__
203 root 1.94 #define ECB_MEMORY_FENCE __asm__ __volatile__ ("sync" : : : "memory")
204 root 1.175 #elif defined __ARM_ARCH_2__ \
205     || defined __ARM_ARCH_3__ || defined __ARM_ARCH_3M__ \
206     || defined __ARM_ARCH_4__ || defined __ARM_ARCH_4T__ \
207     || defined __ARM_ARCH_5__ || defined __ARM_ARCH_5E__ \
208     || defined __ARM_ARCH_5T__ || defined __ARM_ARCH_5TE__ \
209     || defined __ARM_ARCH_5TEJ__
210     /* should not need any, unless running old code on newer cpu - arm doesn't support that */
211 root 1.85 #elif defined __ARM_ARCH_6__ || defined __ARM_ARCH_6J__ \
212 root 1.175 || defined __ARM_ARCH_6K__ || defined __ARM_ARCH_6ZK__ \
213     || defined __ARM_ARCH_6T2__
214 root 1.84 #define ECB_MEMORY_FENCE __asm__ __volatile__ ("mcr p15,0,%0,c7,c10,5" : : "r" (0) : "memory")
215 root 1.85 #elif defined __ARM_ARCH_7__ || defined __ARM_ARCH_7A__ \
216 root 1.175 || defined __ARM_ARCH_7R__ || defined __ARM_ARCH_7M__
217 root 1.94 #define ECB_MEMORY_FENCE __asm__ __volatile__ ("dmb" : : : "memory")
218 root 1.129 #elif __aarch64__
219     #define ECB_MEMORY_FENCE __asm__ __volatile__ ("dmb ish" : : : "memory")
220 root 1.166 #elif (__sparc || __sparc__) && !(__sparc_v8__ || defined __sparcv8)
221 root 1.94 #define ECB_MEMORY_FENCE __asm__ __volatile__ ("membar #LoadStore | #LoadLoad | #StoreStore | #StoreLoad" : : : "memory")
222     #define ECB_MEMORY_FENCE_ACQUIRE __asm__ __volatile__ ("membar #LoadStore | #LoadLoad" : : : "memory")
223     #define ECB_MEMORY_FENCE_RELEASE __asm__ __volatile__ ("membar #LoadStore | #StoreStore" : : : "memory")
224 root 1.85 #elif defined __s390__ || defined __s390x__
225 root 1.77 #define ECB_MEMORY_FENCE __asm__ __volatile__ ("bcr 15,0" : : : "memory")
226 root 1.85 #elif defined __mips__
227 root 1.118 /* GNU/Linux emulates sync on mips1 architectures, so we force its use */
228 root 1.116 /* anybody else who still uses mips1 is supposed to send in their version, with detection code. */
229     #define ECB_MEMORY_FENCE __asm__ __volatile__ (".set mips2; sync; .set mips0" : : : "memory")
230 root 1.86 #elif defined __alpha__
231 root 1.94 #define ECB_MEMORY_FENCE __asm__ __volatile__ ("mb" : : : "memory")
232     #elif defined __hppa__
233     #define ECB_MEMORY_FENCE __asm__ __volatile__ ("" : : : "memory")
234     #define ECB_MEMORY_FENCE_RELEASE __asm__ __volatile__ ("" : : : "memory")
235     #elif defined __ia64__
236     #define ECB_MEMORY_FENCE __asm__ __volatile__ ("mf" : : : "memory")
237 root 1.117 #elif defined __m68k__
238     #define ECB_MEMORY_FENCE __asm__ __volatile__ ("" : : : "memory")
239     #elif defined __m88k__
240     #define ECB_MEMORY_FENCE __asm__ __volatile__ ("tb1 0,%%r0,128" : : : "memory")
241     #elif defined __sh__
242     #define ECB_MEMORY_FENCE __asm__ __volatile__ ("" : : : "memory")
243 root 1.52 #endif
244     #endif
245     #endif
246    
247     #ifndef ECB_MEMORY_FENCE
248 root 1.93 #if ECB_GCC_VERSION(4,7)
249 root 1.97 /* see comment below (stdatomic.h) about the C11 memory model. */
250 root 1.93 #define ECB_MEMORY_FENCE __atomic_thread_fence (__ATOMIC_SEQ_CST)
251 root 1.128 #define ECB_MEMORY_FENCE_ACQUIRE __atomic_thread_fence (__ATOMIC_ACQUIRE)
252     #define ECB_MEMORY_FENCE_RELEASE __atomic_thread_fence (__ATOMIC_RELEASE)
253 root 1.190 #undef ECB_MEMORY_FENCE_RELAXED
254 root 1.178 #define ECB_MEMORY_FENCE_RELAXED __atomic_thread_fence (__ATOMIC_RELAXED)
255 root 1.110
256 sf-exg 1.140 #elif ECB_CLANG_EXTENSION(c_atomic)
257     /* see comment below (stdatomic.h) about the C11 memory model. */
258     #define ECB_MEMORY_FENCE __c11_atomic_thread_fence (__ATOMIC_SEQ_CST)
259     #define ECB_MEMORY_FENCE_ACQUIRE __c11_atomic_thread_fence (__ATOMIC_ACQUIRE)
260     #define ECB_MEMORY_FENCE_RELEASE __c11_atomic_thread_fence (__ATOMIC_RELEASE)
261 root 1.190 #undef ECB_MEMORY_FENCE_RELAXED
262 root 1.178 #define ECB_MEMORY_FENCE_RELAXED __c11_atomic_thread_fence (__ATOMIC_RELAXED)
263 root 1.110
264 root 1.93 #elif ECB_GCC_VERSION(4,4) || defined __INTEL_COMPILER || defined __clang__
265 root 1.52 #define ECB_MEMORY_FENCE __sync_synchronize ()
266 root 1.126 #elif _MSC_VER >= 1500 /* VC++ 2008 */
267     /* apparently, microsoft broke all the memory barrier stuff in Visual Studio 2008... */
268     #pragma intrinsic(_ReadBarrier,_WriteBarrier,_ReadWriteBarrier)
269     #define ECB_MEMORY_FENCE _ReadWriteBarrier (); MemoryBarrier()
270     #define ECB_MEMORY_FENCE_ACQUIRE _ReadWriteBarrier (); MemoryBarrier() /* according to msdn, _ReadBarrier is not a load fence */
271     #define ECB_MEMORY_FENCE_RELEASE _WriteBarrier (); MemoryBarrier()
272 root 1.57 #elif _MSC_VER >= 1400 /* VC++ 2005 */
273     #pragma intrinsic(_ReadBarrier,_WriteBarrier,_ReadWriteBarrier)
274     #define ECB_MEMORY_FENCE _ReadWriteBarrier ()
275     #define ECB_MEMORY_FENCE_ACQUIRE _ReadWriteBarrier () /* according to msdn, _ReadBarrier is not a load fence */
276     #define ECB_MEMORY_FENCE_RELEASE _WriteBarrier ()
277 root 1.85 #elif defined _WIN32
278 root 1.55 #include <WinNT.h>
279 root 1.57 #define ECB_MEMORY_FENCE MemoryBarrier () /* actually just xchg on x86... scary */
280 root 1.72 #elif __SUNPRO_C >= 0x5110 || __SUNPRO_CC >= 0x5110
281     #include <mbarrier.h>
282 root 1.178 #define ECB_MEMORY_FENCE __machine_rw_barrier ()
283     #define ECB_MEMORY_FENCE_ACQUIRE __machine_acq_barrier ()
284     #define ECB_MEMORY_FENCE_RELEASE __machine_rel_barrier ()
285     #define ECB_MEMORY_FENCE_RELAXED __compiler_barrier ()
286 root 1.82 #elif __xlC__
287 root 1.83 #define ECB_MEMORY_FENCE __sync ()
288 root 1.52 #endif
289     #endif
290    
291 root 1.53 #ifndef ECB_MEMORY_FENCE
292 root 1.94 #if ECB_C11 && !defined __STDC_NO_ATOMICS__
293     /* we assume that these memory fences work on all variables/all memory accesses, */
294     /* not just C11 atomics and atomic accesses */
295     #include <stdatomic.h>
296     #define ECB_MEMORY_FENCE atomic_thread_fence (memory_order_seq_cst)
297 root 1.178 #define ECB_MEMORY_FENCE_ACQUIRE atomic_thread_fence (memory_order_acquire)
298     #define ECB_MEMORY_FENCE_RELEASE atomic_thread_fence (memory_order_release)
299 root 1.94 #endif
300     #endif
301    
302     #ifndef ECB_MEMORY_FENCE
303 root 1.62 #if !ECB_AVOID_PTHREADS
304     /*
305     * if you get undefined symbol references to pthread_mutex_lock,
306     * or failure to find pthread.h, then you should implement
307     * the ECB_MEMORY_FENCE operations for your cpu/compiler
308     * OR provide pthread.h and link against the posix thread library
309     * of your system.
310     */
311     #include <pthread.h>
312     #define ECB_NEEDS_PTHREADS 1
313     #define ECB_MEMORY_FENCE_NEEDS_PTHREADS 1
314 root 1.52
315 root 1.62 static pthread_mutex_t ecb_mf_lock = PTHREAD_MUTEX_INITIALIZER;
316     #define ECB_MEMORY_FENCE do { pthread_mutex_lock (&ecb_mf_lock); pthread_mutex_unlock (&ecb_mf_lock); } while (0)
317     #endif
318     #endif
319    
320 root 1.85 #if !defined ECB_MEMORY_FENCE_ACQUIRE && defined ECB_MEMORY_FENCE
321 root 1.52 #define ECB_MEMORY_FENCE_ACQUIRE ECB_MEMORY_FENCE
322 root 1.62 #endif
323    
324 root 1.85 #if !defined ECB_MEMORY_FENCE_RELEASE && defined ECB_MEMORY_FENCE
325 root 1.52 #define ECB_MEMORY_FENCE_RELEASE ECB_MEMORY_FENCE
326     #endif
327    
328 root 1.178 #if !defined ECB_MEMORY_FENCE_RELAXED && defined ECB_MEMORY_FENCE
329     #define ECB_MEMORY_FENCE_RELAXED ECB_MEMORY_FENCE /* very heavy-handed */
330     #endif
331    
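/* Usage sketch (illustrative only, not part of libecb; the example_* names are
 * made up): a release fence orders the data stores before the flag store, an
 * acquire fence orders the data loads after the flag load. This is a
 * simplified illustration - production code would normally use proper atomic
 * types for the flag. */
#if 0
static int example_data;
static volatile int example_ready;

static void
example_producer (void)
{
  example_data = 42;        /* prepare the data */
  ECB_MEMORY_FENCE_RELEASE; /* publish the data before the flag */
  example_ready = 1;
}

static int
example_consumer (void)
{
  while (!example_ready)
    ;                       /* spin until the producer signals readiness */

  ECB_MEMORY_FENCE_ACQUIRE; /* order the data loads after the flag load */
  return example_data;
}
#endif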
332 root 1.52 /*****************************************************************************/
333    
334 root 1.149 #if ECB_CPP
335 root 1.46 #define ecb_inline static inline
336 root 1.38 #elif ECB_GCC_VERSION(2,5)
337 root 1.46 #define ecb_inline static __inline__
338 root 1.39 #elif ECB_C99
339 root 1.46 #define ecb_inline static inline
340 root 1.29 #else
341 root 1.46 #define ecb_inline static
342 root 1.38 #endif
343    
344     #if ECB_GCC_VERSION(3,3)
345     #define ecb_restrict __restrict__
346 root 1.39 #elif ECB_C99
347 root 1.38 #define ecb_restrict restrict
348     #else
349     #define ecb_restrict
350 root 1.4 #endif
351    
352 root 1.38 typedef int ecb_bool;
353    
354 root 1.8 #define ECB_CONCAT_(a, b) a ## b
355     #define ECB_CONCAT(a, b) ECB_CONCAT_(a, b)
356     #define ECB_STRINGIFY_(a) # a
357     #define ECB_STRINGIFY(a) ECB_STRINGIFY_(a)
358 root 1.155 #define ECB_STRINGIFY_EXPR(expr) ((expr), ECB_STRINGIFY_ (expr))
359 root 1.8
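/* Usage sketch (illustrative only): the two-level definitions above make sure
 * macro arguments are expanded before pasting/stringification, and
 * ECB_STRINGIFY_EXPR additionally requires its argument to be a valid
 * expression. */
#if 0
static void
example_stringify (void)
{
  const char *expanded = ECB_STRINGIFY (ECB_VERSION);  /* "0x0001000c" - argument expanded first */
  const char *literal  = ECB_STRINGIFY_ (ECB_VERSION); /* "ECB_VERSION" - no expansion */
  const char *checked  = ECB_STRINGIFY_EXPR (1 + 1);   /* "1 + 1", and 1 + 1 must compile */

  (void)expanded; (void)literal; (void)checked;
}
#endif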
360 root 1.210 /* This marks larger functions that do not necessarily need to be inlined */
361     /* TODO: possibly static would be best for these at the moment? */
362 root 1.46 #define ecb_function_ ecb_inline
363 root 1.3
364 sf-exg 1.138 #if ECB_GCC_VERSION(3,1) || ECB_CLANG_VERSION(2,8)
365 root 1.142 #define ecb_attribute(attrlist) __attribute__ (attrlist)
366 root 1.37 #else
367     #define ecb_attribute(attrlist)
368 sf-exg 1.138 #endif
369 root 1.127
370 sf-exg 1.138 #if ECB_GCC_VERSION(3,1) || ECB_CLANG_BUILTIN(__builtin_constant_p)
371     #define ecb_is_constant(expr) __builtin_constant_p (expr)
372     #else
373 root 1.127 /* possible C11 impl for integral types
374     typedef struct ecb_is_constant_struct ecb_is_constant_struct;
375     #define ecb_is_constant(expr) _Generic ((1 ? (struct ecb_is_constant_struct *)0 : (void *)((expr) - (expr)), ecb_is_constant_struct *: 0, default: 1)) */
376    
377 root 1.37 #define ecb_is_constant(expr) 0
378 sf-exg 1.138 #endif
379    
380     #if ECB_GCC_VERSION(3,1) || ECB_CLANG_BUILTIN(__builtin_expect)
381     #define ecb_expect(expr,value) __builtin_expect ((expr),(value))
382     #else
383 root 1.37 #define ecb_expect(expr,value) (expr)
384 sf-exg 1.138 #endif
385    
386     #if ECB_GCC_VERSION(3,1) || ECB_CLANG_BUILTIN(__builtin_prefetch)
387     #define ecb_prefetch(addr,rw,locality) __builtin_prefetch (addr, rw, locality)
388     #else
389 root 1.37 #define ecb_prefetch(addr,rw,locality)
390 root 1.1 #endif
391    
392 root 1.2 /* no emulation for ecb_decltype */
393 root 1.143 #if ECB_CPP11
394 root 1.144 // older implementations might have problems with decltype(x)::type, work around it
395 root 1.146 template<class T> struct ecb_decltype_t { typedef T type; };
396     #define ecb_decltype(x) ecb_decltype_t<decltype (x)>::type
397 root 1.143 #elif ECB_GCC_VERSION(3,0) || ECB_CLANG_VERSION(2,8)
398     #define ecb_decltype(x) __typeof__ (x)
399 root 1.1 #endif
400    
401 root 1.135 #if _MSC_VER >= 1300
402 root 1.149 #define ecb_deprecated __declspec (deprecated)
403 root 1.135 #else
404     #define ecb_deprecated ecb_attribute ((__deprecated__))
405     #endif
406    
407 sf-exg 1.162 #if _MSC_VER >= 1500
408 root 1.154 #define ecb_deprecated_message(msg) __declspec (deprecated (msg))
409     #elif ECB_GCC_VERSION(4,5)
410     #define ecb_deprecated_message(msg) ecb_attribute ((__deprecated__ (msg)))
411     #else
412     #define ecb_deprecated_message(msg) ecb_deprecated
413     #endif
414    
415     #if _MSC_VER >= 1400
416     #define ecb_noinline __declspec (noinline)
417     #else
418     #define ecb_noinline ecb_attribute ((__noinline__))
419     #endif
420    
421 root 1.24 #define ecb_unused ecb_attribute ((__unused__))
422     #define ecb_const ecb_attribute ((__const__))
423     #define ecb_pure ecb_attribute ((__pure__))
424 root 1.35
425 root 1.145 #if ECB_C11 || __IBMC_NORETURN
426 sf-exg 1.165 /* http://www-01.ibm.com/support/knowledgecenter/SSGH3R_13.1.0/com.ibm.xlcpp131.aix.doc/language_ref/noreturn.html */
427 root 1.90 #define ecb_noreturn _Noreturn
428 root 1.153 #elif ECB_CPP11
429     #define ecb_noreturn [[noreturn]]
430     #elif _MSC_VER >= 1200
431 sf-exg 1.156 /* http://msdn.microsoft.com/en-us/library/k6ktzx3s.aspx */
432 root 1.153 #define ecb_noreturn __declspec (noreturn)
433 root 1.90 #else
434     #define ecb_noreturn ecb_attribute ((__noreturn__))
435     #endif
436    
437 root 1.35 #if ECB_GCC_VERSION(4,3)
438 root 1.39 #define ecb_artificial ecb_attribute ((__artificial__))
439     #define ecb_hot ecb_attribute ((__hot__))
440     #define ecb_cold ecb_attribute ((__cold__))
441 root 1.35 #else
442     #define ecb_artificial
443     #define ecb_hot
444     #define ecb_cold
445     #endif
446 root 1.1
447 root 1.39 /* put around conditional expressions if you are very sure that the */
448     /* expression is mostly true or mostly false. note that these return */
449     /* booleans, not the expression. */
450 root 1.33 #define ecb_expect_false(expr) ecb_expect (!!(expr), 0)
451     #define ecb_expect_true(expr) ecb_expect (!!(expr), 1)
452 root 1.36 /* for compatibility to the rest of the world */
453 root 1.33 #define ecb_likely(expr) ecb_expect_true (expr)
454     #define ecb_unlikely(expr) ecb_expect_false (expr)
455 root 1.1
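/* Usage sketch (illustrative only): mark the rare side of a condition so the
 * compiler can lay out the common path as the straight-line fall-through. */
#if 0
static int
example_parse_byte (int c)
{
  if (ecb_expect_false (c < 0))
    return -1;               /* rare error path, moved out of the way */

  if (ecb_likely (c < 128))
    return c;                /* common fast path */

  return c & 127;
}
#endif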
456 root 1.3 /* count trailing zero bits and count # of one bits */
457 root 1.139 #if ECB_GCC_VERSION(3,4) \
458     || (ECB_CLANG_BUILTIN(__builtin_clz) && ECB_CLANG_BUILTIN(__builtin_clzll) \
459     && ECB_CLANG_BUILTIN(__builtin_ctz) && ECB_CLANG_BUILTIN(__builtin_ctzll) \
460     && ECB_CLANG_BUILTIN(__builtin_popcount))
461 root 1.206 #define ecb_ctz32(x) __builtin_ctz (x)
462     #define ecb_ctz64(x) (__SIZEOF_LONG__ == 8 ? __builtin_ctzl (x) : __builtin_ctzll (x))
463     #define ecb_clz32(x) __builtin_clz (x)
464     #define ecb_clz64(x) (__SIZEOF_LONG__ == 8 ? __builtin_clzl (x) : __builtin_clzll (x))
465     #define ecb_ld32(x) (ecb_clz32 (x) ^ 31)
466     #define ecb_ld64(x) (ecb_clz64 (x) ^ 63)
467 root 1.35 #define ecb_popcount32(x) __builtin_popcount (x)
468 root 1.206 /* ecb_popcount64 is more difficult, see below */
469 root 1.1 #else
470 root 1.151 ecb_function_ ecb_const int ecb_ctz32 (uint32_t x);
471     ecb_function_ ecb_const int
472 root 1.35 ecb_ctz32 (uint32_t x)
473     {
474 root 1.171 #if 1400 <= _MSC_VER && (_M_IX86 || _M_X64 || _M_IA64 || _M_ARM)
475 root 1.172 unsigned long r;
476 root 1.171 _BitScanForward (&r, x);
477     return (int)r;
478     #else
479 root 1.208 int r;
480 root 1.35
481 root 1.208 x &= ~x + 1; /* this isolates the lowest bit */
482 root 1.205
483 root 1.208 #if 1
484     /* David Seal's algorithm, Message-ID: <32975@armltd.uucp> from 1994 */
485     /* This happens to return 32 for x == 0, but the API does not support this */
486 root 1.35
487 root 1.208 /* -0 marks unused entries */
488     static unsigned char table[64] =
489     {
490     32, 0, 1, 12, 2, 6, -0, 13, 3, -0, 7, -0, -0, -0, -0, 14,
491     10, 4, -0, -0, 8, -0, -0, 25, -0, -0, -0, -0, -0, 21, 27, 15,
492     31, 11, 5, -0, -0, -0, -0, -0, 9, -0, -0, 24, -0, -0, 20, 26,
493     30, -0, -0, -0, -0, 23, -0, 19, 29, -0, 22, 18, 28, 17, 16, -0
494     };
495    
496     /* magic constant results in 33 unique values in the upper 6 bits */
497     x *= 0x0450fbafU; /* == 17 * 65 * 65535 */
498    
499     r = table [x >> 26];
500     #elif 0 /* branchless on i386, typically */
501     r = 0;
502 root 1.50 r += !!(x & 0xaaaaaaaa) << 0;
503     r += !!(x & 0xcccccccc) << 1;
504     r += !!(x & 0xf0f0f0f0) << 2;
505     r += !!(x & 0xff00ff00) << 3;
506     r += !!(x & 0xffff0000) << 4;
507 root 1.208 #else /* branchless on modern compilers, typically */
508     r = 0;
509 root 1.35 if (x & 0xaaaaaaaa) r += 1;
510     if (x & 0xcccccccc) r += 2;
511     if (x & 0xf0f0f0f0) r += 4;
512     if (x & 0xff00ff00) r += 8;
513     if (x & 0xffff0000) r += 16;
514 root 1.50 #endif
515 root 1.35
516     return r;
517 root 1.171 #endif
518 root 1.35 }
519    
520 root 1.151 ecb_function_ ecb_const int ecb_ctz64 (uint64_t x);
521     ecb_function_ ecb_const int
522 root 1.49 ecb_ctz64 (uint64_t x)
523     {
524 root 1.171 #if 1400 <= _MSC_VER && (_M_X64 || _M_IA64 || _M_ARM)
525 root 1.172 unsigned long r;
526 root 1.171 _BitScanForward64 (&r, x);
527     return (int)r;
528     #else
529 root 1.168 int shift = x & 0xffffffff ? 0 : 32;
530 root 1.50 return ecb_ctz32 (x >> shift) + shift;
531 root 1.171 #endif
532 root 1.49 }
533    
534 root 1.208 ecb_function_ ecb_const int ecb_clz32 (uint32_t x);
535     ecb_function_ ecb_const int
536     ecb_clz32 (uint32_t x)
537     {
538     #if 1400 <= _MSC_VER && (_M_IX86 || _M_X64 || _M_IA64 || _M_ARM)
539     unsigned long r;
540     _BitScanReverse (&r, x);
541     return 31 - (int)r; /* _BitScanReverse yields the bit index, convert to the leading zero count */
542     #else
543    
544     /* Robert Harley's algorithm from comp.arch 1996-12-07 */
545     /* This happens to return 32 for x == 0, but the API does not support this */
546    
547     /* -0 marks unused table elements */
548     static unsigned char table[64] =
549     {
550     32, 31, -0, 16, -0, 30, 3, -0, 15, -0, -0, -0, 29, 10, 2, -0,
551     -0, -0, 12, 14, 21, -0, 19, -0, -0, 28, -0, 25, -0, 9, 1, -0,
552     17, -0, 4, -0, -0, -0, 11, -0, 13, 22, 20, -0, 26, -0, -0, 18,
553     5, -0, -0, 23, -0, 27, -0, 6, -0, 24, 7, -0, 8, -0, 0, -0
554     };
555    
556     /* propagate leftmost 1 bit to the right */
557     x |= x >> 1;
558     x |= x >> 2;
559     x |= x >> 4;
560     x |= x >> 8;
561     x |= x >> 16;
562    
563     /* magic constant results in 33 unique values in the upper 6 bits */
564     x *= 0x06EB14F9U; /* == 7 * 255 * 255 * 255 */
565    
566     return table [x >> 26];
567     #endif
568     }
569    
570     ecb_function_ ecb_const int ecb_clz64 (uint64_t x);
571     ecb_function_ ecb_const int
572     ecb_clz64 (uint64_t x)
573     {
574     #if 1400 <= _MSC_VER && (_M_X64 || _M_IA64 || _M_ARM)
575     unsigned long r;
576     _BitScanReverse64 (&r, x);
577     return 63 - (int)r; /* _BitScanReverse64 yields the bit index, convert to the leading zero count */
578     #else
579     uint32_t l = x >> 32;
580     int shift = l ? 0 : 32;
581     return ecb_clz32 (l ? l : x) + shift;
582     #endif
583     }
584    
585 root 1.151 ecb_function_ ecb_const int ecb_popcount32 (uint32_t x);
586     ecb_function_ ecb_const int
587 root 1.35 ecb_popcount32 (uint32_t x)
588     {
589     x -= (x >> 1) & 0x55555555;
590     x = ((x >> 2) & 0x33333333) + (x & 0x33333333);
591     x = ((x >> 4) + x) & 0x0f0f0f0f;
592     x *= 0x01010101;
593 root 1.1
594 root 1.35 return x >> 24;
595     }
596 root 1.49
597 root 1.151 ecb_function_ ecb_const int ecb_ld32 (uint32_t x);
598     ecb_function_ ecb_const int ecb_ld32 (uint32_t x)
599 root 1.49 {
600 root 1.171 #if 1400 <= _MSC_VER && (_M_IX86 || _M_X64 || _M_IA64 || _M_ARM)
601 root 1.172 unsigned long r;
602 root 1.171 _BitScanReverse (&r, x);
603     return (int)r;
604     #else
605 root 1.50 int r = 0;
606 root 1.49
607 root 1.50 if (x >> 16) { x >>= 16; r += 16; }
608     if (x >> 8) { x >>= 8; r += 8; }
609     if (x >> 4) { x >>= 4; r += 4; }
610     if (x >> 2) { x >>= 2; r += 2; }
611     if (x >> 1) { r += 1; }
612 root 1.49
613     return r;
614 root 1.171 #endif
615 root 1.49 }
616    
617 root 1.151 ecb_function_ ecb_const int ecb_ld64 (uint64_t x);
618     ecb_function_ ecb_const int ecb_ld64 (uint64_t x)
619 root 1.49 {
620 root 1.171 #if 1400 <= _MSC_VER && (_M_X64 || _M_IA64 || _M_ARM)
621 root 1.172 unsigned long r;
622 root 1.171 _BitScanReverse64 (&r, x);
623     return (int)r;
624     #else
625 root 1.50 int r = 0;
626 root 1.49
627 root 1.50 if (x >> 32) { x >>= 32; r += 32; }
628 root 1.49
629 root 1.50 return r + ecb_ld32 (x);
630 root 1.171 #endif
631 root 1.49 }
632 root 1.1 #endif
633    
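/* Usage sketch (illustrative only): the scan helpers expect a non-zero
 * argument; ecb_ctz* counts trailing zero bits, ecb_clz* counts leading zero
 * bits, ecb_ld* is the index of the highest set bit (integer log2) and
 * ecb_popcount* counts the set bits. */
#if 0
static int
example_bit_scans (void)
{
  uint32_t x = 0x00000050; /* bits 4 and 6 set */

  return ecb_ctz32 (x)       /*  4 */
       + ecb_clz32 (x)       /* 25 */
       + ecb_ld32 (x)        /*  6 */
       + ecb_popcount32 (x); /*  2 */
}
#endif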
634 root 1.151 ecb_function_ ecb_const ecb_bool ecb_is_pot32 (uint32_t x);
635     ecb_function_ ecb_const ecb_bool ecb_is_pot32 (uint32_t x) { return !(x & (x - 1)); }
636     ecb_function_ ecb_const ecb_bool ecb_is_pot64 (uint64_t x);
637     ecb_function_ ecb_const ecb_bool ecb_is_pot64 (uint64_t x) { return !(x & (x - 1)); }
638 root 1.88
639 root 1.151 ecb_function_ ecb_const uint8_t ecb_bitrev8 (uint8_t x);
640     ecb_function_ ecb_const uint8_t ecb_bitrev8 (uint8_t x)
641 root 1.70 {
642     return ( (x * 0x0802U & 0x22110U)
643 root 1.151 | (x * 0x8020U & 0x88440U)) * 0x10101U >> 16;
644 root 1.70 }
645    
646 root 1.151 ecb_function_ ecb_const uint16_t ecb_bitrev16 (uint16_t x);
647     ecb_function_ ecb_const uint16_t ecb_bitrev16 (uint16_t x)
648 root 1.70 {
649     x = ((x >> 1) & 0x5555) | ((x & 0x5555) << 1);
650     x = ((x >> 2) & 0x3333) | ((x & 0x3333) << 2);
651     x = ((x >> 4) & 0x0f0f) | ((x & 0x0f0f) << 4);
652     x = ( x >> 8 ) | ( x << 8);
653    
654     return x;
655     }
656    
657 root 1.151 ecb_function_ ecb_const uint32_t ecb_bitrev32 (uint32_t x);
658     ecb_function_ ecb_const uint32_t ecb_bitrev32 (uint32_t x)
659 root 1.70 {
660     x = ((x >> 1) & 0x55555555) | ((x & 0x55555555) << 1);
661     x = ((x >> 2) & 0x33333333) | ((x & 0x33333333) << 2);
662     x = ((x >> 4) & 0x0f0f0f0f) | ((x & 0x0f0f0f0f) << 4);
663     x = ((x >> 8) & 0x00ff00ff) | ((x & 0x00ff00ff) << 8);
664     x = ( x >> 16 ) | ( x << 16);
665    
666     return x;
667     }
668    
669 root 1.151 ecb_function_ ecb_const int ecb_popcount64 (uint64_t x);
670     ecb_function_ ecb_const int
671 root 1.49 ecb_popcount64 (uint64_t x)
672     {
673 root 1.205 /* popcount64 is only available on 64 bit cpus as gcc builtin. */
674     /* also, gcc/clang make this surprisingly difficult to use */
675 root 1.207 #if (__SIZEOF_LONG__ == 8) && (ECB_GCC_VERSION(3,4) || ECB_CLANG_BUILTIN (__builtin_popcountl))
676 root 1.205 return __builtin_popcountl (x);
677     #else
678 root 1.49 return ecb_popcount32 (x) + ecb_popcount32 (x >> 32);
679 root 1.205 #endif
680 root 1.49 }
681    
682 root 1.151 ecb_inline ecb_const uint8_t ecb_rotl8 (uint8_t x, unsigned int count);
683     ecb_inline ecb_const uint8_t ecb_rotr8 (uint8_t x, unsigned int count);
684     ecb_inline ecb_const uint16_t ecb_rotl16 (uint16_t x, unsigned int count);
685     ecb_inline ecb_const uint16_t ecb_rotr16 (uint16_t x, unsigned int count);
686     ecb_inline ecb_const uint32_t ecb_rotl32 (uint32_t x, unsigned int count);
687     ecb_inline ecb_const uint32_t ecb_rotr32 (uint32_t x, unsigned int count);
688     ecb_inline ecb_const uint64_t ecb_rotl64 (uint64_t x, unsigned int count);
689     ecb_inline ecb_const uint64_t ecb_rotr64 (uint64_t x, unsigned int count);
690    
691 root 1.198 ecb_inline ecb_const uint8_t ecb_rotl8 (uint8_t x, unsigned int count) { return (x >> (-count & 7)) | (x << (count & 7)); }
692     ecb_inline ecb_const uint8_t ecb_rotr8 (uint8_t x, unsigned int count) { return (x << (-count & 7)) | (x >> (count & 7)); }
693     ecb_inline ecb_const uint16_t ecb_rotl16 (uint16_t x, unsigned int count) { return (x >> (-count & 15)) | (x << (count & 15)); }
694     ecb_inline ecb_const uint16_t ecb_rotr16 (uint16_t x, unsigned int count) { return (x << (-count & 15)) | (x >> (count & 15)); }
695     ecb_inline ecb_const uint32_t ecb_rotl32 (uint32_t x, unsigned int count) { return (x >> (-count & 31)) | (x << (count & 31)); }
696     ecb_inline ecb_const uint32_t ecb_rotr32 (uint32_t x, unsigned int count) { return (x << (-count & 31)) | (x >> (count & 31)); }
697     ecb_inline ecb_const uint64_t ecb_rotl64 (uint64_t x, unsigned int count) { return (x >> (-count & 63)) | (x << (count & 63)); }
698     ecb_inline ecb_const uint64_t ecb_rotr64 (uint64_t x, unsigned int count) { return (x << (-count & 63)) | (x >> (count & 63)); }
699 root 1.50
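/* Usage sketch (illustrative only): rotate by "count" bit positions; the
 * (-count & mask) formulation stays well-defined for count == 0, where the
 * naive (x << count) | (x >> (width - count)) would shift by the full width. */
#if 0
static void
example_rotates (void)
{
  uint32_t a = ecb_rotl32 (0x80000001U, 1); /* 0x00000003 */
  uint32_t b = ecb_rotr32 (0x80000001U, 1); /* 0xc0000000 */
  uint8_t  c = ecb_rotl8  (0x81, 4);        /* 0x18 */

  (void)a; (void)b; (void)c;
}
#endif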
700 root 1.182 #if ECB_CPP
701    
702     inline uint8_t ecb_ctz (uint8_t v) { return ecb_ctz32 (v); }
703     inline uint16_t ecb_ctz (uint16_t v) { return ecb_ctz32 (v); }
704     inline uint32_t ecb_ctz (uint32_t v) { return ecb_ctz32 (v); }
705     inline uint64_t ecb_ctz (uint64_t v) { return ecb_ctz64 (v); }
706    
707     inline bool ecb_is_pot (uint8_t v) { return ecb_is_pot32 (v); }
708     inline bool ecb_is_pot (uint16_t v) { return ecb_is_pot32 (v); }
709     inline bool ecb_is_pot (uint32_t v) { return ecb_is_pot32 (v); }
710     inline bool ecb_is_pot (uint64_t v) { return ecb_is_pot64 (v); }
711    
712     inline int ecb_ld (uint8_t v) { return ecb_ld32 (v); }
713     inline int ecb_ld (uint16_t v) { return ecb_ld32 (v); }
714     inline int ecb_ld (uint32_t v) { return ecb_ld32 (v); }
715     inline int ecb_ld (uint64_t v) { return ecb_ld64 (v); }
716    
717     inline int ecb_popcount (uint8_t v) { return ecb_popcount32 (v); }
718     inline int ecb_popcount (uint16_t v) { return ecb_popcount32 (v); }
719     inline int ecb_popcount (uint32_t v) { return ecb_popcount32 (v); }
720     inline int ecb_popcount (uint64_t v) { return ecb_popcount64 (v); }
721    
722     inline uint8_t ecb_bitrev (uint8_t v) { return ecb_bitrev8 (v); }
723     inline uint16_t ecb_bitrev (uint16_t v) { return ecb_bitrev16 (v); }
724     inline uint32_t ecb_bitrev (uint32_t v) { return ecb_bitrev32 (v); }
725    
726 root 1.183 inline uint8_t ecb_rotl (uint8_t v, unsigned int count) { return ecb_rotl8 (v, count); }
727     inline uint16_t ecb_rotl (uint16_t v, unsigned int count) { return ecb_rotl16 (v, count); }
728     inline uint32_t ecb_rotl (uint32_t v, unsigned int count) { return ecb_rotl32 (v, count); }
729     inline uint64_t ecb_rotl (uint64_t v, unsigned int count) { return ecb_rotl64 (v, count); }
730    
731     inline uint8_t ecb_rotr (uint8_t v, unsigned int count) { return ecb_rotr8 (v, count); }
732     inline uint16_t ecb_rotr (uint16_t v, unsigned int count) { return ecb_rotr16 (v, count); }
733     inline uint32_t ecb_rotr (uint32_t v, unsigned int count) { return ecb_rotr32 (v, count); }
734     inline uint64_t ecb_rotr (uint64_t v, unsigned int count) { return ecb_rotr64 (v, count); }
735 root 1.182
736     #endif
737    
738 sf-exg 1.138 #if ECB_GCC_VERSION(4,3) || (ECB_CLANG_BUILTIN(__builtin_bswap32) && ECB_CLANG_BUILTIN(__builtin_bswap64))
739 root 1.164 #if ECB_GCC_VERSION(4,8) || ECB_CLANG_BUILTIN(__builtin_bswap16)
740     #define ecb_bswap16(x) __builtin_bswap16 (x)
741     #else
742 root 1.49 #define ecb_bswap16(x) (__builtin_bswap32 (x) >> 16)
743 root 1.164 #endif
744 root 1.49 #define ecb_bswap32(x) __builtin_bswap32 (x)
745     #define ecb_bswap64(x) __builtin_bswap64 (x)
746 root 1.164 #elif _MSC_VER
747     #include <stdlib.h>
748     #define ecb_bswap16(x) ((uint16_t)_byteswap_ushort ((uint16_t)(x)))
749     #define ecb_bswap32(x) ((uint32_t)_byteswap_ulong ((uint32_t)(x)))
750     #define ecb_bswap64(x) ((uint64_t)_byteswap_uint64 ((uint64_t)(x)))
751 root 1.13 #else
752 root 1.151 ecb_function_ ecb_const uint16_t ecb_bswap16 (uint16_t x);
753     ecb_function_ ecb_const uint16_t
754 root 1.50 ecb_bswap16 (uint16_t x)
755 root 1.49 {
756 root 1.50 return ecb_rotl16 (x, 8);
757 root 1.49 }
758    
759 root 1.151 ecb_function_ ecb_const uint32_t ecb_bswap32 (uint32_t x);
760     ecb_function_ ecb_const uint32_t
761 root 1.35 ecb_bswap32 (uint32_t x)
762     {
763 root 1.50 return (((uint32_t)ecb_bswap16 (x)) << 16) | ecb_bswap16 (x >> 16);
764 root 1.35 }
765    
766 root 1.151 ecb_function_ ecb_const uint64_t ecb_bswap64 (uint64_t x);
767     ecb_function_ ecb_const uint64_t
768 root 1.49 ecb_bswap64 (uint64_t x)
769 root 1.35 {
770 root 1.50 return (((uint64_t)ecb_bswap32 (x)) << 32) | ecb_bswap32 (x >> 32);
771 root 1.35 }
772 root 1.13 #endif
773    
774 sf-exg 1.138 #if ECB_GCC_VERSION(4,5) || ECB_CLANG_BUILTIN(__builtin_unreachable)
775 root 1.35 #define ecb_unreachable() __builtin_unreachable ()
776 root 1.13 #else
777 root 1.35 /* this seems to work fine, but gcc always emits a warning for it :/ */
778 root 1.151 ecb_inline ecb_noreturn void ecb_unreachable (void);
779     ecb_inline ecb_noreturn void ecb_unreachable (void) { }
780 root 1.13 #endif
781    
782 root 1.41 /* try to tell the compiler that some condition is definitely true */
783 root 1.100 #define ecb_assume(cond) if (!(cond)) ecb_unreachable (); else 0
784 root 1.41
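/* Usage sketch (illustrative only): ecb_assume and ecb_unreachable are
 * promises to the optimiser - if the condition can be false, or the
 * "unreachable" point can in fact be reached, behaviour is undefined. */
#if 0
static int
example_quadrant_sign (int q)
{
  ecb_assume (q >= 0 && q < 4); /* lets the compiler drop range checks */

  switch (q)
    {
      case 0: case 1: return +1;
      case 2: case 3: return -1;
      default: ecb_unreachable (); /* cannot happen given the assumption */
    }
}
#endif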
785 root 1.174 ecb_inline ecb_const uint32_t ecb_byteorder_helper (void);
786     ecb_inline ecb_const uint32_t
787 root 1.23 ecb_byteorder_helper (void)
788 root 1.3 {
789 root 1.98 /* the union code still generates code under pressure in gcc, */
790 sf-exg 1.111 /* but less than using pointers, and always seems to */
791 root 1.98 /* successfully return a constant. */
792     /* the reason why we have this horrible preprocessor mess */
793 root 1.99 /* is to avoid it in all cases, at least on common architectures */
794 sf-exg 1.111 /* or when using a recent enough gcc version (>= 4.6) */
795 root 1.174 #if (defined __BYTE_ORDER__ && __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__) \
796     || ((__i386 || __i386__ || _M_IX86 || ECB_GCC_AMD64 || ECB_MSVC_AMD64) && !__VOS__)
797     #define ECB_LITTLE_ENDIAN 1
798     return 0x44332211;
799     #elif (defined __BYTE_ORDER__ && __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__) \
800     || ((__AARCH64EB__ || __MIPSEB__ || __ARMEB__) && !__VOS__)
801     #define ECB_BIG_ENDIAN 1
802     return 0x11223344;
803 root 1.98 #else
804     union
805     {
806 root 1.174 uint8_t c[4];
807     uint32_t u;
808     } u = { 0x11, 0x22, 0x33, 0x44 };
809     return u.u;
810 root 1.98 #endif
811 root 1.3 }
812    
813 root 1.151 ecb_inline ecb_const ecb_bool ecb_big_endian (void);
814 root 1.174 ecb_inline ecb_const ecb_bool ecb_big_endian (void) { return ecb_byteorder_helper () == 0x11223344; }
815 root 1.151 ecb_inline ecb_const ecb_bool ecb_little_endian (void);
816 root 1.174 ecb_inline ecb_const ecb_bool ecb_little_endian (void) { return ecb_byteorder_helper () == 0x44332211; }
817 root 1.3
818 root 1.180 /*****************************************************************************/
819     /* unaligned load/store */
820    
821     ecb_inline uint_fast16_t ecb_be_u16_to_host (uint_fast16_t v) { return ecb_little_endian () ? ecb_bswap16 (v) : v; }
822     ecb_inline uint_fast32_t ecb_be_u32_to_host (uint_fast32_t v) { return ecb_little_endian () ? ecb_bswap32 (v) : v; }
823     ecb_inline uint_fast64_t ecb_be_u64_to_host (uint_fast64_t v) { return ecb_little_endian () ? ecb_bswap64 (v) : v; }
824    
825     ecb_inline uint_fast16_t ecb_le_u16_to_host (uint_fast16_t v) { return ecb_big_endian () ? ecb_bswap16 (v) : v; }
826     ecb_inline uint_fast32_t ecb_le_u32_to_host (uint_fast32_t v) { return ecb_big_endian () ? ecb_bswap32 (v) : v; }
827     ecb_inline uint_fast64_t ecb_le_u64_to_host (uint_fast64_t v) { return ecb_big_endian () ? ecb_bswap64 (v) : v; }
828    
829     ecb_inline uint_fast16_t ecb_peek_u16_u (const void *ptr) { uint16_t v; memcpy (&v, ptr, sizeof (v)); return v; }
830     ecb_inline uint_fast32_t ecb_peek_u32_u (const void *ptr) { uint32_t v; memcpy (&v, ptr, sizeof (v)); return v; }
831     ecb_inline uint_fast64_t ecb_peek_u64_u (const void *ptr) { uint64_t v; memcpy (&v, ptr, sizeof (v)); return v; }
832    
833     ecb_inline uint_fast16_t ecb_peek_be_u16_u (const void *ptr) { return ecb_be_u16_to_host (ecb_peek_u16_u (ptr)); }
834     ecb_inline uint_fast32_t ecb_peek_be_u32_u (const void *ptr) { return ecb_be_u32_to_host (ecb_peek_u32_u (ptr)); }
835     ecb_inline uint_fast64_t ecb_peek_be_u64_u (const void *ptr) { return ecb_be_u64_to_host (ecb_peek_u64_u (ptr)); }
836    
837     ecb_inline uint_fast16_t ecb_peek_le_u16_u (const void *ptr) { return ecb_le_u16_to_host (ecb_peek_u16_u (ptr)); }
838     ecb_inline uint_fast32_t ecb_peek_le_u32_u (const void *ptr) { return ecb_le_u32_to_host (ecb_peek_u32_u (ptr)); }
839     ecb_inline uint_fast64_t ecb_peek_le_u64_u (const void *ptr) { return ecb_le_u64_to_host (ecb_peek_u64_u (ptr)); }
840    
841     ecb_inline uint_fast16_t ecb_host_to_be_u16 (uint_fast16_t v) { return ecb_little_endian () ? ecb_bswap16 (v) : v; }
842     ecb_inline uint_fast32_t ecb_host_to_be_u32 (uint_fast32_t v) { return ecb_little_endian () ? ecb_bswap32 (v) : v; }
843     ecb_inline uint_fast64_t ecb_host_to_be_u64 (uint_fast64_t v) { return ecb_little_endian () ? ecb_bswap64 (v) : v; }
844    
845     ecb_inline uint_fast16_t ecb_host_to_le_u16 (uint_fast16_t v) { return ecb_big_endian () ? ecb_bswap16 (v) : v; }
846     ecb_inline uint_fast32_t ecb_host_to_le_u32 (uint_fast32_t v) { return ecb_big_endian () ? ecb_bswap32 (v) : v; }
847     ecb_inline uint_fast64_t ecb_host_to_le_u64 (uint_fast64_t v) { return ecb_big_endian () ? ecb_bswap64 (v) : v; }
848    
849     ecb_inline void ecb_poke_u16_u (void *ptr, uint16_t v) { memcpy (ptr, &v, sizeof (v)); }
850     ecb_inline void ecb_poke_u32_u (void *ptr, uint32_t v) { memcpy (ptr, &v, sizeof (v)); }
851     ecb_inline void ecb_poke_u64_u (void *ptr, uint64_t v) { memcpy (ptr, &v, sizeof (v)); }
852    
853     ecb_inline void ecb_poke_be_u16_u (void *ptr, uint_fast16_t v) { ecb_poke_u16_u (ptr, ecb_host_to_be_u16 (v)); }
854     ecb_inline void ecb_poke_be_u32_u (void *ptr, uint_fast32_t v) { ecb_poke_u32_u (ptr, ecb_host_to_be_u32 (v)); }
855     ecb_inline void ecb_poke_be_u64_u (void *ptr, uint_fast64_t v) { ecb_poke_u64_u (ptr, ecb_host_to_be_u64 (v)); }
856 sf-exg 1.196
857 root 1.180 ecb_inline void ecb_poke_le_u16_u (void *ptr, uint_fast16_t v) { ecb_poke_u16_u (ptr, ecb_host_to_le_u16 (v)); }
858     ecb_inline void ecb_poke_le_u32_u (void *ptr, uint_fast32_t v) { ecb_poke_u32_u (ptr, ecb_host_to_le_u32 (v)); }
859     ecb_inline void ecb_poke_le_u64_u (void *ptr, uint_fast64_t v) { ecb_poke_u64_u (ptr, ecb_host_to_le_u64 (v)); }
860    
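/* Usage sketch (illustrative only): read and write a big-endian 32 bit length
 * prefix at an unaligned position in a byte buffer, independent of the host
 * byte order. */
#if 0
static void
example_length_prefix (unsigned char *frame, uint32_t payload_len)
{
  ecb_poke_be_u32_u (frame + 1, payload_len);               /* unaligned big-endian store */
  uint32_t check = (uint32_t)ecb_peek_be_u32_u (frame + 1); /* reads the same value back */

  (void)check;
}
#endif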
861 root 1.186 #if ECB_CPP
862 root 1.180
863     inline uint8_t ecb_bswap (uint8_t v) { return v; }
864     inline uint16_t ecb_bswap (uint16_t v) { return ecb_bswap16 (v); }
865     inline uint32_t ecb_bswap (uint32_t v) { return ecb_bswap32 (v); }
866     inline uint64_t ecb_bswap (uint64_t v) { return ecb_bswap64 (v); }
867    
868     template<typename T> inline T ecb_be_to_host (T v) { return ecb_little_endian () ? ecb_bswap (v) : v; }
869     template<typename T> inline T ecb_le_to_host (T v) { return ecb_big_endian () ? ecb_bswap (v) : v; }
870     template<typename T> inline T ecb_peek (const void *ptr) { return *(const T *)ptr; }
871     template<typename T> inline T ecb_peek_be (const void *ptr) { return ecb_be_to_host (ecb_peek <T> (ptr)); }
872     template<typename T> inline T ecb_peek_le (const void *ptr) { return ecb_le_to_host (ecb_peek <T> (ptr)); }
873 root 1.184 template<typename T> inline T ecb_peek_u (const void *ptr) { T v; memcpy (&v, ptr, sizeof (v)); return v; }
874 root 1.180 template<typename T> inline T ecb_peek_be_u (const void *ptr) { return ecb_be_to_host (ecb_peek_u<T> (ptr)); }
875     template<typename T> inline T ecb_peek_le_u (const void *ptr) { return ecb_le_to_host (ecb_peek_u<T> (ptr)); }
876    
877     template<typename T> inline T ecb_host_to_be (T v) { return ecb_little_endian () ? ecb_bswap (v) : v; }
878     template<typename T> inline T ecb_host_to_le (T v) { return ecb_big_endian () ? ecb_bswap (v) : v; }
879     template<typename T> inline void ecb_poke (void *ptr, T v) { *(T *)ptr = v; }
880     template<typename T> inline void ecb_poke_be (void *ptr, T v) { return ecb_poke <T> (ptr, ecb_host_to_be (v)); }
881     template<typename T> inline void ecb_poke_le (void *ptr, T v) { return ecb_poke <T> (ptr, ecb_host_to_le (v)); }
882 root 1.184 template<typename T> inline void ecb_poke_u (void *ptr, T v) { memcpy (ptr, &v, sizeof (v)); }
883 root 1.180 template<typename T> inline void ecb_poke_be_u (void *ptr, T v) { return ecb_poke_u<T> (ptr, ecb_host_to_be (v)); }
884     template<typename T> inline void ecb_poke_le_u (void *ptr, T v) { return ecb_poke_u<T> (ptr, ecb_host_to_le (v)); }
885    
886     #endif
887    
888     /*****************************************************************************/
889 root 1.199 /* pointer/integer hashing */
890    
891     /* based on hash by Chris Wellons, https://nullprogram.com/blog/2018/07/31/ */
892     ecb_function_ uint32_t ecb_mix32 (uint32_t v);
893     ecb_function_ uint32_t ecb_mix32 (uint32_t v)
894     {
895     v ^= v >> 16; v *= 0x7feb352dU;
896     v ^= v >> 15; v *= 0x846ca68bU;
897     v ^= v >> 16;
898     return v;
899     }
900    
901     ecb_function_ uint32_t ecb_unmix32 (uint32_t v);
902     ecb_function_ uint32_t ecb_unmix32 (uint32_t v)
903     {
904     v ^= v >> 16 ; v *= 0x43021123U;
905     v ^= v >> 15 ^ v >> 30; v *= 0x1d69e2a5U;
906     v ^= v >> 16 ;
907     return v;
908     }
909    
910     /* based on splitmix64, by Sebastiano Vigna, https://prng.di.unimi.it/splitmix64.c */
911     ecb_function_ uint64_t ecb_mix64 (uint64_t v);
912     ecb_function_ uint64_t ecb_mix64 (uint64_t v)
913     {
914     v ^= v >> 30; v *= 0xbf58476d1ce4e5b9U;
915     v ^= v >> 27; v *= 0x94d049bb133111ebU;
916     v ^= v >> 31;
917     return v;
918     }
919    
920     ecb_function_ uint64_t ecb_unmix64 (uint64_t v);
921     ecb_function_ uint64_t ecb_unmix64 (uint64_t v)
922     {
923     v ^= v >> 31 ^ v >> 62; v *= 0x319642b2d24d8ec3U;
924     v ^= v >> 27 ^ v >> 54; v *= 0x96de1b173f119089U;
925     v ^= v >> 30 ^ v >> 60;
926     return v;
927     }
928    
929     ecb_function_ uintptr_t ecb_ptrmix (void *p);
930     ecb_function_ uintptr_t ecb_ptrmix (void *p)
931     {
932     #if ECB_PTRSIZE <= 4
933     return ecb_mix32 ((uint32_t)p);
934     #else
935     return ecb_mix64 ((uint64_t)p);
936     #endif
937     }
938    
939     ecb_function_ void *ecb_ptrunmix (uintptr_t v);
940     ecb_function_ void *ecb_ptrunmix (uintptr_t v)
941     {
942     #if ECB_PTRSIZE <= 4
943     return (void *)ecb_unmix32 (v);
944     #else
945     return (void *)ecb_unmix64 (v);
946     #endif
947     }
948    
949     #if ECB_CPP
950    
951     template<typename T>
952     inline uintptr_t ecb_ptrmix (T *p)
953     {
954     return ecb_ptrmix (static_cast<void *>(p));
955     }
956    
957     template<typename T>
958     inline T *ecb_ptrunmix (uintptr_t v)
959     {
960     return static_cast<T *>(ecb_ptrunmix (v));
961     }
962    
963     #endif
964    
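/* Usage sketch (illustrative only): ecb_mix32/ecb_mix64 are invertible
 * (bijective) integer hashes, so values can be mixed into hash table buckets
 * and, if needed, recovered again with ecb_unmix32/ecb_unmix64; ecb_ptrmix
 * does the same for pointer values. */
#if 0
static unsigned int
example_bucket (void *p, unsigned int n_buckets) /* n_buckets must be a power of two */
{
  uintptr_t h = ecb_ptrmix (p); /* well-mixed hash of the pointer value */

  return (unsigned int)(h & (n_buckets - 1));
}
#endif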
965     /*****************************************************************************/
966 root 1.202 /* gray code */
967    
968 root 1.210 ecb_inline uint_fast8_t ecb_gray_encode8 (uint_fast8_t b) { return b ^ (b >> 1); }
969     ecb_inline uint_fast16_t ecb_gray_encode16 (uint_fast16_t b) { return b ^ (b >> 1); }
970     ecb_inline uint_fast32_t ecb_gray_encode32 (uint_fast32_t b) { return b ^ (b >> 1); }
971     ecb_inline uint_fast64_t ecb_gray_encode64 (uint_fast64_t b) { return b ^ (b >> 1); }
972 root 1.202
973 root 1.210 ecb_function_ uint8_t ecb_gray_decode8 (uint8_t g);
974     ecb_function_ uint8_t ecb_gray_decode8 (uint8_t g)
975 root 1.202 {
976 root 1.203 g ^= g >> 1;
977     g ^= g >> 2;
978     g ^= g >> 4;
979    
980 root 1.202 return g;
981     }
982    
983 root 1.210 ecb_function_ uint16_t ecb_gray_decode16 (uint16_t g);
984     ecb_function_ uint16_t ecb_gray_decode16 (uint16_t g)
985 root 1.202 {
986 root 1.203 g ^= g >> 1;
987     g ^= g >> 2;
988     g ^= g >> 4;
989     g ^= g >> 8;
990    
991 root 1.202 return g;
992     }
993    
994 root 1.210 ecb_function_ uint32_t ecb_gray_decode32 (uint32_t g);
995     ecb_function_ uint32_t ecb_gray_decode32 (uint32_t g)
996 root 1.202 {
997 root 1.203 g ^= g >> 1;
998     g ^= g >> 2;
999     g ^= g >> 4;
1000     g ^= g >> 8;
1001     g ^= g >> 16;
1002    
1003 root 1.202 return g;
1004     }
1005    
1006 root 1.210 ecb_function_ uint64_t ecb_gray_decode64 (uint64_t g);
1007     ecb_function_ uint64_t ecb_gray_decode64 (uint64_t g)
1008 root 1.202 {
1009 root 1.203 g ^= g >> 1;
1010     g ^= g >> 2;
1011     g ^= g >> 4;
1012     g ^= g >> 8;
1013     g ^= g >> 16;
1014     g ^= g >> 32;
1015    
1016 root 1.202 return g;
1017     }
1018    
1019     #if ECB_CPP
1020    
1021 root 1.210 ecb_inline uint8_t ecb_gray_encode (uint8_t b) { return ecb_gray_encode8 (b); }
1022     ecb_inline uint16_t ecb_gray_encode (uint16_t b) { return ecb_gray_encode16 (b); }
1023     ecb_inline uint32_t ecb_gray_encode (uint32_t b) { return ecb_gray_encode32 (b); }
1024     ecb_inline uint64_t ecb_gray_encode (uint64_t b) { return ecb_gray_encode64 (b); }
1025    
1026     ecb_inline uint8_t ecb_gray_decode (uint8_t g) { return ecb_gray_decode8 (g); }
1027     ecb_inline uint16_t ecb_gray_decode (uint16_t g) { return ecb_gray_decode16 (g); }
1028     ecb_inline uint32_t ecb_gray_decode (uint32_t g) { return ecb_gray_decode32 (g); }
1029     ecb_inline uint64_t ecb_gray_decode (uint64_t g) { return ecb_gray_decode64 (g); }
1030 root 1.202
1031     #endif
1032    
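/* Usage sketch (illustrative only): consecutive integers differ in exactly
 * one bit after gray encoding, and decoding restores the original value. */
#if 0
static void
example_gray (void)
{
  uint8_t g5 = ecb_gray_encode8 (5);  /* 0x07 */
  uint8_t g6 = ecb_gray_encode8 (6);  /* 0x05 - differs from g5 in exactly one bit */
  uint8_t b  = ecb_gray_decode8 (g6); /* 6 again */

  (void)g5; (void)b;
}
#endif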
1033     /*****************************************************************************/
1034 root 1.204 /* 2d hilbert curves */
1035    
1036     /* algorithm from the book Hacker's Delight, modified to not */
1037     /* run into undefined behaviour for n==16 */
1038     static uint32_t
1039     ecb_hilbert2d_index_to_coord32 (int n, uint32_t s)
1040     {
1041     uint32_t comp, swap, cs, t, sr;
1042    
1043     /* pad s on the left (unused) bits with 01 (no change groups) */
1044     s |= 0x55555555U << n << n;
1045     /* "s shift right" */
1046     sr = (s >> 1) & 0x55555555U;
1047     /* compute complement and swap info in two-bit groups */
1048     cs = ((s & 0x55555555U) + sr) ^ 0x55555555U;
1049    
1050     /* parallel prefix xor op to propagate both complement
1051     * and swap info together from left to right (there is
1052     * no step "cs ^= cs >> 1", so in effect it computes
1053     * two independent parallel prefix operations on two
1054     * interleaved sets of sixteen bits).
1055     */
1056     cs ^= cs >> 2;
1057     cs ^= cs >> 4;
1058     cs ^= cs >> 8;
1059     cs ^= cs >> 16;
1060    
1061     /* separate swap and complement bits */
1062     swap = cs & 0x55555555U;
1063     comp = (cs >> 1) & 0x55555555U;
1064    
1065     /* calculate coordinates in odd and even bit positions */
1066     t = (s & swap) ^ comp;
1067     s = s ^ sr ^ t ^ (t << 1);
1068    
1069     /* unpad/clear out any junk on the left */
1070     s = s & (((uint32_t)1 << n << n) - 1);
1071    
1072     /* Now "unshuffle" to separate the x and y bits. */
1073     t = (s ^ (s >> 1)) & 0x22222222U; s ^= t ^ (t << 1);
1074     t = (s ^ (s >> 2)) & 0x0c0c0c0cU; s ^= t ^ (t << 2);
1075     t = (s ^ (s >> 4)) & 0x00f000f0U; s ^= t ^ (t << 4);
1076     t = (s ^ (s >> 8)) & 0x0000ff00U; s ^= t ^ (t << 8);
1077    
1078     /* now s contains two 16-bit coordinates */
1079     return s;
1080     }
1081    
1082     /* 64 bit, a straightforward extension to the 32 bit case */
1083     static uint64_t
1084     ecb_hilbert2d_index_to_coord64 (int n, uint64_t s)
1085     {
1086     uint64_t comp, swap, cs, t, sr;
1087    
1088     /* pad s on the left (unused) bits with 01 (no change groups) */
1089     s |= 0x5555555555555555U << n << n;
1090     /* "s shift right" */
1091     sr = (s >> 1) & 0x5555555555555555U;
1092     /* compute complement and swap info in two-bit groups */
1093     cs = ((s & 0x5555555555555555U) + sr) ^ 0x5555555555555555U;
1094    
1095     /* parallel prefix xor op to propagate both complement
1096     * and swap info together from left to right (there is
1097     * no step "cs ^= cs >> 1", so in effect it computes
1098     * two independent parallel prefix operations on two
1099     * interleaved sets of thirty-two bits).
1100     */
1101     cs ^= cs >> 2;
1102     cs ^= cs >> 4;
1103     cs ^= cs >> 8;
1104     cs ^= cs >> 16;
1105     cs ^= cs >> 32;
1106    
1107     /* separate swap and complement bits */
1108     swap = cs & 0x5555555555555555U;
1109     comp = (cs >> 1) & 0x5555555555555555U;
1110    
1111     /* calculate coordinates in odd and even bit positions */
1112     t = (s & swap) ^ comp;
1113     s = s ^ sr ^ t ^ (t << 1);
1114    
1115     /* unpad/clear out any junk on the left */
1116     s = s & (((uint64_t)1 << n << n) - 1);
1117    
1118     /* Now "unshuffle" to separate the x and y bits. */
1119     t = (s ^ (s >> 1)) & 0x2222222222222222U; s ^= t ^ (t << 1);
1120     t = (s ^ (s >> 2)) & 0x0c0c0c0c0c0c0c0cU; s ^= t ^ (t << 2);
1121     t = (s ^ (s >> 4)) & 0x00f000f000f000f0U; s ^= t ^ (t << 4);
1122     t = (s ^ (s >> 8)) & 0x0000ff000000ff00U; s ^= t ^ (t << 8);
1123     t = (s ^ (s >> 16)) & 0x00000000ffff0000U; s ^= t ^ (t << 16);
1124    
1125     /* now s contains two 32-bit coordinates */
1126     return s;
1127     }
1128    
1129     /* algorithm from the book Hacker's Delight, but a similar algorithm*/
1130     /* is given in https://doi.org/10.1002/spe.4380160103 */
1131     /* this has been slightly improved over the original version */
1132     ecb_function_ uint32_t
1133     ecb_hilbert2d_coord_to_index32 (int n, uint32_t xy)
1134     {
1135     uint32_t row;
1136     uint32_t state = 0;
1137     uint32_t s = 0;
1138    
1139     do
1140     {
1141     --n;
1142    
1143     row = 4 * state
1144     | (2 & (xy >> n >> 15))
1145     | (1 & (xy >> n ));
1146    
1147     /* these funky constants are lookup tables for two-bit values */
1148     s = (s << 2) | (0x361e9cb4U >> 2 * row) & 3;
1149     state = (0x8fe65831U >> 2 * row) & 3;
1150     }
1151     while (n > 0);
1152    
1153     return s;
1154     }
1155    
1156     /* 64 bit, essentially the same as 32 bit */
1157     ecb_function_ uint64_t
1158     ecb_hilbert2d_coord_to_index64 (int n, uint64_t xy)
1159     {
1160     uint32_t row;
1161     uint32_t state = 0;
1162     uint64_t s = 0;
1163    
1164     do
1165     {
1166     --n;
1167    
1168     row = 4 * state
1169     | (2 & (xy >> n >> 31))
1170     | (1 & (xy >> n ));
1171    
1172     /* these funky constants are lookup tables for two-bit values */
1173     s = (s << 2) | (0x361e9cb4U >> 2 * row) & 3;
1174     state = (0x8fe65831U >> 2 * row) & 3;
1175     }
1176     while (n > 0);
1177    
1178     return s;
1179     }
1180    
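/* Usage sketch (illustrative only): for an n-bit-per-coordinate curve the
 * index runs from 0 to 2**(2n)-1; the packed coordinate word keeps one
 * coordinate in each 16 bit half (32 bit version). coord_to_index and
 * index_to_coord map between the two representations. */
#if 0
static void
example_hilbert (void)
{
  int n = 8;                           /* 8 bits per coordinate, a 256x256 grid */
  uint32_t xy = (uint32_t)3 << 16 | 5; /* one coordinate 3, the other 5, packed 16:16 */
  uint32_t s  = ecb_hilbert2d_coord_to_index32 (n, xy);
  uint32_t back = ecb_hilbert2d_index_to_coord32 (n, s); /* recovers the packed coordinates */

  (void)back;
}
#endif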
1181     /*****************************************************************************/
1182 root 1.188 /* division */
1183 root 1.180
1184 root 1.39 #if ECB_GCC_VERSION(3,0) || ECB_C99
1185 root 1.188 /* C99 tightened the definition of %, so we can use a more efficient version */
1186 root 1.35 #define ecb_mod(m,n) ((m) % (n) + ((m) % (n) < 0 ? (n) : 0))
1187 root 1.31 #else
1188 root 1.35 #define ecb_mod(m,n) ((m) < 0 ? ((n) - 1 - ((-1 - (m)) % (n))) : ((m) % (n)))
1189 root 1.31 #endif
1190 root 1.21
1191 root 1.149 #if ECB_CPP
1192 sf-exg 1.68 template<typename T>
1193     static inline T ecb_div_rd (T val, T div)
1194     {
1195     return val < 0 ? - ((-val + div - 1) / div) : (val ) / div;
1196     }
1197     template<typename T>
1198     static inline T ecb_div_ru (T val, T div)
1199     {
1200     return val < 0 ? - ((-val ) / div) : (val + div - 1) / div;
1201     }
1202     #else
1203     #define ecb_div_rd(val,div) ((val) < 0 ? - ((-(val) + (div) - 1) / (div)) : ((val) ) / (div))
1204     #define ecb_div_ru(val,div) ((val) < 0 ? - ((-(val) ) / (div)) : ((val) + (div) - 1) / (div))
1205     #endif
1206 sf-exg 1.67
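/* Usage sketch (illustrative only): ecb_mod always yields a result in [0, n),
 * and ecb_div_rd/ecb_div_ru round towards negative/positive infinity, unlike
 * plain C division, which truncates towards zero. */
#if 0
static void
example_rounded_division (void)
{
  int a = ecb_mod (-1, 5);    /*  4, while -1 % 5 is -1 in C99 */
  int b = ecb_div_rd (-7, 2); /* -4, while -7 / 2 is -3 */
  int c = ecb_div_ru (-7, 2); /* -3 */
  int d = ecb_div_ru ( 7, 2); /*  4 */

  (void)a; (void)b; (void)c; (void)d;
}
#endif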
1207 root 1.188 /*****************************************************************************/
1208     /* array length */
1209    
1210 root 1.5 #if ecb_cplusplus_does_not_suck
1211 root 1.40 /* does not work for local types (http://www.open-std.org/jtc1/sc22/wg21/docs/papers/2008/n2657.htm) */
1212 root 1.35 template<typename T, int N>
1213     static inline int ecb_array_length (const T (&arr)[N])
1214     {
1215     return N;
1216     }
1217 root 1.5 #else
1218 root 1.35 #define ecb_array_length(name) (sizeof (name) / sizeof (name [0]))
1219 root 1.5 #endif
1220    
1221 root 1.180 /*****************************************************************************/
1222 root 1.188 /* IEEE 754-2008 half float conversions */
1223 root 1.180
1224 root 1.170 ecb_function_ ecb_const uint32_t ecb_binary16_to_binary32 (uint32_t x);
1225 root 1.167 ecb_function_ ecb_const uint32_t
1226 root 1.170 ecb_binary16_to_binary32 (uint32_t x)
1227 root 1.167 {
1228     unsigned int s = (x & 0x8000) << (31 - 15);
1229     int e = (x >> 10) & 0x001f;
1230     unsigned int m = x & 0x03ff;
1231    
1232     if (ecb_expect_false (e == 31))
1233     /* infinity or NaN */
1234     e = 255 - (127 - 15);
1235     else if (ecb_expect_false (!e))
1236     {
1237     if (ecb_expect_true (!m))
1238     /* zero, handled by code below by forcing e to 0 */
1239     e = 0 - (127 - 15);
1240     else
1241     {
1242     /* subnormal, renormalise */
1243     unsigned int s = 10 - ecb_ld32 (m);
1244    
1245     m = (m << s) & 0x3ff; /* mask implicit bit */
1246     e -= s - 1;
1247     }
1248     }
1249    
1250     /* e and m are now normalised, or zero (or inf or nan) */
1251     e += 127 - 15;
1252    
1253     return s | (e << 23) | (m << (23 - 10));
1254     }
1255    
1256     ecb_function_ ecb_const uint16_t ecb_binary32_to_binary16 (uint32_t x);
1257     ecb_function_ ecb_const uint16_t
1258     ecb_binary32_to_binary16 (uint32_t x)
1259     {
1260     unsigned int s = (x >> 16) & 0x00008000; /* sign bit, the easy part */
1261 root 1.188 int e = ((x >> 23) & 0x000000ff) - (127 - 15); /* the desired exponent */
1262 root 1.167 unsigned int m = x & 0x007fffff;
1263    
1264     x &= 0x7fffffff;
1265    
1266     /* if it's within range of binary16 normals, use fast path */
1267     if (ecb_expect_true (0x38800000 <= x && x <= 0x477fefff))
1268     {
1269     /* mantissa round-to-even */
1270     m += 0x00000fff + ((m >> (23 - 10)) & 1);
1271    
1272     /* handle overflow */
1273     if (ecb_expect_false (m >= 0x00800000))
1274     {
1275     m >>= 1;
1276     e += 1;
1277     }
1278    
1279     return s | (e << 10) | (m >> (23 - 10));
1280     }
1281    
1282     /* handle large numbers and infinity */
1283     if (ecb_expect_true (0x477fefff < x && x <= 0x7f800000))
1284     return s | 0x7c00;
1285    
1286 root 1.169 /* handle zero, subnormals and small numbers */
1287 root 1.167 if (ecb_expect_true (x < 0x38800000))
1288     {
1289     /* zero */
1290     if (ecb_expect_true (!x))
1291     return s;
1292    
1293     /* handle subnormals */
1294    
1295 root 1.169 /* too small, will be zero */
1296     if (e < (14 - 24)) /* might not be sharp, but is good enough */
1297     return s;
1298    
1299 root 1.167 m |= 0x00800000; /* make implicit bit explicit */
1300    
1301     /* very tricky - we need to round to the nearest e (+10) bit value */
1302     {
1303     unsigned int bits = 14 - e;
1304     unsigned int half = (1 << (bits - 1)) - 1;
1305     unsigned int even = (m >> bits) & 1;
1306    
1307     /* if this overflows, we will end up with a normalised number */
1308     m = (m + half + even) >> bits;
1309     }
1310    
1311     return s | m;
1312     }
1313    
1314     /* handle NaNs, preserve leftmost nan bits, but make sure we don't turn them into infinities */
1315     m >>= 13;
1316    
1317     return s | 0x7c00 | m | !m;
1318     }
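
/* Illustrative sketch (disabled): 1.0 is 0x3f800000 as binary32 and 0x3c00 as
 * binary16; the two helpers above convert between the raw bit patterns. The
 * helper name is made up for the example. */
#if 0
#include <assert.h>

static void
example_binary16_bits (void)
{
  assert (ecb_binary16_to_binary32 (0x3c00) == 0x3f800000);
  assert (ecb_binary32_to_binary16 (0x3f800000) == 0x3c00);
}
#endif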
1319    
1320 root 1.104 /*******************************************************************************/
1321 root 1.191 /* fast integer to ascii */
1322    
1323 root 1.195     /*
1324     * This code is fairly involved because it is general. The idea behind it,
1325     * however, is simple: first, the number is multiplied by a scaling factor
1326 root 1.197     * (2**bits / 10**(digits-1)) to convert the integer into a fixed-point
1327 root 1.195     * number with the first (most significant) digit in the upper bits.
1328     * That digit is converted to text and masked out. The remaining number
1329     * is then multiplied by 10, by multiplying the fixed-point representation
1330     * by 5 and shifting the (binary) decimal point one bit to the right, so a
1331     * 4.28 format becomes 5.27, 6.26 and so on.
1332     * The rest only involves advancing the pointer once a non-zero digit has
1333     * been generated, so leading zeroes are overwritten.
1334     */
1335    
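/*
 * Worked example (illustrative, using the 4.28 layout mentioned above; the
 * macros below pick different bit counts per digit count): to print up to
 * four digits, u is scaled by roughly 2**28/1000, i.e. the rounded-up factor
 * 268436. For u = 1234:
 *   x = 1234 * 268436            -> x >> 28 = 1   first digit '1'
 *   x = (x & (2**28 - 1)) * 5    -> x >> 27 = 2   second digit '2'
 *   x = (x & (2**27 - 1)) * 5    -> x >> 26 = 3   third digit '3'
 *   x = (x & (2**26 - 1)) * 5    -> x >> 25 = 4   fourth digit '4'
 * Each step multiplies the fraction by ten by multiplying by five and moving
 * the binary point one bit to the right.
 */
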
1336 root 1.201     /* simply expands to a mask with the lowest "bits" bits set */
1337 root 1.191 #define ecb_i2a_mask(type,bits) ((((type)1) << (bits)) - 1)
1338    
1339 root 1.200     /* output a single digit. maskvalue is 10**digitidx */
1340 root 1.191 #define ecb_i2a_digit(type,bits,digitmask,maskvalue,digitidx) \
1341     if (digitmask >= maskvalue) /* constant, used to decide how many digits to generate */ \
1342     { \
1343     char digit = x >> (bits - digitidx); /* calculate the topmost digit */ \
1344     *ptr = digit + '0'; /* output it */ \
1345     nz = (digitmask == maskvalue) || nz || digit; /* first term == always output last digit */ \
1346     ptr += nz; /* output digit only if non-zero digit seen */ \
1347     x = (x & ecb_i2a_mask (type, bits - digitidx)) * 5; /* *10, but shift decimal point right */ \
1348     }
1349    
1350 root 1.200 /* convert integer to fixed point format and multiply out digits, highest first */
1351     /* requires magic constants: max. digits and number of bits after the decimal point */
1352 root 1.191 #define ecb_i2a_def(suffix,ptr,v,type,bits,digitmask,lz) \
1353     ecb_inline char *ecb_i2a_ ## suffix (char *ptr, uint32_t u) \
1354     { \
1355     char nz = lz; /* non-zero digit seen? */ \
1356     /* convert to x.bits fixed-point */ \
1357     type x = u * ((ecb_i2a_mask (type, bits) + digitmask) / digitmask); \
1358     /* output up to 10 digits */ \
1359     ecb_i2a_digit (type,bits,digitmask, 1, 0); \
1360     ecb_i2a_digit (type,bits,digitmask, 10, 1); \
1361     ecb_i2a_digit (type,bits,digitmask, 100, 2); \
1362     ecb_i2a_digit (type,bits,digitmask, 1000, 3); \
1363     ecb_i2a_digit (type,bits,digitmask, 10000, 4); \
1364     ecb_i2a_digit (type,bits,digitmask, 100000, 5); \
1365     ecb_i2a_digit (type,bits,digitmask, 1000000, 6); \
1366     ecb_i2a_digit (type,bits,digitmask, 10000000, 7); \
1367     ecb_i2a_digit (type,bits,digitmask, 100000000, 8); \
1368     ecb_i2a_digit (type,bits,digitmask, 1000000000, 9); \
1369     return ptr; \
1370     }
1371    
1372 root 1.200 /* predefined versions of the above, for various digits */
1373     /* ecb_i2a_xN = almost N digits, limit defined by macro */
1374     /* ecb_i2a_N = up to N digits, leading zeroes suppressed */
1375     /* ecb_i2a_0N = exactly N digits, including leading zeroes */
1376    
1377     /* non-leading-zero versions, limited range */
1378     #define ECB_I2A_MAX_X5 59074 /* limit for ecb_i2a_x5 */
1379     #define ECB_I2A_MAX_X10 2932500665 /* limit for ecb_i2a_x10 */
1380 root 1.191 ecb_i2a_def ( x5, ptr, v, uint32_t, 26, 10000, 0)
1381     ecb_i2a_def (x10, ptr, v, uint64_t, 60, 1000000000, 0)
1382    
1383 root 1.200 /* non-leading zero versions, all digits, 4 and 9 are optimal for 32/64 bit */
1384 root 1.194 ecb_i2a_def ( 2, ptr, v, uint32_t, 10, 10, 0)
1385     ecb_i2a_def ( 3, ptr, v, uint32_t, 12, 100, 0)
1386     ecb_i2a_def ( 4, ptr, v, uint32_t, 26, 1000, 0)
1387     ecb_i2a_def ( 5, ptr, v, uint64_t, 30, 10000, 0)
1388     ecb_i2a_def ( 6, ptr, v, uint64_t, 36, 100000, 0)
1389     ecb_i2a_def ( 7, ptr, v, uint64_t, 44, 1000000, 0)
1390     ecb_i2a_def ( 8, ptr, v, uint64_t, 50, 10000000, 0)
1391     ecb_i2a_def ( 9, ptr, v, uint64_t, 56, 100000000, 0)
1392 root 1.191
1393 root 1.200 /* leading-zero versions, all digits, 04 and 09 are optimal for 32/64 bit */
1394 root 1.194 ecb_i2a_def (02, ptr, v, uint32_t, 10, 10, 1)
1395     ecb_i2a_def (03, ptr, v, uint32_t, 12, 100, 1)
1396     ecb_i2a_def (04, ptr, v, uint32_t, 26, 1000, 1)
1397     ecb_i2a_def (05, ptr, v, uint64_t, 30, 10000, 1)
1398     ecb_i2a_def (06, ptr, v, uint64_t, 36, 100000, 1)
1399     ecb_i2a_def (07, ptr, v, uint64_t, 44, 1000000, 1)
1400     ecb_i2a_def (08, ptr, v, uint64_t, 50, 10000000, 1)
1401     ecb_i2a_def (09, ptr, v, uint64_t, 56, 100000000, 1)
1402 root 1.191
1403 root 1.192 #define ECB_I2A_I32_DIGITS 11
1404     #define ECB_I2A_U32_DIGITS 10
1405     #define ECB_I2A_I64_DIGITS 20
1406 root 1.194 #define ECB_I2A_U64_DIGITS 21
1407 root 1.193 #define ECB_I2A_MAX_DIGITS 21
1408 root 1.192
1409 root 1.191 ecb_inline char *
1410     ecb_i2a_u32 (char *ptr, uint32_t u)
1411     {
1412     #if ECB_64BIT_NATIVE
1413     if (ecb_expect_true (u <= ECB_I2A_MAX_X10))
1414     ptr = ecb_i2a_x10 (ptr, u);
1415 root 1.200 else /* x10 almost, but not fully, covers 32 bit */
1416 root 1.191 {
1417     uint32_t u1 = u % 1000000000;
1418     uint32_t u2 = u / 1000000000;
1419    
1420     *ptr++ = u2 + '0';
1421     ptr = ecb_i2a_09 (ptr, u1);
1422     }
1423     #else
1424     if (ecb_expect_true (u <= ECB_I2A_MAX_X5))
1425     ptr = ecb_i2a_x5 (ptr, u);
1426     else if (ecb_expect_true (u <= ECB_I2A_MAX_X5 * 10000))
1427     {
1428     uint32_t u1 = u % 10000;
1429     uint32_t u2 = u / 10000;
1430    
1431     ptr = ecb_i2a_x5 (ptr, u2);
1432     ptr = ecb_i2a_04 (ptr, u1);
1433     }
1434     else
1435     {
1436     uint32_t u1 = u % 10000;
1437     uint32_t ua = u / 10000;
1438     uint32_t u2 = ua % 10000;
1439     uint32_t u3 = ua / 10000;
1440    
1441     ptr = ecb_i2a_2 (ptr, u3);
1442     ptr = ecb_i2a_04 (ptr, u2);
1443     ptr = ecb_i2a_04 (ptr, u1);
1444     }
1445     #endif
1446    
1447     return ptr;
1448     }
1449    
1450     ecb_inline char *
1451     ecb_i2a_i32 (char *ptr, int32_t v)
1452     {
1453     *ptr = '-'; ptr += v < 0;
1454     uint32_t u = v < 0 ? -(uint32_t)v : v;
1455    
1456     #if ECB_64BIT_NATIVE
1457 root 1.200 ptr = ecb_i2a_x10 (ptr, u); /* x10 fully covers 31 bit */
1458 root 1.191 #else
1459     ptr = ecb_i2a_u32 (ptr, u);
1460     #endif
1461    
1462     return ptr;
1463     }
1464    
1465     ecb_inline char *
1466     ecb_i2a_u64 (char *ptr, uint64_t u)
1467     {
1468     #if ECB_64BIT_NATIVE
1469     if (ecb_expect_true (u <= ECB_I2A_MAX_X10))
1470     ptr = ecb_i2a_x10 (ptr, u);
1471     else if (ecb_expect_false (u <= ECB_I2A_MAX_X10 * 1000000000))
1472     {
1473     uint64_t u1 = u % 1000000000;
1474     uint64_t u2 = u / 1000000000;
1475    
1476     ptr = ecb_i2a_x10 (ptr, u2);
1477     ptr = ecb_i2a_09 (ptr, u1);
1478     }
1479     else
1480     {
1481     uint64_t u1 = u % 1000000000;
1482     uint64_t ua = u / 1000000000;
1483     uint64_t u2 = ua % 1000000000;
1484     uint64_t u3 = ua / 1000000000;
1485    
1486     ptr = ecb_i2a_2 (ptr, u3);
1487     ptr = ecb_i2a_09 (ptr, u2);
1488     ptr = ecb_i2a_09 (ptr, u1);
1489     }
1490     #else
1491     if (ecb_expect_true (u <= ECB_I2A_MAX_X5))
1492     ptr = ecb_i2a_x5 (ptr, u);
1493     else
1494     {
1495     uint64_t u1 = u % 10000;
1496     uint64_t u2 = u / 10000;
1497    
1498     ptr = ecb_i2a_u64 (ptr, u2);
1499     ptr = ecb_i2a_04 (ptr, u1);
1500     }
1501     #endif
1502    
1503     return ptr;
1504     }
1505    
1506     ecb_inline char *
1507     ecb_i2a_i64 (char *ptr, int64_t v)
1508     {
1509     *ptr = '-'; ptr += v < 0;
1510     uint64_t u = v < 0 ? -(uint64_t)v : v;
1511    
1512     #if ECB_64BIT_NATIVE
1513     if (ecb_expect_true (u <= ECB_I2A_MAX_X10))
1514     ptr = ecb_i2a_x10 (ptr, u);
1515     else if (ecb_expect_false (u <= ECB_I2A_MAX_X10 * 1000000000))
1516     {
1517     uint64_t u1 = u % 1000000000;
1518     uint64_t u2 = u / 1000000000;
1519    
1520     ptr = ecb_i2a_x10 (ptr, u2);
1521     ptr = ecb_i2a_09 (ptr, u1);
1522     }
1523     else
1524     {
1525     uint64_t u1 = u % 1000000000;
1526     uint64_t ua = u / 1000000000;
1527     uint64_t u2 = ua % 1000000000;
1528     uint64_t u3 = ua / 1000000000;
1529    
1530 root 1.200     /* 2**63 is 19 digits, so the top is exactly one digit */
1531 root 1.191 *ptr++ = u3 + '0';
1532     ptr = ecb_i2a_09 (ptr, u2);
1533     ptr = ecb_i2a_09 (ptr, u1);
1534     }
1535     #else
1536     ptr = ecb_i2a_u64 (ptr, u);
1537     #endif
1538    
1539     return ptr;
1540     }
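
/* Illustrative usage sketch (disabled): the ecb_i2a_* converters write raw
 * digits without a trailing NUL and return a pointer past the last character
 * written, so the caller sizes the buffer with the ECB_I2A_*_DIGITS constants
 * and terminates the string itself. The helper name is made up. */
#if 0
static void
example_i2a (char *out /* at least ECB_I2A_I64_DIGITS + 1 bytes */)
{
  char *end = ecb_i2a_i64 (out, -1234567890123456789LL);

  *end = 0; /* out now holds "-1234567890123456789" */
}
#endif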
1541    
1542     /*******************************************************************************/
1543 root 1.104 /* floating point stuff, can be disabled by defining ECB_NO_LIBM */
1544    
1545     /* basically, everything uses "ieee pure-endian" floating point numbers */
1546     /* the only noteworthy exception is ancient armle, which uses order 43218765 */
1547     #if 0 \
1548     || __i386 || __i386__ \
1549 sf-exg 1.159 || ECB_GCC_AMD64 \
1550 root 1.104 || __powerpc__ || __ppc__ || __powerpc64__ || __ppc64__ \
1551     || defined __s390__ || defined __s390x__ \
1552     || defined __mips__ \
1553     || defined __alpha__ \
1554     || defined __hppa__ \
1555     || defined __ia64__ \
1556 root 1.117 || defined __m68k__ \
1557     || defined __m88k__ \
1558     || defined __sh__ \
1559 sf-exg 1.159 || defined _M_IX86 || defined ECB_MSVC_AMD64 || defined _M_IA64 \
1560 root 1.131 || (defined __arm__ && (defined __ARM_EABI__ || defined __EABI__ || defined __VFP_FP__ || defined _WIN32_WCE || defined __ANDROID__)) \
1561 root 1.132 || defined __aarch64__
1562 root 1.104 #define ECB_STDFP 1
1563 root 1.102 #else
1564 root 1.104 #define ECB_STDFP 0
1565 root 1.102 #endif
1566    
1567 root 1.104 #ifndef ECB_NO_LIBM
1568 root 1.103
1569 root 1.121 #include <math.h> /* for frexp*, ldexp*, INFINITY, NAN */
1570    
1571 root 1.122     /* only the oldest of the old doesn't have this one: Solaris */
1572     #ifdef INFINITY
1573     #define ECB_INFINITY INFINITY
1574     #else
1575     #define ECB_INFINITY HUGE_VAL
1576     #endif
1577    
1578     #ifdef NAN
1579 root 1.121 #define ECB_NAN NAN
1580     #else
1581 root 1.122 #define ECB_NAN ECB_INFINITY
1582 root 1.121 #endif
1583 root 1.120
1584 root 1.148 #if ECB_C99 || _XOPEN_VERSION >= 600 || _POSIX_VERSION >= 200112L
1585 root 1.150 #define ecb_ldexpf(x,e) ldexpf ((x), (e))
1586 sf-exg 1.163 #define ecb_frexpf(x,e) frexpf ((x), (e))
1587 root 1.148 #else
1588 sf-exg 1.161 #define ecb_ldexpf(x,e) (float) ldexp ((double) (x), (e))
1589 sf-exg 1.163 #define ecb_frexpf(x,e) (float) frexp ((double) (x), (e))
1590 root 1.148 #endif
1591    
1592 root 1.104 /* convert a float to ieee single/binary32 */
1593 root 1.151 ecb_function_ ecb_const uint32_t ecb_float_to_binary32 (float x);
1594     ecb_function_ ecb_const uint32_t
1595 root 1.103 ecb_float_to_binary32 (float x)
1596     {
1597     uint32_t r;
1598    
1599     #if ECB_STDFP
1600 root 1.104 memcpy (&r, &x, 4);
1601 root 1.103 #else
1602 root 1.105 /* slow emulation, works for anything but -0 */
1603 root 1.103 uint32_t m;
1604     int e;
1605    
1606 root 1.108 if (x == 0e0f ) return 0x00000000U;
1607 root 1.103 if (x > +3.40282346638528860e+38f) return 0x7f800000U;
1608     if (x < -3.40282346638528860e+38f) return 0xff800000U;
1609 root 1.105 if (x != x ) return 0x7fbfffffU;
1610 root 1.103
1611 sf-exg 1.163 m = ecb_frexpf (x, &e) * 0x1000000U;
1612 root 1.103
1613     r = m & 0x80000000U;
1614    
1615     if (r)
1616     m = -m;
1617    
1618 root 1.108 if (e <= -126)
1619 root 1.103 {
1620     m &= 0xffffffU;
1621     m >>= (-125 - e);
1622     e = -126;
1623     }
1624    
1625     r |= (e + 126) << 23;
1626     r |= m & 0x7fffffU;
1627     #endif
1628    
1629     return r;
1630     }
1631    
1632 root 1.104 /* converts an ieee single/binary32 to a float */
1633 root 1.151 ecb_function_ ecb_const float ecb_binary32_to_float (uint32_t x);
1634     ecb_function_ ecb_const float
1635 root 1.103 ecb_binary32_to_float (uint32_t x)
1636     {
1637     float r;
1638    
1639     #if ECB_STDFP
1640 root 1.104 memcpy (&r, &x, 4);
1641 root 1.103 #else
1642     /* emulation, only works for normals and subnormals and +0 */
1643     int neg = x >> 31;
1644     int e = (x >> 23) & 0xffU;
1645    
1646     x &= 0x7fffffU;
1647    
1648     if (e)
1649     x |= 0x800000U;
1650 root 1.104 else
1651     e = 1;
1652 root 1.103
1653     /* we distrust ldexpf a bit and do the 2**-24 scaling by an extra multiply */
1654 root 1.148 r = ecb_ldexpf (x * (0.5f / 0x800000U), e - 126);
1655 root 1.103
1656     r = neg ? -r : r;
1657     #endif
1658    
1659     return r;
1660     }
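
/* Illustrative sketch (disabled): round-trip a float through its IEEE-754
 * binary32 bit pattern; on ECB_STDFP targets both directions are plain
 * memcpys. The helper name is made up for the example. */
#if 0
#include <assert.h>

static void
example_binary32_roundtrip (void)
{
  uint32_t bits = ecb_float_to_binary32 (1.5f);

  assert (bits == 0x3fc00000U);
  assert (ecb_binary32_to_float (bits) == 1.5f);
}
#endif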
1661    
1662 root 1.104 /* convert a double to ieee double/binary64 */
1663 root 1.151 ecb_function_ ecb_const uint64_t ecb_double_to_binary64 (double x);
1664     ecb_function_ ecb_const uint64_t
1665 root 1.103 ecb_double_to_binary64 (double x)
1666     {
1667 root 1.104 uint64_t r;
1668    
1669     #if ECB_STDFP
1670     memcpy (&r, &x, 8);
1671     #else
1672 root 1.105 /* slow emulation, works for anything but -0 */
1673 root 1.104 uint64_t m;
1674     int e;
1675    
1676 root 1.108 if (x == 0e0 ) return 0x0000000000000000U;
1677 root 1.104 if (x > +1.79769313486231470e+308) return 0x7ff0000000000000U;
1678     if (x < -1.79769313486231470e+308) return 0xfff0000000000000U;
1679 root 1.105     if (x != x ) return 0x7ff7ffffffffffffU;
1680 root 1.104
1681     m = frexp (x, &e) * 0x20000000000000U;
1682    
1683     r = m & 0x8000000000000000U;
1684    
1685     if (r)
1686     m = -m;
1687    
1688 root 1.108 if (e <= -1022)
1689 root 1.104 {
1690     m &= 0x1fffffffffffffU;
1691     m >>= (-1021 - e);
1692     e = -1022;
1693     }
1694    
1695     r |= ((uint64_t)(e + 1022)) << 52;
1696     r |= m & 0xfffffffffffffU;
1697     #endif
1698    
1699     return r;
1700     }
1701    
1702     /* converts an ieee double/binary64 to a double */
1703 root 1.151 ecb_function_ ecb_const double ecb_binary64_to_double (uint64_t x);
1704     ecb_function_ ecb_const double
1705 root 1.104 ecb_binary64_to_double (uint64_t x)
1706     {
1707     double r;
1708    
1709     #if ECB_STDFP
1710     memcpy (&r, &x, 8);
1711     #else
1712     /* emulation, only works for normals and subnormals and +0 */
1713     int neg = x >> 63;
1714     int e = (x >> 52) & 0x7ffU;
1715    
1716     x &= 0xfffffffffffffU;
1717    
1718     if (e)
1719     x |= 0x10000000000000U;
1720     else
1721     e = 1;
1722    
1723 root 1.107 /* we distrust ldexp a bit and do the 2**-53 scaling by an extra multiply */
1724 root 1.108 r = ldexp (x * (0.5 / 0x10000000000000U), e - 1022);
1725 root 1.104
1726     r = neg ? -r : r;
1727     #endif
1728    
1729     return r;
1730 root 1.103 }
1731    
1732 root 1.167 /* convert a float to ieee half/binary16 */
1733     ecb_function_ ecb_const uint16_t ecb_float_to_binary16 (float x);
1734     ecb_function_ ecb_const uint16_t
1735     ecb_float_to_binary16 (float x)
1736     {
1737     return ecb_binary32_to_binary16 (ecb_float_to_binary32 (x));
1738     }
1739    
1740     /* convert an ieee half/binary16 to float */
1741     ecb_function_ ecb_const float ecb_binary16_to_float (uint16_t x);
1742     ecb_function_ ecb_const float
1743     ecb_binary16_to_float (uint16_t x)
1744     {
1745     return ecb_binary32_to_float (ecb_binary16_to_binary32 (x));
1746     }
1747    
1748 root 1.103 #endif
1749 root 1.102
1750 root 1.1 #endif
1751