/cvs/libecb/ecb.h
Revision: 1.209
Committed: Fri Mar 25 15:23:14 2022 UTC by root
Content type: text/plain
Branch: MAIN
Changes since 1.208: +0 -3 lines

File Contents

# User Rev Content
1 root 1.1 /*
2 root 1.17 * libecb - http://software.schmorp.de/pkg/libecb
3 root 1.1 *
4 root 1.189 * Copyright (c) 2009-2015,2018-2021 Marc Alexander Lehmann <libecb@schmorp.de>
5 root 1.7 * Copyright (c) 2011 Emanuele Giaquinta
6 root 1.1 * All rights reserved.
7     *
8     * Redistribution and use in source and binary forms, with or without modifica-
9     * tion, are permitted provided that the following conditions are met:
10     *
11     * 1. Redistributions of source code must retain the above copyright notice,
12     * this list of conditions and the following disclaimer.
13     *
14     * 2. Redistributions in binary form must reproduce the above copyright
15     * notice, this list of conditions and the following disclaimer in the
16     * documentation and/or other materials provided with the distribution.
17     *
18     * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
19     * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MER-
20     * CHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
21     * EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPE-
22     * CIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
23     * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
24     * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
25     * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTH-
26     * ERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
27     * OF THE POSSIBILITY OF SUCH DAMAGE.
28 root 1.133 *
29     * Alternatively, the contents of this file may be used under the terms of
30     * the GNU General Public License ("GPL") version 2 or any later version,
31     * in which case the provisions of the GPL are applicable instead of
32     * the above. If you wish to allow the use of your version of this file
33     * only under the terms of the GPL and not to allow others to use your
34     * version of this file under the BSD license, indicate your decision
35     * by deleting the provisions above and replace them with the notice
36     * and other provisions required by the GPL. If you do not delete the
37     * provisions above, a recipient may use your version of this file under
38     * either the BSD or the GPL.
39 root 1.1 */
40    
41     #ifndef ECB_H
42     #define ECB_H
43    
44 root 1.87 /* 16 bits major, 16 bits minor */
45 root 1.204 #define ECB_VERSION 0x0001000c
46 root 1.87
47 root 1.184 #include <string.h> /* for memcpy */
48    
49 root 1.187 #if defined (_WIN32) && !defined (__MINGW32__)
50 root 1.44 typedef signed char int8_t;
51     typedef unsigned char uint8_t;
52 root 1.180 typedef signed char int_fast8_t;
53     typedef unsigned char uint_fast8_t;
54 root 1.44 typedef signed short int16_t;
55     typedef unsigned short uint16_t;
56 root 1.180 typedef signed int int_fast16_t;
57     typedef unsigned int uint_fast16_t;
58 root 1.44 typedef signed int int32_t;
59     typedef unsigned int uint32_t;
60 root 1.180 typedef signed int int_fast32_t;
61     typedef unsigned int uint_fast32_t;
62 root 1.44 #if __GNUC__
63     typedef signed long long int64_t;
64     typedef unsigned long long uint64_t;
65 root 1.51 #else /* _MSC_VER || __BORLANDC__ */
66 root 1.44 typedef signed __int64 int64_t;
67     typedef unsigned __int64 uint64_t;
68     #endif
69 root 1.180 typedef int64_t int_fast64_t;
70     typedef uint64_t uint_fast64_t;
71 root 1.87 #ifdef _WIN64
72     #define ECB_PTRSIZE 8
73     typedef uint64_t uintptr_t;
74     typedef int64_t intptr_t;
75     #else
76     #define ECB_PTRSIZE 4
77     typedef uint32_t uintptr_t;
78     typedef int32_t intptr_t;
79     #endif
80 root 1.44 #else
81     #include <inttypes.h>
82 root 1.173 #if (defined INTPTR_MAX ? INTPTR_MAX : ULONG_MAX) > 0xffffffffU
83 root 1.87 #define ECB_PTRSIZE 8
84     #else
85     #define ECB_PTRSIZE 4
86     #endif
87 root 1.44 #endif
88 root 1.6
89 sf-exg 1.159 #define ECB_GCC_AMD64 (__amd64 || __amd64__ || __x86_64 || __x86_64__)
90     #define ECB_MSVC_AMD64 (_M_AMD64 || _M_X64)
91    
92 root 1.179 #ifndef ECB_OPTIMIZE_SIZE
93     #if __OPTIMIZE_SIZE__
94     #define ECB_OPTIMIZE_SIZE 1
95     #else
96     #define ECB_OPTIMIZE_SIZE 0
97     #endif
98     #endif
99    
100 root 1.114 /* work around x32 idiocy by defining proper macros */
101 sf-exg 1.159 #if ECB_GCC_AMD64 || ECB_MSVC_AMD64
102 root 1.119 #if _ILP32
103 root 1.115 #define ECB_AMD64_X32 1
104 root 1.114 #else
105 root 1.115 #define ECB_AMD64 1
106 root 1.114 #endif
107     #endif
108    
109 root 1.189 #if ECB_PTRSIZE >= 8 || ECB_AMD64_X32
110     #define ECB_64BIT_NATIVE 1
111     #else
112     #define ECB_64BIT_NATIVE 0
113     #endif
114    
115 root 1.12 /* many compilers define __GNUC__ to some version but then only implement
116     * what their idiot authors think are the "more important" extensions,
117 sf-exg 1.59 * causing enormous grief in return for some better fake benchmark numbers,
118 root 1.18 * or so.
119 root 1.12 * we try to detect these and simply assume they are not gcc - if they have
120     * an issue with that they should have done it right in the first place.
121     */
122 root 1.137 #if !defined __GNUC_MINOR__ || defined __INTEL_COMPILER || defined __SUNPRO_C || defined __SUNPRO_CC || defined __llvm__ || defined __clang__
123     #define ECB_GCC_VERSION(major,minor) 0
124     #else
125     #define ECB_GCC_VERSION(major,minor) (__GNUC__ > (major) || (__GNUC__ == (major) && __GNUC_MINOR__ >= (minor)))
126 root 1.12 #endif
127 root 1.1
128 sf-exg 1.138 #define ECB_CLANG_VERSION(major,minor) (__clang_major__ > (major) || (__clang_major__ == (major) && __clang_minor__ >= (minor)))
129    
130 root 1.147 #if __clang__ && defined __has_builtin
131     #define ECB_CLANG_BUILTIN(x) __has_builtin (x)
132 sf-exg 1.138 #else
133     #define ECB_CLANG_BUILTIN(x) 0
134     #endif
135    
136 root 1.147 #if __clang__ && defined __has_extension
137     #define ECB_CLANG_EXTENSION(x) __has_extension (x)
138 sf-exg 1.140 #else
139     #define ECB_CLANG_EXTENSION(x) 0
140     #endif
141    
142 root 1.91 #define ECB_CPP (__cplusplus+0)
143     #define ECB_CPP11 (__cplusplus >= 201103L)
144 root 1.177 #define ECB_CPP14 (__cplusplus >= 201402L)
145     #define ECB_CPP17 (__cplusplus >= 201703L)
146 root 1.90
147 root 1.102 #if ECB_CPP
148 root 1.127 #define ECB_C 0
149     #define ECB_STDC_VERSION 0
150     #else
151     #define ECB_C 1
152     #define ECB_STDC_VERSION __STDC_VERSION__
153     #endif
154    
155     #define ECB_C99 (ECB_STDC_VERSION >= 199901L)
156     #define ECB_C11 (ECB_STDC_VERSION >= 201112L)
157 root 1.177 #define ECB_C17 (ECB_STDC_VERSION >= 201710L)
158 root 1.127
159     #if ECB_CPP
160 root 1.102 #define ECB_EXTERN_C extern "C"
161     #define ECB_EXTERN_C_BEG ECB_EXTERN_C {
162     #define ECB_EXTERN_C_END }
163     #else
164     #define ECB_EXTERN_C extern
165     #define ECB_EXTERN_C_BEG
166     #define ECB_EXTERN_C_END
167     #endif
168    
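
Usage sketch (illustrative, not part of ecb.h): ECB_EXTERN_C_BEG/ECB_EXTERN_C_END expand to extern "C" { ... } when compiled as C++ and to nothing when compiled as C, so a header can declare C-linkage symbols once for both languages. The function name below is hypothetical.

   #include "ecb.h"

   ECB_EXTERN_C_BEG
   int my_c_api_function (int x); /* hypothetical symbol, gets C linkage under both C and C++ */
   ECB_EXTERN_C_END
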
169 root 1.52 /*****************************************************************************/
170    
171 root 1.58 /* ECB_NO_THREADS - ecb is not used by multiple threads, ever */
172     /* ECB_NO_SMP - ecb might be used in multiple threads, but only on a single cpu */
173    
174 root 1.79 #if ECB_NO_THREADS
175 root 1.95 #define ECB_NO_SMP 1
176 root 1.79 #endif
177    
178 root 1.93 #if ECB_NO_SMP
179 root 1.64 #define ECB_MEMORY_FENCE do { } while (0)
180 root 1.58 #endif
181    
182 sf-exg 1.165 /* http://www-01.ibm.com/support/knowledgecenter/SSGH3R_13.1.0/com.ibm.xlcpp131.aix.doc/compiler_ref/compiler_builtins.html */
183     #if __xlC__ && ECB_CPP
184     #include <builtins.h>
185     #endif
186    
187 root 1.171 #if 1400 <= _MSC_VER
188     #include <intrin.h> /* fence functions _ReadBarrier, also bit search functions _BitScanReverse */
189     #endif
190    
191 root 1.52 #ifndef ECB_MEMORY_FENCE
192 root 1.85 #if ECB_GCC_VERSION(2,5) || defined __INTEL_COMPILER || (__llvm__ && __GNUC__) || __SUNPRO_C >= 0x5110 || __SUNPRO_CC >= 0x5110
193 root 1.178 #define ECB_MEMORY_FENCE_RELAXED __asm__ __volatile__ ("" : : : "memory")
194 root 1.73 #if __i386 || __i386__
195 root 1.54 #define ECB_MEMORY_FENCE __asm__ __volatile__ ("lock; orb $0, -1(%%esp)" : : : "memory")
196 root 1.94 #define ECB_MEMORY_FENCE_ACQUIRE __asm__ __volatile__ ("" : : : "memory")
197 root 1.176 #define ECB_MEMORY_FENCE_RELEASE __asm__ __volatile__ ("" : : : "memory")
198 sf-exg 1.159 #elif ECB_GCC_AMD64
199 root 1.94 #define ECB_MEMORY_FENCE __asm__ __volatile__ ("mfence" : : : "memory")
200     #define ECB_MEMORY_FENCE_ACQUIRE __asm__ __volatile__ ("" : : : "memory")
201 root 1.176 #define ECB_MEMORY_FENCE_RELEASE __asm__ __volatile__ ("" : : : "memory")
202 root 1.63 #elif __powerpc__ || __ppc__ || __powerpc64__ || __ppc64__
203 root 1.94 #define ECB_MEMORY_FENCE __asm__ __volatile__ ("sync" : : : "memory")
204 root 1.175 #elif defined __ARM_ARCH_2__ \
205     || defined __ARM_ARCH_3__ || defined __ARM_ARCH_3M__ \
206     || defined __ARM_ARCH_4__ || defined __ARM_ARCH_4T__ \
207     || defined __ARM_ARCH_5__ || defined __ARM_ARCH_5E__ \
208     || defined __ARM_ARCH_5T__ || defined __ARM_ARCH_5TE__ \
209     || defined __ARM_ARCH_5TEJ__
210     /* should not need any, unless running old code on newer cpu - arm doesn't support that */
211 root 1.85 #elif defined __ARM_ARCH_6__ || defined __ARM_ARCH_6J__ \
212 root 1.175 || defined __ARM_ARCH_6K__ || defined __ARM_ARCH_6ZK__ \
213     || defined __ARM_ARCH_6T2__
214 root 1.84 #define ECB_MEMORY_FENCE __asm__ __volatile__ ("mcr p15,0,%0,c7,c10,5" : : "r" (0) : "memory")
215 root 1.85 #elif defined __ARM_ARCH_7__ || defined __ARM_ARCH_7A__ \
216 root 1.175 || defined __ARM_ARCH_7R__ || defined __ARM_ARCH_7M__
217 root 1.94 #define ECB_MEMORY_FENCE __asm__ __volatile__ ("dmb" : : : "memory")
218 root 1.129 #elif __aarch64__
219     #define ECB_MEMORY_FENCE __asm__ __volatile__ ("dmb ish" : : : "memory")
220 root 1.166 #elif (__sparc || __sparc__) && !(__sparc_v8__ || defined __sparcv8)
221 root 1.94 #define ECB_MEMORY_FENCE __asm__ __volatile__ ("membar #LoadStore | #LoadLoad | #StoreStore | #StoreLoad" : : : "memory")
222     #define ECB_MEMORY_FENCE_ACQUIRE __asm__ __volatile__ ("membar #LoadStore | #LoadLoad" : : : "memory")
223     #define ECB_MEMORY_FENCE_RELEASE __asm__ __volatile__ ("membar #LoadStore | #StoreStore" : : : "memory")
224 root 1.85 #elif defined __s390__ || defined __s390x__
225 root 1.77 #define ECB_MEMORY_FENCE __asm__ __volatile__ ("bcr 15,0" : : : "memory")
226 root 1.85 #elif defined __mips__
227 root 1.118 /* GNU/Linux emulates sync on mips1 architectures, so we force its use */
228 root 1.116 /* anybody else who still uses mips1 is supposed to send in their version, with detection code. */
229     #define ECB_MEMORY_FENCE __asm__ __volatile__ (".set mips2; sync; .set mips0" : : : "memory")
230 root 1.86 #elif defined __alpha__
231 root 1.94 #define ECB_MEMORY_FENCE __asm__ __volatile__ ("mb" : : : "memory")
232     #elif defined __hppa__
233     #define ECB_MEMORY_FENCE __asm__ __volatile__ ("" : : : "memory")
234     #define ECB_MEMORY_FENCE_RELEASE __asm__ __volatile__ ("" : : : "memory")
235     #elif defined __ia64__
236     #define ECB_MEMORY_FENCE __asm__ __volatile__ ("mf" : : : "memory")
237 root 1.117 #elif defined __m68k__
238     #define ECB_MEMORY_FENCE __asm__ __volatile__ ("" : : : "memory")
239     #elif defined __m88k__
240     #define ECB_MEMORY_FENCE __asm__ __volatile__ ("tb1 0,%%r0,128" : : : "memory")
241     #elif defined __sh__
242     #define ECB_MEMORY_FENCE __asm__ __volatile__ ("" : : : "memory")
243 root 1.52 #endif
244     #endif
245     #endif
246    
247     #ifndef ECB_MEMORY_FENCE
248 root 1.93 #if ECB_GCC_VERSION(4,7)
249 root 1.97 /* see comment below (stdatomic.h) about the C11 memory model. */
250 root 1.93 #define ECB_MEMORY_FENCE __atomic_thread_fence (__ATOMIC_SEQ_CST)
251 root 1.128 #define ECB_MEMORY_FENCE_ACQUIRE __atomic_thread_fence (__ATOMIC_ACQUIRE)
252     #define ECB_MEMORY_FENCE_RELEASE __atomic_thread_fence (__ATOMIC_RELEASE)
253 root 1.190 #undef ECB_MEMORY_FENCE_RELAXED
254 root 1.178 #define ECB_MEMORY_FENCE_RELAXED __atomic_thread_fence (__ATOMIC_RELAXED)
255 root 1.110
256 sf-exg 1.140 #elif ECB_CLANG_EXTENSION(c_atomic)
257     /* see comment below (stdatomic.h) about the C11 memory model. */
258     #define ECB_MEMORY_FENCE __c11_atomic_thread_fence (__ATOMIC_SEQ_CST)
259     #define ECB_MEMORY_FENCE_ACQUIRE __c11_atomic_thread_fence (__ATOMIC_ACQUIRE)
260     #define ECB_MEMORY_FENCE_RELEASE __c11_atomic_thread_fence (__ATOMIC_RELEASE)
261 root 1.190 #undef ECB_MEMORY_FENCE_RELAXED
262 root 1.178 #define ECB_MEMORY_FENCE_RELAXED __c11_atomic_thread_fence (__ATOMIC_RELAXED)
263 root 1.110
264 root 1.93 #elif ECB_GCC_VERSION(4,4) || defined __INTEL_COMPILER || defined __clang__
265 root 1.52 #define ECB_MEMORY_FENCE __sync_synchronize ()
266 root 1.126 #elif _MSC_VER >= 1500 /* VC++ 2008 */
267     /* apparently, microsoft broke all the memory barrier stuff in Visual Studio 2008... */
268     #pragma intrinsic(_ReadBarrier,_WriteBarrier,_ReadWriteBarrier)
269     #define ECB_MEMORY_FENCE _ReadWriteBarrier (); MemoryBarrier()
270     #define ECB_MEMORY_FENCE_ACQUIRE _ReadWriteBarrier (); MemoryBarrier() /* according to msdn, _ReadBarrier is not a load fence */
271     #define ECB_MEMORY_FENCE_RELEASE _WriteBarrier (); MemoryBarrier()
272 root 1.57 #elif _MSC_VER >= 1400 /* VC++ 2005 */
273     #pragma intrinsic(_ReadBarrier,_WriteBarrier,_ReadWriteBarrier)
274     #define ECB_MEMORY_FENCE _ReadWriteBarrier ()
275     #define ECB_MEMORY_FENCE_ACQUIRE _ReadWriteBarrier () /* according to msdn, _ReadBarrier is not a load fence */
276     #define ECB_MEMORY_FENCE_RELEASE _WriteBarrier ()
277 root 1.85 #elif defined _WIN32
278 root 1.55 #include <WinNT.h>
279 root 1.57 #define ECB_MEMORY_FENCE MemoryBarrier () /* actually just xchg on x86... scary */
280 root 1.72 #elif __SUNPRO_C >= 0x5110 || __SUNPRO_CC >= 0x5110
281     #include <mbarrier.h>
282 root 1.178 #define ECB_MEMORY_FENCE __machine_rw_barrier ()
283     #define ECB_MEMORY_FENCE_ACQUIRE __machine_acq_barrier ()
284     #define ECB_MEMORY_FENCE_RELEASE __machine_rel_barrier ()
285     #define ECB_MEMORY_FENCE_RELAXED __compiler_barrier ()
286 root 1.82 #elif __xlC__
287 root 1.83 #define ECB_MEMORY_FENCE __sync ()
288 root 1.52 #endif
289     #endif
290    
291 root 1.53 #ifndef ECB_MEMORY_FENCE
292 root 1.94 #if ECB_C11 && !defined __STDC_NO_ATOMICS__
293     /* we assume that these memory fences work on all variables/all memory accesses, */
294     /* not just C11 atomics and atomic accesses */
295     #include <stdatomic.h>
296     #define ECB_MEMORY_FENCE atomic_thread_fence (memory_order_seq_cst)
297 root 1.178 #define ECB_MEMORY_FENCE_ACQUIRE atomic_thread_fence (memory_order_acquire)
298     #define ECB_MEMORY_FENCE_RELEASE atomic_thread_fence (memory_order_release)
299 root 1.94 #endif
300     #endif
301    
302     #ifndef ECB_MEMORY_FENCE
303 root 1.62 #if !ECB_AVOID_PTHREADS
304     /*
305     * if you get undefined symbol references to pthread_mutex_lock,
306     * or failure to find pthread.h, then you should implement
307     * the ECB_MEMORY_FENCE operations for your cpu/compiler
308     * OR provide pthread.h and link against the posix thread library
309     * of your system.
310     */
311     #include <pthread.h>
312     #define ECB_NEEDS_PTHREADS 1
313     #define ECB_MEMORY_FENCE_NEEDS_PTHREADS 1
314 root 1.52
315 root 1.62 static pthread_mutex_t ecb_mf_lock = PTHREAD_MUTEX_INITIALIZER;
316     #define ECB_MEMORY_FENCE do { pthread_mutex_lock (&ecb_mf_lock); pthread_mutex_unlock (&ecb_mf_lock); } while (0)
317     #endif
318     #endif
319    
320 root 1.85 #if !defined ECB_MEMORY_FENCE_ACQUIRE && defined ECB_MEMORY_FENCE
321 root 1.52 #define ECB_MEMORY_FENCE_ACQUIRE ECB_MEMORY_FENCE
322 root 1.62 #endif
323    
324 root 1.85 #if !defined ECB_MEMORY_FENCE_RELEASE && defined ECB_MEMORY_FENCE
325 root 1.52 #define ECB_MEMORY_FENCE_RELEASE ECB_MEMORY_FENCE
326     #endif
327    
328 root 1.178 #if !defined ECB_MEMORY_FENCE_RELAXED && defined ECB_MEMORY_FENCE
329     #define ECB_MEMORY_FENCE_RELAXED ECB_MEMORY_FENCE /* very heavy-handed */
330     #endif
331    
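
Usage sketch (illustrative, not part of ecb.h), assuming one producer and one consumer thread and hypothetical shared variables: the release fence orders the payload store before the flag store, the acquire fence orders the flag load before the payload load.

   #include <signal.h>
   #include "ecb.h"

   static int payload;                 /* hypothetical shared data */
   static volatile sig_atomic_t ready; /* hypothetical publication flag */

   static void producer (void)
   {
     payload = 42;
     ECB_MEMORY_FENCE_RELEASE; /* payload must be visible before the flag */
     ready = 1;
   }

   static int consumer (void)
   {
     while (!ready)
       ; /* spin until published */

     ECB_MEMORY_FENCE_ACQUIRE; /* flag read must complete before the payload read */
     return payload;
   }
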
332 root 1.52 /*****************************************************************************/
333    
334 root 1.149 #if ECB_CPP
335 root 1.46 #define ecb_inline static inline
336 root 1.38 #elif ECB_GCC_VERSION(2,5)
337 root 1.46 #define ecb_inline static __inline__
338 root 1.39 #elif ECB_C99
339 root 1.46 #define ecb_inline static inline
340 root 1.29 #else
341 root 1.46 #define ecb_inline static
342 root 1.38 #endif
343    
344     #if ECB_GCC_VERSION(3,3)
345     #define ecb_restrict __restrict__
346 root 1.39 #elif ECB_C99
347 root 1.38 #define ecb_restrict restrict
348     #else
349     #define ecb_restrict
350 root 1.4 #endif
351    
352 root 1.38 typedef int ecb_bool;
353    
354 root 1.8 #define ECB_CONCAT_(a, b) a ## b
355     #define ECB_CONCAT(a, b) ECB_CONCAT_(a, b)
356     #define ECB_STRINGIFY_(a) # a
357     #define ECB_STRINGIFY(a) ECB_STRINGIFY_(a)
358 root 1.155 #define ECB_STRINGIFY_EXPR(expr) ((expr), ECB_STRINGIFY_ (expr))
359 root 1.8
360 root 1.46 #define ecb_function_ ecb_inline
361 root 1.3
362 sf-exg 1.138 #if ECB_GCC_VERSION(3,1) || ECB_CLANG_VERSION(2,8)
363 root 1.142 #define ecb_attribute(attrlist) __attribute__ (attrlist)
364 root 1.37 #else
365     #define ecb_attribute(attrlist)
366 sf-exg 1.138 #endif
367 root 1.127
368 sf-exg 1.138 #if ECB_GCC_VERSION(3,1) || ECB_CLANG_BUILTIN(__builtin_constant_p)
369     #define ecb_is_constant(expr) __builtin_constant_p (expr)
370     #else
371 root 1.127 /* possible C11 impl for integral types
372     typedef struct ecb_is_constant_struct ecb_is_constant_struct;
373     #define ecb_is_constant(expr) _Generic ((1 ? (struct ecb_is_constant_struct *)0 : (void *)((expr) - (expr)), ecb_is_constant_struct *: 0, default: 1)) */
374    
375 root 1.37 #define ecb_is_constant(expr) 0
376 sf-exg 1.138 #endif
377    
378     #if ECB_GCC_VERSION(3,1) || ECB_CLANG_BUILTIN(__builtin_expect)
379     #define ecb_expect(expr,value) __builtin_expect ((expr),(value))
380     #else
381 root 1.37 #define ecb_expect(expr,value) (expr)
382 sf-exg 1.138 #endif
383    
384     #if ECB_GCC_VERSION(3,1) || ECB_CLANG_BUILTIN(__builtin_prefetch)
385     #define ecb_prefetch(addr,rw,locality) __builtin_prefetch (addr, rw, locality)
386     #else
387 root 1.37 #define ecb_prefetch(addr,rw,locality)
388 root 1.1 #endif
389    
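
Usage sketch (illustrative, not part of ecb.h): ecb_prefetch takes the address, a read/write flag (0 = read, 1 = write) and a locality hint (0 = no temporal locality, 3 = high); on compilers without the builtin it expands to nothing, so it is always safe to call.

   #include "ecb.h"

   /* hypothetical example: sum an array, hinting reads a few elements ahead */
   static long sum_array (const int *a, int n)
   {
     long sum = 0;
     int i;

     for (i = 0; i < n; ++i)
       {
         ecb_prefetch (a + i + 16, 0, 1); /* prefetch for reading, low temporal locality */
         sum += a [i];
       }

     return sum;
   }
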
390 root 1.2 /* no emulation for ecb_decltype */
391 root 1.143 #if ECB_CPP11
392 root 1.144 // older implementations might have problems with decltype(x)::type, work around it
393 root 1.146 template<class T> struct ecb_decltype_t { typedef T type; };
394     #define ecb_decltype(x) ecb_decltype_t<decltype (x)>::type
395 root 1.143 #elif ECB_GCC_VERSION(3,0) || ECB_CLANG_VERSION(2,8)
396     #define ecb_decltype(x) __typeof__ (x)
397 root 1.1 #endif
398    
399 root 1.135 #if _MSC_VER >= 1300
400 root 1.149 #define ecb_deprecated __declspec (deprecated)
401 root 1.135 #else
402     #define ecb_deprecated ecb_attribute ((__deprecated__))
403     #endif
404    
405 sf-exg 1.162 #if _MSC_VER >= 1500
406 root 1.154 #define ecb_deprecated_message(msg) __declspec (deprecated (msg))
407     #elif ECB_GCC_VERSION(4,5)
408     #define ecb_deprecated_message(msg) ecb_attribute ((__deprecated__ (msg)))
409     #else
410     #define ecb_deprecated_message(msg) ecb_deprecated
411     #endif
412    
413     #if _MSC_VER >= 1400
414     #define ecb_noinline __declspec (noinline)
415     #else
416     #define ecb_noinline ecb_attribute ((__noinline__))
417     #endif
418    
419 root 1.24 #define ecb_unused ecb_attribute ((__unused__))
420     #define ecb_const ecb_attribute ((__const__))
421     #define ecb_pure ecb_attribute ((__pure__))
422 root 1.35
423 root 1.145 #if ECB_C11 || __IBMC_NORETURN
424 sf-exg 1.165 /* http://www-01.ibm.com/support/knowledgecenter/SSGH3R_13.1.0/com.ibm.xlcpp131.aix.doc/language_ref/noreturn.html */
425 root 1.90 #define ecb_noreturn _Noreturn
426 root 1.153 #elif ECB_CPP11
427     #define ecb_noreturn [[noreturn]]
428     #elif _MSC_VER >= 1200
429 sf-exg 1.156 /* http://msdn.microsoft.com/en-us/library/k6ktzx3s.aspx */
430 root 1.153 #define ecb_noreturn __declspec (noreturn)
431 root 1.90 #else
432     #define ecb_noreturn ecb_attribute ((__noreturn__))
433     #endif
434    
435 root 1.35 #if ECB_GCC_VERSION(4,3)
436 root 1.39 #define ecb_artificial ecb_attribute ((__artificial__))
437     #define ecb_hot ecb_attribute ((__hot__))
438     #define ecb_cold ecb_attribute ((__cold__))
439 root 1.35 #else
440     #define ecb_artificial
441     #define ecb_hot
442     #define ecb_cold
443     #endif
444 root 1.1
445 root 1.39 /* put around conditional expressions if you are very sure that the */
446     /* expression is mostly true or mostly false. note that these return */
447     /* booleans, not the expression. */
448 root 1.33 #define ecb_expect_false(expr) ecb_expect (!!(expr), 0)
449     #define ecb_expect_true(expr) ecb_expect (!!(expr), 1)
450 root 1.36 /* for compatibility to the rest of the world */
451 root 1.33 #define ecb_likely(expr) ecb_expect_true (expr)
452     #define ecb_unlikely(expr) ecb_expect_false (expr)
453 root 1.1
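
Usage sketch (illustrative, not part of ecb.h): wrapping the failure branch in ecb_expect_false tells the compiler to lay out the common path first; the wrapper function below is hypothetical.

   #include <stdlib.h>
   #include "ecb.h"

   static void *xmalloc (size_t size) /* hypothetical helper */
   {
     void *p = malloc (size);

     if (ecb_expect_false (!p)) /* allocation failure is the rare path */
       abort ();

     return p;
   }
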
454 root 1.3 /* count trailing zero bits and count # of one bits */
455 root 1.139 #if ECB_GCC_VERSION(3,4) \
456     || (ECB_CLANG_BUILTIN(__builtin_clz) && ECB_CLANG_BUILTIN(__builtin_clzll) \
457     && ECB_CLANG_BUILTIN(__builtin_ctz) && ECB_CLANG_BUILTIN(__builtin_ctzll) \
458     && ECB_CLANG_BUILTIN(__builtin_popcount))
459 root 1.206 #define ecb_ctz32(x) __builtin_ctz (x)
460     #define ecb_ctz64(x) (__SIZEOF_LONG__ == 8 ? __builtin_ctzl (x) : __builtin_ctzll (x))
461     #define ecb_clz32(x) __builtin_clz (x)
462     #define ecb_clz64(x) (__SIZEOF_LONG__ == 8 ? __builtin_clzl (x) : __builtin_clzll (x))
463     #define ecb_ld32(x) (ecb_clz32 (x) ^ 31)
464     #define ecb_ld64(x) (ecb_clz64 (x) ^ 63)
465 root 1.35 #define ecb_popcount32(x) __builtin_popcount (x)
466 root 1.206 /* ecb_popcount64 is more difficult, see below */
467 root 1.1 #else
468 root 1.151 ecb_function_ ecb_const int ecb_ctz32 (uint32_t x);
469     ecb_function_ ecb_const int
470 root 1.35 ecb_ctz32 (uint32_t x)
471     {
472 root 1.171 #if 1400 <= _MSC_VER && (_M_IX86 || _M_X64 || _M_IA64 || _M_ARM)
473 root 1.172 unsigned long r;
474 root 1.171 _BitScanForward (&r, x);
475     return (int)r;
476     #else
477 root 1.208 int r;
478 root 1.35
479 root 1.208 x &= ~x + 1; /* this isolates the lowest bit */
480 root 1.205
481 root 1.208 #if 1
482     /* David Seal's algorithm, Message-ID: <32975@armltd.uucp> from 1994 */
483     /* This happens to return 32 for x == 0, but the API does not support this */
484 root 1.35
485 root 1.208 /* -0 marks unused entries */
486     static unsigned char table[64] =
487     {
488     32, 0, 1, 12, 2, 6, -0, 13, 3, -0, 7, -0, -0, -0, -0, 14,
489     10, 4, -0, -0, 8, -0, -0, 25, -0, -0, -0, -0, -0, 21, 27, 15,
490     31, 11, 5, -0, -0, -0, -0, -0, 9, -0, -0, 24, -0, -0, 20, 26,
491     30, -0, -0, -0, -0, 23, -0, 19, 29, -0, 22, 18, 28, 17, 16, -0
492     };
493    
494     /* magic constant results in 33 unique values in the upper 6 bits */
495     x *= 0x0450fbafU; /* == 17 * 65 * 65535 */
496    
497     r = table [x >> 26];
498     #elif 0 /* branchless on i386, typically */
499     r = 0;
500 root 1.50 r += !!(x & 0xaaaaaaaa) << 0;
501     r += !!(x & 0xcccccccc) << 1;
502     r += !!(x & 0xf0f0f0f0) << 2;
503     r += !!(x & 0xff00ff00) << 3;
504     r += !!(x & 0xffff0000) << 4;
505 root 1.208 #else /* branchless on modern compilers, typically */
506     r = 0;
507 root 1.35 if (x & 0xaaaaaaaa) r += 1;
508     if (x & 0xcccccccc) r += 2;
509     if (x & 0xf0f0f0f0) r += 4;
510     if (x & 0xff00ff00) r += 8;
511     if (x & 0xffff0000) r += 16;
512 root 1.50 #endif
513 root 1.35
514     return r;
515 root 1.171 #endif
516 root 1.35 }
517    
518 root 1.151 ecb_function_ ecb_const int ecb_ctz64 (uint64_t x);
519     ecb_function_ ecb_const int
520 root 1.49 ecb_ctz64 (uint64_t x)
521     {
522 root 1.171 #if 1400 <= _MSC_VER && (_M_X64 || _M_IA64 || _M_ARM)
523 root 1.172 unsigned long r;
524 root 1.171 _BitScanForward64 (&r, x);
525     return (int)r;
526     #else
527 root 1.168 int shift = x & 0xffffffff ? 0 : 32;
528 root 1.50 return ecb_ctz32 (x >> shift) + shift;
529 root 1.171 #endif
530 root 1.49 }
531    
532 root 1.208 ecb_function_ ecb_const int ecb_clz32 (uint32_t x);
533     ecb_function_ ecb_const int
534     ecb_clz32 (uint32_t x)
535     {
536     #if 1400 <= _MSC_VER && (_M_IX86 || _M_X64 || _M_IA64 || _M_ARM)
537     unsigned long r;
538     _BitScanReverse (&r, x);
539     return (int)r;
540     #else
541    
542     /* Robert Harley's algorithm from comp.arch 1996-12-07 */
543     /* This happens to return 32 for x == 0, but the API does not support this */
544    
545     /* -0 marks unused table elements */
546     static unsigned char table[64] =
547     {
548     32, 31, -0, 16, -0, 30, 3, -0, 15, -0, -0, -0, 29, 10, 2, -0,
549     -0, -0, 12, 14, 21, -0, 19, -0, -0, 28, -0, 25, -0, 9, 1, -0,
550     17, -0, 4, -0, -0, -0, 11, -0, 13, 22, 20, -0, 26, -0, -0, 18,
551     5, -0, -0, 23, -0, 27, -0, 6, -0, 24, 7, -0, 8, -0, 0, -0
552     };
553    
554     /* propagate leftmost 1 bit to the right */
555     x |= x >> 1;
556     x |= x >> 2;
557     x |= x >> 4;
558     x |= x >> 8;
559     x |= x >> 16;
560    
561     /* magic constant results in 33 unique values in the upper 6 bits */
562     x *= 0x06EB14F9U; /* == 7 * 255 * 255 * 255 */
563    
564     return table [x >> 26];
565     #endif
566     }
567    
568     ecb_function_ ecb_const int ecb_clz64 (uint64_t x);
569     ecb_function_ ecb_const int
570     ecb_clz64 (uint64_t x)
571     {
572     #if 1400 <= _MSC_VER && (_M_X64 || _M_IA64 || _M_ARM)
573     unsigned long r;
574     _BitScanReverse64 (&r, x);
575     return (int)r;
576     #else
577     uint32_t l = x >> 32;
578     int shift = l ? 0 : 32;
579     return ecb_clz32 (l ? l : x) + shift;
580     #endif
581     }
582    
583 root 1.151 ecb_function_ ecb_const int ecb_popcount32 (uint32_t x);
584     ecb_function_ ecb_const int
585 root 1.35 ecb_popcount32 (uint32_t x)
586     {
587     x -= (x >> 1) & 0x55555555;
588     x = ((x >> 2) & 0x33333333) + (x & 0x33333333);
589     x = ((x >> 4) + x) & 0x0f0f0f0f;
590     x *= 0x01010101;
591 root 1.1
592 root 1.35 return x >> 24;
593     }
594 root 1.49
595 root 1.151 ecb_function_ ecb_const int ecb_ld32 (uint32_t x);
596     ecb_function_ ecb_const int ecb_ld32 (uint32_t x)
597 root 1.49 {
598 root 1.171 #if 1400 <= _MSC_VER && (_M_IX86 || _M_X64 || _M_IA64 || _M_ARM)
599 root 1.172 unsigned long r;
600 root 1.171 _BitScanReverse (&r, x);
601     return (int)r;
602     #else
603 root 1.50 int r = 0;
604 root 1.49
605 root 1.50 if (x >> 16) { x >>= 16; r += 16; }
606     if (x >> 8) { x >>= 8; r += 8; }
607     if (x >> 4) { x >>= 4; r += 4; }
608     if (x >> 2) { x >>= 2; r += 2; }
609     if (x >> 1) { r += 1; }
610 root 1.49
611     return r;
612 root 1.171 #endif
613 root 1.49 }
614    
615 root 1.151 ecb_function_ ecb_const int ecb_ld64 (uint64_t x);
616     ecb_function_ ecb_const int ecb_ld64 (uint64_t x)
617 root 1.49 {
618 root 1.171 #if 1400 <= _MSC_VER && (_M_X64 || _M_IA64 || _M_ARM)
619 root 1.172 unsigned long r;
620 root 1.171 _BitScanReverse64 (&r, x);
621     return (int)r;
622     #else
623 root 1.50 int r = 0;
624 root 1.49
625 root 1.50 if (x >> 32) { x >>= 32; r += 32; }
626 root 1.49
627 root 1.50 return r + ecb_ld32 (x);
628 root 1.171 #endif
629 root 1.49 }
630 root 1.1 #endif
631    
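
A few concrete values as a sanity sketch (illustrative, not part of ecb.h); the results are the same whether the builtin macros or the portable fallbacks above are used. ecb_ctz*, ecb_clz* and ecb_ld* are not defined for x == 0.

   #include <assert.h>
   #include "ecb.h"

   static void bitops_examples (void)
   {
     assert (ecb_ctz32 (0x28) == 3);             /* 0x28 = 101000b, three trailing zero bits */
     assert (ecb_clz32 (1) == 31);               /* 31 leading zero bits */
     assert (ecb_ld32 (1024) == 10);             /* integer log2 */
     assert (ecb_popcount32 (0x00ff00ff) == 16); /* 16 one bits */
   }
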
632 root 1.151 ecb_function_ ecb_const ecb_bool ecb_is_pot32 (uint32_t x);
633     ecb_function_ ecb_const ecb_bool ecb_is_pot32 (uint32_t x) { return !(x & (x - 1)); }
634     ecb_function_ ecb_const ecb_bool ecb_is_pot64 (uint64_t x);
635     ecb_function_ ecb_const ecb_bool ecb_is_pot64 (uint64_t x) { return !(x & (x - 1)); }
636 root 1.88
637 root 1.151 ecb_function_ ecb_const uint8_t ecb_bitrev8 (uint8_t x);
638     ecb_function_ ecb_const uint8_t ecb_bitrev8 (uint8_t x)
639 root 1.70 {
640     return ( (x * 0x0802U & 0x22110U)
641 root 1.151 | (x * 0x8020U & 0x88440U)) * 0x10101U >> 16;
642 root 1.70 }
643    
644 root 1.151 ecb_function_ ecb_const uint16_t ecb_bitrev16 (uint16_t x);
645     ecb_function_ ecb_const uint16_t ecb_bitrev16 (uint16_t x)
646 root 1.70 {
647     x = ((x >> 1) & 0x5555) | ((x & 0x5555) << 1);
648     x = ((x >> 2) & 0x3333) | ((x & 0x3333) << 2);
649     x = ((x >> 4) & 0x0f0f) | ((x & 0x0f0f) << 4);
650     x = ( x >> 8 ) | ( x << 8);
651    
652     return x;
653     }
654    
655 root 1.151 ecb_function_ ecb_const uint32_t ecb_bitrev32 (uint32_t x);
656     ecb_function_ ecb_const uint32_t ecb_bitrev32 (uint32_t x)
657 root 1.70 {
658     x = ((x >> 1) & 0x55555555) | ((x & 0x55555555) << 1);
659     x = ((x >> 2) & 0x33333333) | ((x & 0x33333333) << 2);
660     x = ((x >> 4) & 0x0f0f0f0f) | ((x & 0x0f0f0f0f) << 4);
661     x = ((x >> 8) & 0x00ff00ff) | ((x & 0x00ff00ff) << 8);
662     x = ( x >> 16 ) | ( x << 16);
663    
664     return x;
665     }
666    
667 root 1.151 ecb_function_ ecb_const int ecb_popcount64 (uint64_t x);
668     ecb_function_ ecb_const int
669 root 1.49 ecb_popcount64 (uint64_t x)
670     {
671 root 1.205 /* popcount64 is only available on 64 bit cpus as gcc builtin. */
672     /* also, gcc/clang make this surprisingly difficult to use */
673 root 1.207 #if (__SIZEOF_LONG__ == 8) && (ECB_GCC_VERSION(3,4) || ECB_CLANG_BUILTIN (__builtin_popcountl))
674 root 1.205 return __builtin_popcountl (x);
675     #else
676 root 1.49 return ecb_popcount32 (x) + ecb_popcount32 (x >> 32);
677 root 1.205 #endif
678 root 1.49 }
679    
680 root 1.151 ecb_inline ecb_const uint8_t ecb_rotl8 (uint8_t x, unsigned int count);
681     ecb_inline ecb_const uint8_t ecb_rotr8 (uint8_t x, unsigned int count);
682     ecb_inline ecb_const uint16_t ecb_rotl16 (uint16_t x, unsigned int count);
683     ecb_inline ecb_const uint16_t ecb_rotr16 (uint16_t x, unsigned int count);
684     ecb_inline ecb_const uint32_t ecb_rotl32 (uint32_t x, unsigned int count);
685     ecb_inline ecb_const uint32_t ecb_rotr32 (uint32_t x, unsigned int count);
686     ecb_inline ecb_const uint64_t ecb_rotl64 (uint64_t x, unsigned int count);
687     ecb_inline ecb_const uint64_t ecb_rotr64 (uint64_t x, unsigned int count);
688    
689 root 1.198 ecb_inline ecb_const uint8_t ecb_rotl8 (uint8_t x, unsigned int count) { return (x >> (-count & 7)) | (x << (count & 7)); }
690     ecb_inline ecb_const uint8_t ecb_rotr8 (uint8_t x, unsigned int count) { return (x << (-count & 7)) | (x >> (count & 7)); }
691     ecb_inline ecb_const uint16_t ecb_rotl16 (uint16_t x, unsigned int count) { return (x >> (-count & 15)) | (x << (count & 15)); }
692     ecb_inline ecb_const uint16_t ecb_rotr16 (uint16_t x, unsigned int count) { return (x << (-count & 15)) | (x >> (count & 15)); }
693     ecb_inline ecb_const uint32_t ecb_rotl32 (uint32_t x, unsigned int count) { return (x >> (-count & 31)) | (x << (count & 31)); }
694     ecb_inline ecb_const uint32_t ecb_rotr32 (uint32_t x, unsigned int count) { return (x << (-count & 31)) | (x >> (count & 31)); }
695     ecb_inline ecb_const uint64_t ecb_rotl64 (uint64_t x, unsigned int count) { return (x >> (-count & 63)) | (x << (count & 63)); }
696     ecb_inline ecb_const uint64_t ecb_rotr64 (uint64_t x, unsigned int count) { return (x << (-count & 63)) | (x >> (count & 63)); }
697 root 1.50
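
The (count & (width - 1)) / (-count & (width - 1)) masking keeps both shift amounts strictly below the type width, so a rotate count of 0 (or a multiple of the width) stays well-defined. A short sketch (illustrative, not part of ecb.h):

   #include <assert.h>
   #include "ecb.h"

   static void rotate_examples (void)
   {
     assert (ecb_rotl32 (0x80000001U, 1) == 0x00000003U);
     assert (ecb_rotr32 (0x00000003U, 1) == 0x80000001U);
     assert (ecb_rotl8  (0xf0, 4) == 0x0f);
     assert (ecb_rotl16 (0x1234, 0) == 0x1234); /* rotating by 0 is fine */
   }
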
698 root 1.182 #if ECB_CPP
699    
700     inline uint8_t ecb_ctz (uint8_t v) { return ecb_ctz32 (v); }
701     inline uint16_t ecb_ctz (uint16_t v) { return ecb_ctz32 (v); }
702     inline uint32_t ecb_ctz (uint32_t v) { return ecb_ctz32 (v); }
703     inline uint64_t ecb_ctz (uint64_t v) { return ecb_ctz64 (v); }
704    
705     inline bool ecb_is_pot (uint8_t v) { return ecb_is_pot32 (v); }
706     inline bool ecb_is_pot (uint16_t v) { return ecb_is_pot32 (v); }
707     inline bool ecb_is_pot (uint32_t v) { return ecb_is_pot32 (v); }
708     inline bool ecb_is_pot (uint64_t v) { return ecb_is_pot64 (v); }
709    
710     inline int ecb_ld (uint8_t v) { return ecb_ld32 (v); }
711     inline int ecb_ld (uint16_t v) { return ecb_ld32 (v); }
712     inline int ecb_ld (uint32_t v) { return ecb_ld32 (v); }
713     inline int ecb_ld (uint64_t v) { return ecb_ld64 (v); }
714    
715     inline int ecb_popcount (uint8_t v) { return ecb_popcount32 (v); }
716     inline int ecb_popcount (uint16_t v) { return ecb_popcount32 (v); }
717     inline int ecb_popcount (uint32_t v) { return ecb_popcount32 (v); }
718     inline int ecb_popcount (uint64_t v) { return ecb_popcount64 (v); }
719    
720     inline uint8_t ecb_bitrev (uint8_t v) { return ecb_bitrev8 (v); }
721     inline uint16_t ecb_bitrev (uint16_t v) { return ecb_bitrev16 (v); }
722     inline uint32_t ecb_bitrev (uint32_t v) { return ecb_bitrev32 (v); }
723    
724 root 1.183 inline uint8_t ecb_rotl (uint8_t v, unsigned int count) { return ecb_rotl8 (v, count); }
725     inline uint16_t ecb_rotl (uint16_t v, unsigned int count) { return ecb_rotl16 (v, count); }
726     inline uint32_t ecb_rotl (uint32_t v, unsigned int count) { return ecb_rotl32 (v, count); }
727     inline uint64_t ecb_rotl (uint64_t v, unsigned int count) { return ecb_rotl64 (v, count); }
728    
729     inline uint8_t ecb_rotr (uint8_t v, unsigned int count) { return ecb_rotr8 (v, count); }
730     inline uint16_t ecb_rotr (uint16_t v, unsigned int count) { return ecb_rotr16 (v, count); }
731     inline uint32_t ecb_rotr (uint32_t v, unsigned int count) { return ecb_rotr32 (v, count); }
732     inline uint64_t ecb_rotr (uint64_t v, unsigned int count) { return ecb_rotr64 (v, count); }
733 root 1.182
734     #endif
735    
736 sf-exg 1.138 #if ECB_GCC_VERSION(4,3) || (ECB_CLANG_BUILTIN(__builtin_bswap32) && ECB_CLANG_BUILTIN(__builtin_bswap64))
737 root 1.164 #if ECB_GCC_VERSION(4,8) || ECB_CLANG_BUILTIN(__builtin_bswap16)
738     #define ecb_bswap16(x) __builtin_bswap16 (x)
739     #else
740 root 1.49 #define ecb_bswap16(x) (__builtin_bswap32 (x) >> 16)
741 root 1.164 #endif
742 root 1.49 #define ecb_bswap32(x) __builtin_bswap32 (x)
743     #define ecb_bswap64(x) __builtin_bswap64 (x)
744 root 1.164 #elif _MSC_VER
745     #include <stdlib.h>
746     #define ecb_bswap16(x) ((uint16_t)_byteswap_ushort ((uint16_t)(x)))
747     #define ecb_bswap32(x) ((uint32_t)_byteswap_ulong ((uint32_t)(x)))
748     #define ecb_bswap64(x) ((uint64_t)_byteswap_uint64 ((uint64_t)(x)))
749 root 1.13 #else
750 root 1.151 ecb_function_ ecb_const uint16_t ecb_bswap16 (uint16_t x);
751     ecb_function_ ecb_const uint16_t
752 root 1.50 ecb_bswap16 (uint16_t x)
753 root 1.49 {
754 root 1.50 return ecb_rotl16 (x, 8);
755 root 1.49 }
756    
757 root 1.151 ecb_function_ ecb_const uint32_t ecb_bswap32 (uint32_t x);
758     ecb_function_ ecb_const uint32_t
759 root 1.35 ecb_bswap32 (uint32_t x)
760     {
761 root 1.50 return (((uint32_t)ecb_bswap16 (x)) << 16) | ecb_bswap16 (x >> 16);
762 root 1.35 }
763    
764 root 1.151 ecb_function_ ecb_const uint64_t ecb_bswap64 (uint64_t x);
765     ecb_function_ ecb_const uint64_t
766 root 1.49 ecb_bswap64 (uint64_t x)
767 root 1.35 {
768 root 1.50 return (((uint64_t)ecb_bswap32 (x)) << 32) | ecb_bswap32 (x >> 32);
769 root 1.35 }
770 root 1.13 #endif
771    
772 sf-exg 1.138 #if ECB_GCC_VERSION(4,5) || ECB_CLANG_BUILTIN(__builtin_unreachable)
773 root 1.35 #define ecb_unreachable() __builtin_unreachable ()
774 root 1.13 #else
775 root 1.35 /* this seems to work fine, but gcc always emits a warning for it :/ */
776 root 1.151 ecb_inline ecb_noreturn void ecb_unreachable (void);
777     ecb_inline ecb_noreturn void ecb_unreachable (void) { }
778 root 1.13 #endif
779    
780 root 1.41 /* try to tell the compiler that some condition is definitely true */
781 root 1.100 #define ecb_assume(cond) if (!(cond)) ecb_unreachable (); else 0
782 root 1.41
783 root 1.174 ecb_inline ecb_const uint32_t ecb_byteorder_helper (void);
784     ecb_inline ecb_const uint32_t
785 root 1.23 ecb_byteorder_helper (void)
786 root 1.3 {
787 root 1.98 /* the union code still generates code under pressure in gcc, */
788 sf-exg 1.111 /* but less than using pointers, and always seems to */
789 root 1.98 /* successfully return a constant. */
790     /* the reason why we have this horrible preprocessor mess */
791 root 1.99 /* is to avoid it in all cases, at least on common architectures */
792 sf-exg 1.111 /* or when using a recent enough gcc version (>= 4.6) */
793 root 1.174 #if (defined __BYTE_ORDER__ && __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__) \
794     || ((__i386 || __i386__ || _M_IX86 || ECB_GCC_AMD64 || ECB_MSVC_AMD64) && !__VOS__)
795     #define ECB_LITTLE_ENDIAN 1
796     return 0x44332211;
797     #elif (defined __BYTE_ORDER__ && __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__) \
798     || ((__AARCH64EB__ || __MIPSEB__ || __ARMEB__) && !__VOS__)
799     #define ECB_BIG_ENDIAN 1
800     return 0x11223344;
801 root 1.98 #else
802     union
803     {
804 root 1.174 uint8_t c[4];
805     uint32_t u;
806     } u = { 0x11, 0x22, 0x33, 0x44 };
807     return u.u;
808 root 1.98 #endif
809 root 1.3 }
810    
811 root 1.151 ecb_inline ecb_const ecb_bool ecb_big_endian (void);
812 root 1.174 ecb_inline ecb_const ecb_bool ecb_big_endian (void) { return ecb_byteorder_helper () == 0x11223344; }
813 root 1.151 ecb_inline ecb_const ecb_bool ecb_little_endian (void);
814 root 1.174 ecb_inline ecb_const ecb_bool ecb_little_endian (void) { return ecb_byteorder_helper () == 0x44332211; }
815 root 1.3
816 root 1.180 /*****************************************************************************/
817     /* unaligned load/store */
818    
819     ecb_inline uint_fast16_t ecb_be_u16_to_host (uint_fast16_t v) { return ecb_little_endian () ? ecb_bswap16 (v) : v; }
820     ecb_inline uint_fast32_t ecb_be_u32_to_host (uint_fast32_t v) { return ecb_little_endian () ? ecb_bswap32 (v) : v; }
821     ecb_inline uint_fast64_t ecb_be_u64_to_host (uint_fast64_t v) { return ecb_little_endian () ? ecb_bswap64 (v) : v; }
822    
823     ecb_inline uint_fast16_t ecb_le_u16_to_host (uint_fast16_t v) { return ecb_big_endian () ? ecb_bswap16 (v) : v; }
824     ecb_inline uint_fast32_t ecb_le_u32_to_host (uint_fast32_t v) { return ecb_big_endian () ? ecb_bswap32 (v) : v; }
825     ecb_inline uint_fast64_t ecb_le_u64_to_host (uint_fast64_t v) { return ecb_big_endian () ? ecb_bswap64 (v) : v; }
826    
827     ecb_inline uint_fast16_t ecb_peek_u16_u (const void *ptr) { uint16_t v; memcpy (&v, ptr, sizeof (v)); return v; }
828     ecb_inline uint_fast32_t ecb_peek_u32_u (const void *ptr) { uint32_t v; memcpy (&v, ptr, sizeof (v)); return v; }
829     ecb_inline uint_fast64_t ecb_peek_u64_u (const void *ptr) { uint64_t v; memcpy (&v, ptr, sizeof (v)); return v; }
830    
831     ecb_inline uint_fast16_t ecb_peek_be_u16_u (const void *ptr) { return ecb_be_u16_to_host (ecb_peek_u16_u (ptr)); }
832     ecb_inline uint_fast32_t ecb_peek_be_u32_u (const void *ptr) { return ecb_be_u32_to_host (ecb_peek_u32_u (ptr)); }
833     ecb_inline uint_fast64_t ecb_peek_be_u64_u (const void *ptr) { return ecb_be_u64_to_host (ecb_peek_u64_u (ptr)); }
834    
835     ecb_inline uint_fast16_t ecb_peek_le_u16_u (const void *ptr) { return ecb_le_u16_to_host (ecb_peek_u16_u (ptr)); }
836     ecb_inline uint_fast32_t ecb_peek_le_u32_u (const void *ptr) { return ecb_le_u32_to_host (ecb_peek_u32_u (ptr)); }
837     ecb_inline uint_fast64_t ecb_peek_le_u64_u (const void *ptr) { return ecb_le_u64_to_host (ecb_peek_u64_u (ptr)); }
838    
839     ecb_inline uint_fast16_t ecb_host_to_be_u16 (uint_fast16_t v) { return ecb_little_endian () ? ecb_bswap16 (v) : v; }
840     ecb_inline uint_fast32_t ecb_host_to_be_u32 (uint_fast32_t v) { return ecb_little_endian () ? ecb_bswap32 (v) : v; }
841     ecb_inline uint_fast64_t ecb_host_to_be_u64 (uint_fast64_t v) { return ecb_little_endian () ? ecb_bswap64 (v) : v; }
842    
843     ecb_inline uint_fast16_t ecb_host_to_le_u16 (uint_fast16_t v) { return ecb_big_endian () ? ecb_bswap16 (v) : v; }
844     ecb_inline uint_fast32_t ecb_host_to_le_u32 (uint_fast32_t v) { return ecb_big_endian () ? ecb_bswap32 (v) : v; }
845     ecb_inline uint_fast64_t ecb_host_to_le_u64 (uint_fast64_t v) { return ecb_big_endian () ? ecb_bswap64 (v) : v; }
846    
847     ecb_inline void ecb_poke_u16_u (void *ptr, uint16_t v) { memcpy (ptr, &v, sizeof (v)); }
848     ecb_inline void ecb_poke_u32_u (void *ptr, uint32_t v) { memcpy (ptr, &v, sizeof (v)); }
849     ecb_inline void ecb_poke_u64_u (void *ptr, uint64_t v) { memcpy (ptr, &v, sizeof (v)); }
850    
851     ecb_inline void ecb_poke_be_u16_u (void *ptr, uint_fast16_t v) { ecb_poke_u16_u (ptr, ecb_host_to_be_u16 (v)); }
852     ecb_inline void ecb_poke_be_u32_u (void *ptr, uint_fast32_t v) { ecb_poke_u32_u (ptr, ecb_host_to_be_u32 (v)); }
853     ecb_inline void ecb_poke_be_u64_u (void *ptr, uint_fast64_t v) { ecb_poke_u64_u (ptr, ecb_host_to_be_u64 (v)); }
854 sf-exg 1.196
855 root 1.180 ecb_inline void ecb_poke_le_u16_u (void *ptr, uint_fast16_t v) { ecb_poke_u16_u (ptr, ecb_host_to_le_u16 (v)); }
856     ecb_inline void ecb_poke_le_u32_u (void *ptr, uint_fast32_t v) { ecb_poke_u32_u (ptr, ecb_host_to_le_u32 (v)); }
857     ecb_inline void ecb_poke_le_u64_u (void *ptr, uint_fast64_t v) { ecb_poke_u64_u (ptr, ecb_host_to_le_u64 (v)); }
858    
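
Usage sketch (illustrative, not part of ecb.h): the _u accessors go through memcpy, so they are safe for unaligned pointers, and the be/le variants convert to/from the host byte order. The field layout below is hypothetical.

   #include "ecb.h"

   /* hypothetical wire format: a 4-byte big-endian length at byte offset 1 */
   static uint_fast32_t parse_length (const unsigned char *buf)
   {
     return ecb_peek_be_u32_u (buf + 1); /* unaligned access is fine here */
   }

   /* hypothetical: store a 16-bit field in little-endian order */
   static void put_field (unsigned char *buf, uint_fast16_t v)
   {
     ecb_poke_le_u16_u (buf, v);
   }
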
859 root 1.186 #if ECB_CPP
860 root 1.180
861     inline uint8_t ecb_bswap (uint8_t v) { return v; }
862     inline uint16_t ecb_bswap (uint16_t v) { return ecb_bswap16 (v); }
863     inline uint32_t ecb_bswap (uint32_t v) { return ecb_bswap32 (v); }
864     inline uint64_t ecb_bswap (uint64_t v) { return ecb_bswap64 (v); }
865    
866     template<typename T> inline T ecb_be_to_host (T v) { return ecb_little_endian () ? ecb_bswap (v) : v; }
867     template<typename T> inline T ecb_le_to_host (T v) { return ecb_big_endian () ? ecb_bswap (v) : v; }
868     template<typename T> inline T ecb_peek (const void *ptr) { return *(const T *)ptr; }
869     template<typename T> inline T ecb_peek_be (const void *ptr) { return ecb_be_to_host (ecb_peek <T> (ptr)); }
870     template<typename T> inline T ecb_peek_le (const void *ptr) { return ecb_le_to_host (ecb_peek <T> (ptr)); }
871 root 1.184 template<typename T> inline T ecb_peek_u (const void *ptr) { T v; memcpy (&v, ptr, sizeof (v)); return v; }
872 root 1.180 template<typename T> inline T ecb_peek_be_u (const void *ptr) { return ecb_be_to_host (ecb_peek_u<T> (ptr)); }
873     template<typename T> inline T ecb_peek_le_u (const void *ptr) { return ecb_le_to_host (ecb_peek_u<T> (ptr)); }
874    
875     template<typename T> inline T ecb_host_to_be (T v) { return ecb_little_endian () ? ecb_bswap (v) : v; }
876     template<typename T> inline T ecb_host_to_le (T v) { return ecb_big_endian () ? ecb_bswap (v) : v; }
877     template<typename T> inline void ecb_poke (void *ptr, T v) { *(T *)ptr = v; }
878     template<typename T> inline void ecb_poke_be (void *ptr, T v) { return ecb_poke <T> (ptr, ecb_host_to_be (v)); }
879     template<typename T> inline void ecb_poke_le (void *ptr, T v) { return ecb_poke <T> (ptr, ecb_host_to_le (v)); }
880 root 1.184 template<typename T> inline void ecb_poke_u (void *ptr, T v) { memcpy (ptr, &v, sizeof (v)); }
881 root 1.180 template<typename T> inline void ecb_poke_be_u (void *ptr, T v) { return ecb_poke_u<T> (ptr, ecb_host_to_be (v)); }
882     template<typename T> inline void ecb_poke_le_u (void *ptr, T v) { return ecb_poke_u<T> (ptr, ecb_host_to_le (v)); }
883    
884     #endif
885    
886     /*****************************************************************************/
887 root 1.199 /* pointer/integer hashing */
888    
889     /* based on hash by Chris Wellons, https://nullprogram.com/blog/2018/07/31/ */
890     ecb_function_ uint32_t ecb_mix32 (uint32_t v);
891     ecb_function_ uint32_t ecb_mix32 (uint32_t v)
892     {
893     v ^= v >> 16; v *= 0x7feb352dU;
894     v ^= v >> 15; v *= 0x846ca68bU;
895     v ^= v >> 16;
896     return v;
897     }
898    
899     ecb_function_ uint32_t ecb_unmix32 (uint32_t v);
900     ecb_function_ uint32_t ecb_unmix32 (uint32_t v)
901     {
902     v ^= v >> 16 ; v *= 0x43021123U;
903     v ^= v >> 15 ^ v >> 30; v *= 0x1d69e2a5U;
904     v ^= v >> 16 ;
905     return v;
906     }
907    
908     /* based on splitmix64, by Sebastiano Vigna, https://prng.di.unimi.it/splitmix64.c */
909     ecb_function_ uint64_t ecb_mix64 (uint64_t v);
910     ecb_function_ uint64_t ecb_mix64 (uint64_t v)
911     {
912     v ^= v >> 30; v *= 0xbf58476d1ce4e5b9U;
913     v ^= v >> 27; v *= 0x94d049bb133111ebU;
914     v ^= v >> 31;
915     return v;
916     }
917    
918     ecb_function_ uint64_t ecb_unmix64 (uint64_t v);
919     ecb_function_ uint64_t ecb_unmix64 (uint64_t v)
920     {
921     v ^= v >> 31 ^ v >> 62; v *= 0x319642b2d24d8ec3U;
922     v ^= v >> 27 ^ v >> 54; v *= 0x96de1b173f119089U;
923     v ^= v >> 30 ^ v >> 60;
924     return v;
925     }
926    
927     ecb_function_ uintptr_t ecb_ptrmix (void *p);
928     ecb_function_ uintptr_t ecb_ptrmix (void *p)
929     {
930     #if ECB_PTRSIZE <= 4
931     return ecb_mix32 ((uint32_t)p);
932     #else
933     return ecb_mix64 ((uint64_t)p);
934     #endif
935     }
936    
937     ecb_function_ void *ecb_ptrunmix (uintptr_t v);
938     ecb_function_ void *ecb_ptrunmix (uintptr_t v)
939     {
940     #if ECB_PTRSIZE <= 4
941     return (void *)ecb_unmix32 (v);
942     #else
943     return (void *)ecb_unmix64 (v);
944     #endif
945     }
946    
947     #if ECB_CPP
948    
949     template<typename T>
950     inline uintptr_t ecb_ptrmix (T *p)
951     {
952     return ecb_ptrmix (static_cast<void *>(p));
953     }
954    
955     template<typename T>
956     inline T *ecb_ptrunmix (uintptr_t v)
957     {
958     return static_cast<T *>(ecb_ptrunmix (v));
959     }
960    
961     #endif
962    
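
Usage sketch (illustrative, not part of ecb.h): ecb_ptrmix spreads pointer bits so that even the low bits are usable as a hash, for example as a bucket index into a hypothetical power-of-two sized table; ecb_ptrunmix is the exact inverse, so the original pointer can be recovered when only the mixed value is stored.

   #include "ecb.h"

   #define TABLE_SIZE 1024 /* hypothetical, must be a power of two */

   static uintptr_t bucket_of (void *p)
   {
     uintptr_t h = ecb_ptrmix (p);

     /* round trip: ecb_ptrunmix (h) == p */
     return h & (TABLE_SIZE - 1);
   }
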
963     /*****************************************************************************/
964 root 1.202 /* gray code */
965    
966     ecb_function_ uint_fast8_t ecb_gray8_encode (uint_fast8_t b) { return b ^ (b >> 1); }
967     ecb_function_ uint_fast16_t ecb_gray16_encode (uint_fast16_t b) { return b ^ (b >> 1); }
968     ecb_function_ uint_fast32_t ecb_gray32_encode (uint_fast32_t b) { return b ^ (b >> 1); }
969     ecb_function_ uint_fast64_t ecb_gray64_encode (uint_fast64_t b) { return b ^ (b >> 1); }
970    
971     ecb_function_ uint8_t ecb_gray8_decode (uint8_t g)
972     {
973 root 1.203 g ^= g >> 1;
974     g ^= g >> 2;
975     g ^= g >> 4;
976    
977 root 1.202 return g;
978     }
979    
980     ecb_function_ uint16_t ecb_gray16_decode (uint16_t g)
981     {
982 root 1.203 g ^= g >> 1;
983     g ^= g >> 2;
984     g ^= g >> 4;
985     g ^= g >> 8;
986    
987 root 1.202 return g;
988     }
989    
990     ecb_function_ uint32_t ecb_gray32_decode (uint32_t g)
991     {
992 root 1.203 g ^= g >> 1;
993     g ^= g >> 2;
994     g ^= g >> 4;
995     g ^= g >> 8;
996     g ^= g >> 16;
997    
998 root 1.202 return g;
999     }
1000    
1001     ecb_function_ uint64_t ecb_gray64_decode (uint64_t g)
1002     {
1003 root 1.203 g ^= g >> 1;
1004     g ^= g >> 2;
1005     g ^= g >> 4;
1006     g ^= g >> 8;
1007     g ^= g >> 16;
1008     g ^= g >> 32;
1009    
1010 root 1.202 return g;
1011     }
1012    
1013     #if ECB_CPP
1014    
1015     ecb_function_ uint8_t ecb_gray_encode (uint8_t b) { return ecb_gray8_encode (b); }
1016     ecb_function_ uint16_t ecb_gray_encode (uint16_t b) { return ecb_gray16_encode (b); }
1017     ecb_function_ uint32_t ecb_gray_encode (uint32_t b) { return ecb_gray32_encode (b); }
1018     ecb_function_ uint64_t ecb_gray_encode (uint64_t b) { return ecb_gray64_encode (b); }
1019    
1020     ecb_function_ uint8_t ecb_gray_decode (uint8_t g) { return ecb_gray8_decode (g); }
1021     ecb_function_ uint16_t ecb_gray_decode (uint16_t g) { return ecb_gray16_decode (g); }
1022     ecb_function_ uint32_t ecb_gray_decode (uint32_t g) { return ecb_gray32_decode (g); }
1023     ecb_function_ uint64_t ecb_gray_decode (uint64_t g) { return ecb_gray64_decode (g); }
1024    
1025     #endif
1026    
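
A short sketch of the defining properties (illustrative, not part of ecb.h): decode inverts encode, and the codes of consecutive integers differ in exactly one bit.

   #include <assert.h>
   #include "ecb.h"

   static void gray_examples (void)
   {
     assert (ecb_gray8_encode (5) == 7); /* 101b -> 111b */
     assert (ecb_gray8_decode (7) == 5);

     /* adjacent values map to codes that differ in exactly one bit */
     assert (ecb_popcount32 (ecb_gray8_encode (11) ^ ecb_gray8_encode (12)) == 1);
   }
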
1027     /*****************************************************************************/
1028 root 1.204 /* 2d hilbert curves */
1029    
1030     /* algorithm from the book Hacker's Delight, modified to not */
1031     /* run into undefined behaviour for n==16 */
1032     static uint32_t
1033     ecb_hilbert2d_index_to_coord32 (int n, uint32_t s)
1034     {
1035     uint32_t comp, swap, cs, t, sr;
1036    
1037     /* pad s on the left (unused) bits with 01 (no change groups) */
1038     s |= 0x55555555U << n << n;
1039     /* "s shift right" */
1040     sr = (s >> 1) & 0x55555555U;
1041     /* compute complement and swap info in two-bit groups */
1042     cs = ((s & 0x55555555U) + sr) ^ 0x55555555U;
1043    
1044     /* parallel prefix xor op to propagate both complement
1045     * and swap info together from left to right (there is
1046     * no step "cs ^= cs >> 1", so in effect it computes
1047     * two independent parallel prefix operations on two
1048     * interleaved sets of sixteen bits).
1049     */
1050     cs ^= cs >> 2;
1051     cs ^= cs >> 4;
1052     cs ^= cs >> 8;
1053     cs ^= cs >> 16;
1054    
1055     /* separate swap and complement bits */
1056     swap = cs & 0x55555555U;
1057     comp = (cs >> 1) & 0x55555555U;
1058    
1059     /* calculate coordinates in odd and even bit positions */
1060     t = (s & swap) ^ comp;
1061     s = s ^ sr ^ t ^ (t << 1);
1062    
1063     /* unpad/clear out any junk on the left */
1064     s = s & ((1U << n << n) - 1);
1065    
1066     /* Now "unshuffle" to separate the x and y bits. */
1067     t = (s ^ (s >> 1)) & 0x22222222U; s ^= t ^ (t << 1);
1068     t = (s ^ (s >> 2)) & 0x0c0c0c0cU; s ^= t ^ (t << 2);
1069     t = (s ^ (s >> 4)) & 0x00f000f0U; s ^= t ^ (t << 4);
1070     t = (s ^ (s >> 8)) & 0x0000ff00U; s ^= t ^ (t << 8);
1071    
1072     /* now s contains two 16-bit coordinates */
1073     return s;
1074     }
1075    
1076     /* 64 bit, a straightforward extension to the 32 bit case */
1077     static uint64_t
1078     ecb_hilbert2d_index_to_coord64 (int n, uint64_t s)
1079     {
1080     uint64_t comp, swap, cs, t, sr;
1081    
1082     /* pad s on the left (unused) bits with 01 (no change groups) */
1083     s |= 0x5555555555555555U << n << n;
1084     /* "s shift right" */
1085     sr = (s >> 1) & 0x5555555555555555U;
1086     /* compute complement and swap info in two-bit groups */
1087     cs = ((s & 0x5555555555555555U) + sr) ^ 0x5555555555555555U;
1088    
1089     /* parallel prefix xor op to propagate both complement
1090     * and swap info together from left to right (there is
1091     * no step "cs ^= cs >> 1", so in effect it computes
1092     * two independent parallel prefix operations on two
1093     * interleaved sets of thirty-two bits).
1094     */
1095     cs ^= cs >> 2;
1096     cs ^= cs >> 4;
1097     cs ^= cs >> 8;
1098     cs ^= cs >> 16;
1099     cs ^= cs >> 32;
1100    
1101     /* separate swap and complement bits */
1102     swap = cs & 0x5555555555555555U;
1103     comp = (cs >> 1) & 0x5555555555555555U;
1104    
1105     /* calculate coordinates in odd and even bit positions */
1106     t = (s & swap) ^ comp;
1107     s = s ^ sr ^ t ^ (t << 1);
1108    
1109     /* unpad/clear out any junk on the left */
1110     s = s & (((uint64_t)1 << n << n) - 1);
1111    
1112     /* Now "unshuffle" to separate the x and y bits. */
1113     t = (s ^ (s >> 1)) & 0x2222222222222222U; s ^= t ^ (t << 1);
1114     t = (s ^ (s >> 2)) & 0x0c0c0c0c0c0c0c0cU; s ^= t ^ (t << 2);
1115     t = (s ^ (s >> 4)) & 0x00f000f000f000f0U; s ^= t ^ (t << 4);
1116     t = (s ^ (s >> 8)) & 0x0000ff000000ff00U; s ^= t ^ (t << 8);
1117     t = (s ^ (s >> 16)) & 0x00000000ffff0000U; s ^= t ^ (t << 16);
1118    
1119     /* now s contains two 32-bit coordinates */
1120     return s;
1121     }
1122    
1123     /* algorithm from the book Hacker's Delight, but a similar algorithm */
1124     /* is given in https://doi.org/10.1002/spe.4380160103 */
1125     /* this has been slightly improved over the original version */
1126     ecb_function_ uint32_t
1127     ecb_hilbert2d_coord_to_index32 (int n, uint32_t xy)
1128     {
1129     uint32_t row;
1130     uint32_t state = 0;
1131     uint32_t s = 0;
1132    
1133     do
1134     {
1135     --n;
1136    
1137     row = 4 * state
1138     | (2 & (xy >> n >> 15))
1139     | (1 & (xy >> n ));
1140    
1141     /* these funky constants are lookup tables for two-bit values */
1142     s = (s << 2) | (0x361e9cb4U >> 2 * row) & 3;
1143     state = (0x8fe65831U >> 2 * row) & 3;
1144     }
1145     while (n > 0);
1146    
1147     return s;
1148     }
1149    
1150     /* 64 bit, essentially the same as 32 bit */
1151     ecb_function_ uint64_t
1152     ecb_hilbert2d_coord_to_index64 (int n, uint64_t xy)
1153     {
1154     uint32_t row;
1155     uint32_t state = 0;
1156     uint64_t s = 0;
1157    
1158     do
1159     {
1160     --n;
1161    
1162     row = 4 * state
1163     | (2 & (xy >> n >> 31))
1164     | (1 & (xy >> n ));
1165    
1166     /* these funky constants are lookup tables for two-bit values */
1167     s = (s << 2) | (0x361e9cb4U >> 2 * row) & 3;
1168     state = (0x8fe65831U >> 2 * row) & 3;
1169     }
1170     while (n > 0);
1171    
1172     return s;
1173     }
1174    
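
Usage sketch (illustrative, not part of ecb.h): for an order-n curve both coordinates use n bits and are packed into one value, one coordinate in the low half and one in the high half, while the index uses 2n bits. The two functions are intended as mutual inverses, so converting a packed coordinate pair to an index and back recovers the original pair.

   #include <assert.h>
   #include "ecb.h"

   static void hilbert_examples (void)
   {
     uint32_t xy = (7u << 16) | 12u;                       /* hypothetical point, order-4 curve: 0..15 per coordinate */
     uint32_t s  = ecb_hilbert2d_coord_to_index32 (4, xy);

     assert (s < 256);                                     /* 2 * 4 bits of index */
     assert (ecb_hilbert2d_index_to_coord32 (4, s) == xy); /* round trip */
   }
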
1175     /*****************************************************************************/
1176 root 1.188 /* division */
1177 root 1.180
1178 root 1.39 #if ECB_GCC_VERSION(3,0) || ECB_C99
1179 root 1.188 /* C99 tightened the definition of %, so we can use a more efficient version */
1180 root 1.35 #define ecb_mod(m,n) ((m) % (n) + ((m) % (n) < 0 ? (n) : 0))
1181 root 1.31 #else
1182 root 1.35 #define ecb_mod(m,n) ((m) < 0 ? ((n) - 1 - ((-1 - (m)) % (n))) : ((m) % (n)))
1183 root 1.31 #endif
1184 root 1.21
1185 root 1.149 #if ECB_CPP
1186 sf-exg 1.68 template<typename T>
1187     static inline T ecb_div_rd (T val, T div)
1188     {
1189     return val < 0 ? - ((-val + div - 1) / div) : (val ) / div;
1190     }
1191     template<typename T>
1192     static inline T ecb_div_ru (T val, T div)
1193     {
1194     return val < 0 ? - ((-val ) / div) : (val + div - 1) / div;
1195     }
1196     #else
1197     #define ecb_div_rd(val,div) ((val) < 0 ? - ((-(val) + (div) - 1) / (div)) : ((val) ) / (div))
1198     #define ecb_div_ru(val,div) ((val) < 0 ? - ((-(val) ) / (div)) : ((val) + (div) - 1) / (div))
1199     #endif
1200 sf-exg 1.67
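
A short sketch of the difference from plain C division (illustrative, not part of ecb.h): C's / and % truncate toward zero, so negative dividends give a negative remainder, while ecb_mod always yields a result in [0, n), and ecb_div_rd/ecb_div_ru round the quotient down/up.

   #include <assert.h>
   #include "ecb.h"

   static void div_examples (void)
   {
     assert (-7 % 3 == -1);             /* plain C: truncated toward zero */
     assert (ecb_mod (-7, 3) == 2);     /* floored modulus, always non-negative */
     assert (ecb_div_rd (-7, 3) == -3); /* rounded down */
     assert (ecb_div_ru (-7, 3) == -2); /* rounded up */
     assert (ecb_div_ru ( 7, 3) ==  3);
   }
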
1201 root 1.188 /*****************************************************************************/
1202     /* array length */
1203    
1204 root 1.5 #if ecb_cplusplus_does_not_suck
1205 root 1.40 /* does not work for local types (http://www.open-std.org/jtc1/sc22/wg21/docs/papers/2008/n2657.htm) */
1206 root 1.35 template<typename T, int N>
1207     static inline int ecb_array_length (const T (&arr)[N])
1208     {
1209     return N;
1210     }
1211 root 1.5 #else
1212 root 1.35 #define ecb_array_length(name) (sizeof (name) / sizeof (name [0]))
1213 root 1.5 #endif
1214    
1215 root 1.180 /*****************************************************************************/
1216 root 1.188 /* IEEE 754-2008 half float conversions */
1217 root 1.180
1218 root 1.170 ecb_function_ ecb_const uint32_t ecb_binary16_to_binary32 (uint32_t x);
1219 root 1.167 ecb_function_ ecb_const uint32_t
1220 root 1.170 ecb_binary16_to_binary32 (uint32_t x)
1221 root 1.167 {
1222     unsigned int s = (x & 0x8000) << (31 - 15);
1223     int e = (x >> 10) & 0x001f;
1224     unsigned int m = x & 0x03ff;
1225    
1226     if (ecb_expect_false (e == 31))
1227     /* infinity or NaN */
1228     e = 255 - (127 - 15);
1229     else if (ecb_expect_false (!e))
1230     {
1231     if (ecb_expect_true (!m))
1232     /* zero, handled by code below by forcing e to 0 */
1233     e = 0 - (127 - 15);
1234     else
1235     {
1236     /* subnormal, renormalise */
1237     unsigned int s = 10 - ecb_ld32 (m);
1238    
1239     m = (m << s) & 0x3ff; /* mask implicit bit */
1240     e -= s - 1;
1241     }
1242     }
1243    
1244     /* e and m now are normalised, or zero, (or inf or nan) */
1245     e += 127 - 15;
1246    
1247     return s | (e << 23) | (m << (23 - 10));
1248     }
1249    
1250     ecb_function_ ecb_const uint16_t ecb_binary32_to_binary16 (uint32_t x);
1251     ecb_function_ ecb_const uint16_t
1252     ecb_binary32_to_binary16 (uint32_t x)
1253     {
1254     unsigned int s = (x >> 16) & 0x00008000; /* sign bit, the easy part */
1255 root 1.188 int e = ((x >> 23) & 0x000000ff) - (127 - 15); /* the desired exponent */
1256 root 1.167 unsigned int m = x & 0x007fffff;
1257    
1258     x &= 0x7fffffff;
1259    
1260     /* if it's within range of binary16 normals, use fast path */
1261     if (ecb_expect_true (0x38800000 <= x && x <= 0x477fefff))
1262     {
1263     /* mantissa round-to-even */
1264     m += 0x00000fff + ((m >> (23 - 10)) & 1);
1265    
1266     /* handle overflow */
1267     if (ecb_expect_false (m >= 0x00800000))
1268     {
1269     m >>= 1;
1270     e += 1;
1271     }
1272    
1273     return s | (e << 10) | (m >> (23 - 10));
1274     }
1275    
1276     /* handle large numbers and infinity */
1277     if (ecb_expect_true (0x477fefff < x && x <= 0x7f800000))
1278     return s | 0x7c00;
1279    
1280 root 1.169 /* handle zero, subnormals and small numbers */
1281 root 1.167 if (ecb_expect_true (x < 0x38800000))
1282     {
1283     /* zero */
1284     if (ecb_expect_true (!x))
1285     return s;
1286    
1287     /* handle subnormals */
1288    
1289 root 1.169 /* too small, will be zero */
1290     if (e < (14 - 24)) /* might not be sharp, but is good enough */
1291     return s;
1292    
1293 root 1.167 m |= 0x00800000; /* make implicit bit explicit */
1294    
1295     /* very tricky - we need to round to the nearest e (+10) bit value */
1296     {
1297     unsigned int bits = 14 - e;
1298     unsigned int half = (1 << (bits - 1)) - 1;
1299     unsigned int even = (m >> bits) & 1;
1300    
1301     /* if this overflows, we will end up with a normalised number */
1302     m = (m + half + even) >> bits;
1303     }
1304    
1305     return s | m;
1306     }
1307    
1308     /* handle NaNs, preserve leftmost nan bits, but make sure we don't turn them into infinities */
1309     m >>= 13;
1310    
1311     return s | 0x7c00 | m | !m;
1312     }
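
/* Worked example (added for illustration): 1.0f is 0x3f800000 as binary32
 * and 0x3c00 as binary16, so
 *   ecb_binary16_to_binary32 (0x3c00)     == 0x3f800000 and
 *   ecb_binary32_to_binary16 (0x3f800000) == 0x3c00. */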
1313    
1314 root 1.104 /*******************************************************************************/
1315 root 1.191 /* fast integer to ascii */
1316    
1317 root 1.195 /*
1318     * This code is pretty complicated because it is general. The idea behind it,
1319     * however, is pretty simple: first, the number is multiplied with a scaling
1320 root 1.197 * factor (2**bits / 10**(digits-1)) to convert the integer into a fixed-point
1321 root 1.195 * number with the first digit in the upper bits.
1322     * Then this digit is converted to text and masked out. The resulting number
1323     * is then multiplied by 10, by multiplying the fixed point representation
1324     * by 5 and shifting the (binary) decimal point one to the right, so a 4.28
1325     * format becomes 5.27, 6.26 and so on.
1326     * The rest involves only advancing the pointer if we already generated a
1327     * non-zero digit, so leading zeroes are overwritten.
1328     */
1329    
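/* Worked example (added for illustration), for the 4-digit variant below
 * (type = uint32_t, bits = 26, digitmask = 1000) and u = 1234:
 * the scaling factor is (2**26 - 1 + 1000) / 1000 = 67109, so
 * x = 1234 * 67109 = 82812506 and x >> 26 == 1 is the first digit;
 * masking out that digit and multiplying by 5 while moving the binary
 * point right then yields 2, 3 and 4 in turn. */
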
1330 root 1.201 /* simply return a mask with "bits" bits set */
1331 root 1.191 #define ecb_i2a_mask(type,bits) ((((type)1) << (bits)) - 1)
1332    
1333 root 1.200     /* output a single digit. maskvalue is 10**digitidx */
1334 root 1.191 #define ecb_i2a_digit(type,bits,digitmask,maskvalue,digitidx) \
1335     if (digitmask >= maskvalue) /* constant, used to decide how many digits to generate */ \
1336     { \
1337     char digit = x >> (bits - digitidx); /* calculate the topmost digit */ \
1338     *ptr = digit + '0'; /* output it */ \
1339     nz = (digitmask == maskvalue) || nz || digit; /* first term == always output last digit */ \
1340     ptr += nz; /* output digit only if non-zero digit seen */ \
1341     x = (x & ecb_i2a_mask (type, bits - digitidx)) * 5; /* *10, but shift decimal point right */ \
1342     }
1343    
1344 root 1.200 /* convert integer to fixed point format and multiply out digits, highest first */
1345     /* requires magic constants: max. digits and number of bits after the decimal point */
1346 root 1.191 #define ecb_i2a_def(suffix,ptr,v,type,bits,digitmask,lz) \
1347     ecb_inline char *ecb_i2a_ ## suffix (char *ptr, uint32_t u) \
1348     { \
1349     char nz = lz; /* non-zero digit seen? */ \
1350     /* convert to x.bits fixed-point */ \
1351     type x = u * ((ecb_i2a_mask (type, bits) + digitmask) / digitmask); \
1352     /* output up to 10 digits */ \
1353     ecb_i2a_digit (type,bits,digitmask, 1, 0); \
1354     ecb_i2a_digit (type,bits,digitmask, 10, 1); \
1355     ecb_i2a_digit (type,bits,digitmask, 100, 2); \
1356     ecb_i2a_digit (type,bits,digitmask, 1000, 3); \
1357     ecb_i2a_digit (type,bits,digitmask, 10000, 4); \
1358     ecb_i2a_digit (type,bits,digitmask, 100000, 5); \
1359     ecb_i2a_digit (type,bits,digitmask, 1000000, 6); \
1360     ecb_i2a_digit (type,bits,digitmask, 10000000, 7); \
1361     ecb_i2a_digit (type,bits,digitmask, 100000000, 8); \
1362     ecb_i2a_digit (type,bits,digitmask, 1000000000, 9); \
1363     return ptr; \
1364     }
1365    
1366 root 1.200 /* predefined versions of the above, for various digits */
1367     /* ecb_i2a_xN = almost N digits, limit defined by macro */
1368     /* ecb_i2a_N = up to N digits, leading zeroes suppressed */
1369     /* ecb_i2a_0N = exactly N digits, including leading zeroes */
1370    
1371     /* non-leading-zero versions, limited range */
1372     #define ECB_I2A_MAX_X5 59074 /* limit for ecb_i2a_x5 */
1373     #define ECB_I2A_MAX_X10 2932500665 /* limit for ecb_i2a_x10 */
1374 root 1.191 ecb_i2a_def ( x5, ptr, v, uint32_t, 26, 10000, 0)
1375     ecb_i2a_def (x10, ptr, v, uint64_t, 60, 1000000000, 0)
1376    
1377 root 1.200     /* non-leading-zero versions, all digits, 4 and 9 are optimal for 32/64 bit */
1378 root 1.194 ecb_i2a_def ( 2, ptr, v, uint32_t, 10, 10, 0)
1379     ecb_i2a_def ( 3, ptr, v, uint32_t, 12, 100, 0)
1380     ecb_i2a_def ( 4, ptr, v, uint32_t, 26, 1000, 0)
1381     ecb_i2a_def ( 5, ptr, v, uint64_t, 30, 10000, 0)
1382     ecb_i2a_def ( 6, ptr, v, uint64_t, 36, 100000, 0)
1383     ecb_i2a_def ( 7, ptr, v, uint64_t, 44, 1000000, 0)
1384     ecb_i2a_def ( 8, ptr, v, uint64_t, 50, 10000000, 0)
1385     ecb_i2a_def ( 9, ptr, v, uint64_t, 56, 100000000, 0)
1386 root 1.191
1387 root 1.200 /* leading-zero versions, all digits, 04 and 09 are optimal for 32/64 bit */
1388 root 1.194 ecb_i2a_def (02, ptr, v, uint32_t, 10, 10, 1)
1389     ecb_i2a_def (03, ptr, v, uint32_t, 12, 100, 1)
1390     ecb_i2a_def (04, ptr, v, uint32_t, 26, 1000, 1)
1391     ecb_i2a_def (05, ptr, v, uint64_t, 30, 10000, 1)
1392     ecb_i2a_def (06, ptr, v, uint64_t, 36, 100000, 1)
1393     ecb_i2a_def (07, ptr, v, uint64_t, 44, 1000000, 1)
1394     ecb_i2a_def (08, ptr, v, uint64_t, 50, 10000000, 1)
1395     ecb_i2a_def (09, ptr, v, uint64_t, 56, 100000000, 1)
1396 root 1.191
1397 root 1.192 #define ECB_I2A_I32_DIGITS 11
1398     #define ECB_I2A_U32_DIGITS 10
1399     #define ECB_I2A_I64_DIGITS 20
1400 root 1.194 #define ECB_I2A_U64_DIGITS 21
1401 root 1.193 #define ECB_I2A_MAX_DIGITS 21
1402 root 1.192
1403 root 1.191 ecb_inline char *
1404     ecb_i2a_u32 (char *ptr, uint32_t u)
1405     {
1406     #if ECB_64BIT_NATIVE
1407     if (ecb_expect_true (u <= ECB_I2A_MAX_X10))
1408     ptr = ecb_i2a_x10 (ptr, u);
1409 root 1.200 else /* x10 almost, but not fully, covers 32 bit */
1410 root 1.191 {
1411     uint32_t u1 = u % 1000000000;
1412     uint32_t u2 = u / 1000000000;
1413    
1414     *ptr++ = u2 + '0';
1415     ptr = ecb_i2a_09 (ptr, u1);
1416     }
1417     #else
1418     if (ecb_expect_true (u <= ECB_I2A_MAX_X5))
1419     ptr = ecb_i2a_x5 (ptr, u);
1420     else if (ecb_expect_true (u <= ECB_I2A_MAX_X5 * 10000))
1421     {
1422     uint32_t u1 = u % 10000;
1423     uint32_t u2 = u / 10000;
1424    
1425     ptr = ecb_i2a_x5 (ptr, u2);
1426     ptr = ecb_i2a_04 (ptr, u1);
1427     }
1428     else
1429     {
1430     uint32_t u1 = u % 10000;
1431     uint32_t ua = u / 10000;
1432     uint32_t u2 = ua % 10000;
1433     uint32_t u3 = ua / 10000;
1434    
1435     ptr = ecb_i2a_2 (ptr, u3);
1436     ptr = ecb_i2a_04 (ptr, u2);
1437     ptr = ecb_i2a_04 (ptr, u1);
1438     }
1439     #endif
1440    
1441     return ptr;
1442     }
1443    
1444     ecb_inline char *
1445     ecb_i2a_i32 (char *ptr, int32_t v)
1446     {
1447     *ptr = '-'; ptr += v < 0;
1448     uint32_t u = v < 0 ? -(uint32_t)v : v;
1449    
1450     #if ECB_64BIT_NATIVE
1451 root 1.200 ptr = ecb_i2a_x10 (ptr, u); /* x10 fully covers 31 bit */
1452 root 1.191 #else
1453     ptr = ecb_i2a_u32 (ptr, u);
1454     #endif
1455    
1456     return ptr;
1457     }
1458    
1459     ecb_inline char *
1460     ecb_i2a_u64 (char *ptr, uint64_t u)
1461     {
1462     #if ECB_64BIT_NATIVE
1463     if (ecb_expect_true (u <= ECB_I2A_MAX_X10))
1464     ptr = ecb_i2a_x10 (ptr, u);
1465     else if (ecb_expect_false (u <= ECB_I2A_MAX_X10 * 1000000000))
1466     {
1467     uint64_t u1 = u % 1000000000;
1468     uint64_t u2 = u / 1000000000;
1469    
1470     ptr = ecb_i2a_x10 (ptr, u2);
1471     ptr = ecb_i2a_09 (ptr, u1);
1472     }
1473     else
1474     {
1475     uint64_t u1 = u % 1000000000;
1476     uint64_t ua = u / 1000000000;
1477     uint64_t u2 = ua % 1000000000;
1478     uint64_t u3 = ua / 1000000000;
1479    
1480     ptr = ecb_i2a_2 (ptr, u3);
1481     ptr = ecb_i2a_09 (ptr, u2);
1482     ptr = ecb_i2a_09 (ptr, u1);
1483     }
1484     #else
1485     if (ecb_expect_true (u <= ECB_I2A_MAX_X5))
1486     ptr = ecb_i2a_x5 (ptr, u);
1487     else
1488     {
1489     uint64_t u1 = u % 10000;
1490     uint64_t u2 = u / 10000;
1491    
1492     ptr = ecb_i2a_u64 (ptr, u2);
1493     ptr = ecb_i2a_04 (ptr, u1);
1494     }
1495     #endif
1496    
1497     return ptr;
1498     }
1499    
1500     ecb_inline char *
1501     ecb_i2a_i64 (char *ptr, int64_t v)
1502     {
1503     *ptr = '-'; ptr += v < 0;
1504     uint64_t u = v < 0 ? -(uint64_t)v : v;
1505    
1506     #if ECB_64BIT_NATIVE
1507     if (ecb_expect_true (u <= ECB_I2A_MAX_X10))
1508     ptr = ecb_i2a_x10 (ptr, u);
1509     else if (ecb_expect_false (u <= ECB_I2A_MAX_X10 * 1000000000))
1510     {
1511     uint64_t u1 = u % 1000000000;
1512     uint64_t u2 = u / 1000000000;
1513    
1514     ptr = ecb_i2a_x10 (ptr, u2);
1515     ptr = ecb_i2a_09 (ptr, u1);
1516     }
1517     else
1518     {
1519     uint64_t u1 = u % 1000000000;
1520     uint64_t ua = u / 1000000000;
1521     uint64_t u2 = ua % 1000000000;
1522     uint64_t u3 = ua / 1000000000;
1523    
1524 root 1.200     /* 2**63 is 19 digits, so the top is exactly one digit */
1525 root 1.191 *ptr++ = u3 + '0';
1526     ptr = ecb_i2a_09 (ptr, u2);
1527     ptr = ecb_i2a_09 (ptr, u1);
1528     }
1529     #else
1530     ptr = ecb_i2a_u64 (ptr, u);
1531     #endif
1532    
1533     return ptr;
1534     }
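
/* Usage sketch (added for illustration, not part of the original header):
 * the ecb_i2a_* functions return a pointer just past the last character
 * written and do not NUL-terminate, so a caller might do: */
#if 0
#include <stdio.h>

static void
ecb_example_print_i64 (int64_t v)
{
  char buf[ECB_I2A_I64_DIGITS + 1]; /* sign + digits, plus one byte for the NUL */
  char *end = ecb_i2a_i64 (buf, v);

  *end = 0;
  puts (buf);
}
#endif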
1535    
1536     /*******************************************************************************/
1537 root 1.104 /* floating point stuff, can be disabled by defining ECB_NO_LIBM */
1538    
1539     /* basically, everything uses "ieee pure-endian" floating point numbers */
1540     /* the only noteworthy exception is ancient armle, which uses order 43218765 */
1541     #if 0 \
1542     || __i386 || __i386__ \
1543 sf-exg 1.159 || ECB_GCC_AMD64 \
1544 root 1.104 || __powerpc__ || __ppc__ || __powerpc64__ || __ppc64__ \
1545     || defined __s390__ || defined __s390x__ \
1546     || defined __mips__ \
1547     || defined __alpha__ \
1548     || defined __hppa__ \
1549     || defined __ia64__ \
1550 root 1.117 || defined __m68k__ \
1551     || defined __m88k__ \
1552     || defined __sh__ \
1553 sf-exg 1.159 || defined _M_IX86 || defined ECB_MSVC_AMD64 || defined _M_IA64 \
1554 root 1.131 || (defined __arm__ && (defined __ARM_EABI__ || defined __EABI__ || defined __VFP_FP__ || defined _WIN32_WCE || defined __ANDROID__)) \
1555 root 1.132 || defined __aarch64__
1556 root 1.104 #define ECB_STDFP 1
1557 root 1.102 #else
1558 root 1.104 #define ECB_STDFP 0
1559 root 1.102 #endif
1560    
1561 root 1.104 #ifndef ECB_NO_LIBM
1562 root 1.103
1563 root 1.121 #include <math.h> /* for frexp*, ldexp*, INFINITY, NAN */
1564    
1565     /* only the oldest of the old doesn't have this one. solaris. */
1566     #ifdef INFINITY
1567     #define ECB_INFINITY INFINITY
1568     #else
1569     #define ECB_INFINITY HUGE_VAL
1570     #endif
1571    
1572     #ifdef NAN
1573 root 1.121 #define ECB_NAN NAN
1574     #else
1575 root 1.122 #define ECB_NAN ECB_INFINITY
1576 root 1.121 #endif
1577 root 1.120
1578 root 1.148 #if ECB_C99 || _XOPEN_VERSION >= 600 || _POSIX_VERSION >= 200112L
1579 root 1.150 #define ecb_ldexpf(x,e) ldexpf ((x), (e))
1580 sf-exg 1.163 #define ecb_frexpf(x,e) frexpf ((x), (e))
1581 root 1.148 #else
1582 sf-exg 1.161 #define ecb_ldexpf(x,e) (float) ldexp ((double) (x), (e))
1583 sf-exg 1.163 #define ecb_frexpf(x,e) (float) frexp ((double) (x), (e))
1584 root 1.148 #endif
1585    
1586 root 1.104 /* convert a float to ieee single/binary32 */
1587 root 1.151 ecb_function_ ecb_const uint32_t ecb_float_to_binary32 (float x);
1588     ecb_function_ ecb_const uint32_t
1589 root 1.103 ecb_float_to_binary32 (float x)
1590     {
1591     uint32_t r;
1592    
1593     #if ECB_STDFP
1594 root 1.104 memcpy (&r, &x, 4);
1595 root 1.103 #else
1596 root 1.105 /* slow emulation, works for anything but -0 */
1597 root 1.103 uint32_t m;
1598     int e;
1599    
1600 root 1.108 if (x == 0e0f ) return 0x00000000U;
1601 root 1.103 if (x > +3.40282346638528860e+38f) return 0x7f800000U;
1602     if (x < -3.40282346638528860e+38f) return 0xff800000U;
1603 root 1.105 if (x != x ) return 0x7fbfffffU;
1604 root 1.103
1605 sf-exg 1.163 m = ecb_frexpf (x, &e) * 0x1000000U;
1606 root 1.103
1607     r = m & 0x80000000U;
1608    
1609     if (r)
1610     m = -m;
1611    
1612 root 1.108 if (e <= -126)
1613 root 1.103 {
1614     m &= 0xffffffU;
1615     m >>= (-125 - e);
1616     e = -126;
1617     }
1618    
1619     r |= (e + 126) << 23;
1620     r |= m & 0x7fffffU;
1621     #endif
1622    
1623     return r;
1624     }
1625    
1626 root 1.104 /* converts an ieee single/binary32 to a float */
1627 root 1.151 ecb_function_ ecb_const float ecb_binary32_to_float (uint32_t x);
1628     ecb_function_ ecb_const float
1629 root 1.103 ecb_binary32_to_float (uint32_t x)
1630     {
1631     float r;
1632    
1633     #if ECB_STDFP
1634 root 1.104 memcpy (&r, &x, 4);
1635 root 1.103 #else
1636     /* emulation, only works for normals and subnormals and +0 */
1637     int neg = x >> 31;
1638     int e = (x >> 23) & 0xffU;
1639    
1640     x &= 0x7fffffU;
1641    
1642     if (e)
1643     x |= 0x800000U;
1644 root 1.104 else
1645     e = 1;
1646 root 1.103
1647     /* we distrust ldexpf a bit and do the 2**-24 scaling by an extra multiply */
1648 root 1.148 r = ecb_ldexpf (x * (0.5f / 0x800000U), e - 126);
1649 root 1.103
1650     r = neg ? -r : r;
1651     #endif
1652    
1653     return r;
1654     }
1655    
1656 root 1.104 /* convert a double to ieee double/binary64 */
1657 root 1.151 ecb_function_ ecb_const uint64_t ecb_double_to_binary64 (double x);
1658     ecb_function_ ecb_const uint64_t
1659 root 1.103 ecb_double_to_binary64 (double x)
1660     {
1661 root 1.104 uint64_t r;
1662    
1663     #if ECB_STDFP
1664     memcpy (&r, &x, 8);
1665     #else
1666 root 1.105 /* slow emulation, works for anything but -0 */
1667 root 1.104 uint64_t m;
1668     int e;
1669    
1670 root 1.108 if (x == 0e0 ) return 0x0000000000000000U;
1671 root 1.104 if (x > +1.79769313486231470e+308) return 0x7ff0000000000000U;
1672     if (x < -1.79769313486231470e+308) return 0xfff0000000000000U;
1673 root 1.105     if (x != x ) return 0x7ff7ffffffffffffU;
1674 root 1.104
1675     m = frexp (x, &e) * 0x20000000000000U;
1676    
1677     r = m & 0x8000000000000000U;
1678    
1679     if (r)
1680     m = -m;
1681    
1682 root 1.108 if (e <= -1022)
1683 root 1.104 {
1684     m &= 0x1fffffffffffffU;
1685     m >>= (-1021 - e);
1686     e = -1022;
1687     }
1688    
1689     r |= ((uint64_t)(e + 1022)) << 52;
1690     r |= m & 0xfffffffffffffU;
1691     #endif
1692    
1693     return r;
1694     }
1695    
1696     /* converts an ieee double/binary64 to a double */
1697 root 1.151 ecb_function_ ecb_const double ecb_binary64_to_double (uint64_t x);
1698     ecb_function_ ecb_const double
1699 root 1.104 ecb_binary64_to_double (uint64_t x)
1700     {
1701     double r;
1702    
1703     #if ECB_STDFP
1704     memcpy (&r, &x, 8);
1705     #else
1706     /* emulation, only works for normals and subnormals and +0 */
1707     int neg = x >> 63;
1708     int e = (x >> 52) & 0x7ffU;
1709    
1710     x &= 0xfffffffffffffU;
1711    
1712     if (e)
1713     x |= 0x10000000000000U;
1714     else
1715     e = 1;
1716    
1717 root 1.107 /* we distrust ldexp a bit and do the 2**-53 scaling by an extra multiply */
1718 root 1.108 r = ldexp (x * (0.5 / 0x10000000000000U), e - 1022);
1719 root 1.104
1720     r = neg ? -r : r;
1721     #endif
1722    
1723     return r;
1724 root 1.103 }
1725    
1726 root 1.167 /* convert a float to ieee half/binary16 */
1727     ecb_function_ ecb_const uint16_t ecb_float_to_binary16 (float x);
1728     ecb_function_ ecb_const uint16_t
1729     ecb_float_to_binary16 (float x)
1730     {
1731     return ecb_binary32_to_binary16 (ecb_float_to_binary32 (x));
1732     }
1733    
1734     /* convert an ieee half/binary16 to float */
1735     ecb_function_ ecb_const float ecb_binary16_to_float (uint16_t x);
1736     ecb_function_ ecb_const float
1737     ecb_binary16_to_float (uint16_t x)
1738     {
1739     return ecb_binary32_to_float (ecb_binary16_to_binary32 (x));
1740     }
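
/* Usage sketch (added for illustration): these conversions give a
 * platform-independent bit representation, e.g. for serialisation;
 * on platforms with standard IEEE layout (ECB_STDFP) they reduce to memcpy. */
#if 0
static void
ecb_example_roundtrip (void)
{
  uint64_t bits = ecb_double_to_binary64 (1.0);  /* == 0x3ff0000000000000 */
  double   back = ecb_binary64_to_double (bits); /* back == 1.0 */

  (void)back;
}
#endif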
1741    
1742 root 1.103 #endif
1743 root 1.102
1744 root 1.1 #endif
1745