
Comparing libecb/ecb.h (file contents):
Revision 1.157 by root, Fri Feb 20 17:17:26 2015 UTC vs.
Revision 1.204 by root, Fri Mar 25 08:44:14 2022 UTC

1/* 1/*
2 * libecb - http://software.schmorp.de/pkg/libecb 2 * libecb - http://software.schmorp.de/pkg/libecb
3 * 3 *
4 * Copyright (©) 2009-2015 Marc Alexander Lehmann <libecb@schmorp.de> 4 * Copyright (©) 2009-2015,2018-2021 Marc Alexander Lehmann <libecb@schmorp.de>
5 * Copyright (©) 2011 Emanuele Giaquinta 5 * Copyright (©) 2011 Emanuele Giaquinta
6 * All rights reserved. 6 * All rights reserved.
7 * 7 *
8 * Redistribution and use in source and binary forms, with or without modifica- 8 * Redistribution and use in source and binary forms, with or without modifica-
9 * tion, are permitted provided that the following conditions are met: 9 * tion, are permitted provided that the following conditions are met:
40 40
41#ifndef ECB_H 41#ifndef ECB_H
42#define ECB_H 42#define ECB_H
43 43
44/* 16 bits major, 16 bits minor */ 44/* 16 bits major, 16 bits minor */
45#define ECB_VERSION 0x00010004 45#define ECB_VERSION 0x0001000c
46 46
47#ifdef _WIN32 47#include <string.h> /* for memcpy */
48
49#if defined (_WIN32) && !defined (__MINGW32__)
48 typedef signed char int8_t; 50 typedef signed char int8_t;
49 typedef unsigned char uint8_t; 51 typedef unsigned char uint8_t;
52 typedef signed char int_fast8_t;
53 typedef unsigned char uint_fast8_t;
50 typedef signed short int16_t; 54 typedef signed short int16_t;
51 typedef unsigned short uint16_t; 55 typedef unsigned short uint16_t;
56 typedef signed int int_fast16_t;
57 typedef unsigned int uint_fast16_t;
52 typedef signed int int32_t; 58 typedef signed int int32_t;
53 typedef unsigned int uint32_t; 59 typedef unsigned int uint32_t;
60 typedef signed int int_fast32_t;
61 typedef unsigned int uint_fast32_t;
54 #if __GNUC__ 62 #if __GNUC__
55 typedef signed long long int64_t; 63 typedef signed long long int64_t;
56 typedef unsigned long long uint64_t; 64 typedef unsigned long long uint64_t;
57 #else /* _MSC_VER || __BORLANDC__ */ 65 #else /* _MSC_VER || __BORLANDC__ */
58 typedef signed __int64 int64_t; 66 typedef signed __int64 int64_t;
59 typedef unsigned __int64 uint64_t; 67 typedef unsigned __int64 uint64_t;
60 #endif 68 #endif
69 typedef int64_t int_fast64_t;
70 typedef uint64_t uint_fast64_t;
61 #ifdef _WIN64 71 #ifdef _WIN64
62 #define ECB_PTRSIZE 8 72 #define ECB_PTRSIZE 8
63 typedef uint64_t uintptr_t; 73 typedef uint64_t uintptr_t;
64 typedef int64_t intptr_t; 74 typedef int64_t intptr_t;
65 #else 75 #else
67 typedef uint32_t uintptr_t; 77 typedef uint32_t uintptr_t;
68 typedef int32_t intptr_t; 78 typedef int32_t intptr_t;
69 #endif 79 #endif
70#else 80#else
71 #include <inttypes.h> 81 #include <inttypes.h>
72 #if UINTMAX_MAX > 0xffffffffU 82 #if (defined INTPTR_MAX ? INTPTR_MAX : ULONG_MAX) > 0xffffffffU
73 #define ECB_PTRSIZE 8 83 #define ECB_PTRSIZE 8
74 #else 84 #else
75 #define ECB_PTRSIZE 4 85 #define ECB_PTRSIZE 4
76 #endif 86 #endif
77#endif 87#endif
78 88
89#define ECB_GCC_AMD64 (__amd64 || __amd64__ || __x86_64 || __x86_64__)
90#define ECB_MSVC_AMD64 (_M_AMD64 || _M_X64)
91
92#ifndef ECB_OPTIMIZE_SIZE
93 #if __OPTIMIZE_SIZE__
94 #define ECB_OPTIMIZE_SIZE 1
95 #else
96 #define ECB_OPTIMIZE_SIZE 0
97 #endif
98#endif
99
79/* work around x32 idiocy by defining proper macros */ 100/* work around x32 idiocy by defining proper macros */
80#if __amd64 || __x86_64 || _M_AMD64 || _M_X64 101#if ECB_GCC_AMD64 || ECB_MSVC_AMD64
81 #if _ILP32 102 #if _ILP32
82 #define ECB_AMD64_X32 1 103 #define ECB_AMD64_X32 1
83 #else 104 #else
84 #define ECB_AMD64 1 105 #define ECB_AMD64 1
85 #endif 106 #endif
107#endif
108
109#if ECB_PTRSIZE >= 8 || ECB_AMD64_X32
110 #define ECB_64BIT_NATIVE 1
111#else
112 #define ECB_64BIT_NATIVE 0
86#endif 113#endif
87 114
 88/* many compilers define __GNUC__ to some versions but then only implement 115/* many compilers define __GNUC__ to some versions but then only implement
89 * what their idiot authors think are the "more important" extensions, 116 * what their idiot authors think are the "more important" extensions,
90 * causing enormous grief in return for some better fake benchmark numbers. 117 * causing enormous grief in return for some better fake benchmark numbers.
112 #define ECB_CLANG_EXTENSION(x) 0 139 #define ECB_CLANG_EXTENSION(x) 0
113#endif 140#endif
114 141
115#define ECB_CPP (__cplusplus+0) 142#define ECB_CPP (__cplusplus+0)
116#define ECB_CPP11 (__cplusplus >= 201103L) 143#define ECB_CPP11 (__cplusplus >= 201103L)
144#define ECB_CPP14 (__cplusplus >= 201402L)
145#define ECB_CPP17 (__cplusplus >= 201703L)
117 146
118#if ECB_CPP 147#if ECB_CPP
119 #define ECB_C 0 148 #define ECB_C 0
120 #define ECB_STDC_VERSION 0 149 #define ECB_STDC_VERSION 0
121#else 150#else
123 #define ECB_STDC_VERSION __STDC_VERSION__ 152 #define ECB_STDC_VERSION __STDC_VERSION__
124#endif 153#endif
125 154
126#define ECB_C99 (ECB_STDC_VERSION >= 199901L) 155#define ECB_C99 (ECB_STDC_VERSION >= 199901L)
127#define ECB_C11 (ECB_STDC_VERSION >= 201112L) 156#define ECB_C11 (ECB_STDC_VERSION >= 201112L)
157#define ECB_C17 (ECB_STDC_VERSION >= 201710L)
128 158
129#if ECB_CPP 159#if ECB_CPP
130 #define ECB_EXTERN_C extern "C" 160 #define ECB_EXTERN_C extern "C"
131 #define ECB_EXTERN_C_BEG ECB_EXTERN_C { 161 #define ECB_EXTERN_C_BEG ECB_EXTERN_C {
132 #define ECB_EXTERN_C_END } 162 #define ECB_EXTERN_C_END }
147 177
148#if ECB_NO_SMP 178#if ECB_NO_SMP
149 #define ECB_MEMORY_FENCE do { } while (0) 179 #define ECB_MEMORY_FENCE do { } while (0)
150#endif 180#endif
151 181
182/* http://www-01.ibm.com/support/knowledgecenter/SSGH3R_13.1.0/com.ibm.xlcpp131.aix.doc/compiler_ref/compiler_builtins.html */
183#if __xlC__ && ECB_CPP
184 #include <builtins.h>
185#endif
186
187#if 1400 <= _MSC_VER
188 #include <intrin.h> /* fence functions _ReadBarrier, also bit search functions _BitScanReverse */
189#endif
190
152#ifndef ECB_MEMORY_FENCE 191#ifndef ECB_MEMORY_FENCE
153 #if ECB_GCC_VERSION(2,5) || defined __INTEL_COMPILER || (__llvm__ && __GNUC__) || __SUNPRO_C >= 0x5110 || __SUNPRO_CC >= 0x5110 192 #if ECB_GCC_VERSION(2,5) || defined __INTEL_COMPILER || (__llvm__ && __GNUC__) || __SUNPRO_C >= 0x5110 || __SUNPRO_CC >= 0x5110
193 #define ECB_MEMORY_FENCE_RELAXED __asm__ __volatile__ ("" : : : "memory")
154 #if __i386 || __i386__ 194 #if __i386 || __i386__
155 #define ECB_MEMORY_FENCE __asm__ __volatile__ ("lock; orb $0, -1(%%esp)" : : : "memory") 195 #define ECB_MEMORY_FENCE __asm__ __volatile__ ("lock; orb $0, -1(%%esp)" : : : "memory")
156 #define ECB_MEMORY_FENCE_ACQUIRE __asm__ __volatile__ ("" : : : "memory") 196 #define ECB_MEMORY_FENCE_ACQUIRE __asm__ __volatile__ ("" : : : "memory")
157 #define ECB_MEMORY_FENCE_RELEASE __asm__ __volatile__ ("") 197 #define ECB_MEMORY_FENCE_RELEASE __asm__ __volatile__ ("" : : : "memory")
158 #elif __amd64 || __amd64__ || __x86_64 || __x86_64__ 198 #elif ECB_GCC_AMD64
159 #define ECB_MEMORY_FENCE __asm__ __volatile__ ("mfence" : : : "memory") 199 #define ECB_MEMORY_FENCE __asm__ __volatile__ ("mfence" : : : "memory")
160 #define ECB_MEMORY_FENCE_ACQUIRE __asm__ __volatile__ ("" : : : "memory") 200 #define ECB_MEMORY_FENCE_ACQUIRE __asm__ __volatile__ ("" : : : "memory")
161 #define ECB_MEMORY_FENCE_RELEASE __asm__ __volatile__ ("") 201 #define ECB_MEMORY_FENCE_RELEASE __asm__ __volatile__ ("" : : : "memory")
162 #elif __powerpc__ || __ppc__ || __powerpc64__ || __ppc64__ 202 #elif __powerpc__ || __ppc__ || __powerpc64__ || __ppc64__
163 #define ECB_MEMORY_FENCE __asm__ __volatile__ ("sync" : : : "memory") 203 #define ECB_MEMORY_FENCE __asm__ __volatile__ ("sync" : : : "memory")
204 #elif defined __ARM_ARCH_2__ \
205 || defined __ARM_ARCH_3__ || defined __ARM_ARCH_3M__ \
206 || defined __ARM_ARCH_4__ || defined __ARM_ARCH_4T__ \
207 || defined __ARM_ARCH_5__ || defined __ARM_ARCH_5E__ \
208 || defined __ARM_ARCH_5T__ || defined __ARM_ARCH_5TE__ \
209 || defined __ARM_ARCH_5TEJ__
210 /* should not need any, unless running old code on newer cpu - arm doesn't support that */
164 #elif defined __ARM_ARCH_6__ || defined __ARM_ARCH_6J__ \ 211 #elif defined __ARM_ARCH_6__ || defined __ARM_ARCH_6J__ \
165 || defined __ARM_ARCH_6K__ || defined __ARM_ARCH_6ZK__ 212 || defined __ARM_ARCH_6K__ || defined __ARM_ARCH_6ZK__ \
213 || defined __ARM_ARCH_6T2__
166 #define ECB_MEMORY_FENCE __asm__ __volatile__ ("mcr p15,0,%0,c7,c10,5" : : "r" (0) : "memory") 214 #define ECB_MEMORY_FENCE __asm__ __volatile__ ("mcr p15,0,%0,c7,c10,5" : : "r" (0) : "memory")
167 #elif defined __ARM_ARCH_7__ || defined __ARM_ARCH_7A__ \ 215 #elif defined __ARM_ARCH_7__ || defined __ARM_ARCH_7A__ \
168 || defined __ARM_ARCH_7M__ || defined __ARM_ARCH_7R__ 216 || defined __ARM_ARCH_7R__ || defined __ARM_ARCH_7M__
169 #define ECB_MEMORY_FENCE __asm__ __volatile__ ("dmb" : : : "memory") 217 #define ECB_MEMORY_FENCE __asm__ __volatile__ ("dmb" : : : "memory")
170 #elif __aarch64__ 218 #elif __aarch64__
171 #define ECB_MEMORY_FENCE __asm__ __volatile__ ("dmb ish" : : : "memory") 219 #define ECB_MEMORY_FENCE __asm__ __volatile__ ("dmb ish" : : : "memory")
172 #elif (__sparc || __sparc__) && !__sparcv8 220 #elif (__sparc || __sparc__) && !(__sparc_v8__ || defined __sparcv8)
173 #define ECB_MEMORY_FENCE __asm__ __volatile__ ("membar #LoadStore | #LoadLoad | #StoreStore | #StoreLoad" : : : "memory") 221 #define ECB_MEMORY_FENCE __asm__ __volatile__ ("membar #LoadStore | #LoadLoad | #StoreStore | #StoreLoad" : : : "memory")
174 #define ECB_MEMORY_FENCE_ACQUIRE __asm__ __volatile__ ("membar #LoadStore | #LoadLoad" : : : "memory") 222 #define ECB_MEMORY_FENCE_ACQUIRE __asm__ __volatile__ ("membar #LoadStore | #LoadLoad" : : : "memory")
175 #define ECB_MEMORY_FENCE_RELEASE __asm__ __volatile__ ("membar #LoadStore | #StoreStore") 223 #define ECB_MEMORY_FENCE_RELEASE __asm__ __volatile__ ("membar #LoadStore | #StoreStore")
176 #elif defined __s390__ || defined __s390x__ 224 #elif defined __s390__ || defined __s390x__
177 #define ECB_MEMORY_FENCE __asm__ __volatile__ ("bcr 15,0" : : : "memory") 225 #define ECB_MEMORY_FENCE __asm__ __volatile__ ("bcr 15,0" : : : "memory")
200 #if ECB_GCC_VERSION(4,7) 248 #if ECB_GCC_VERSION(4,7)
201 /* see comment below (stdatomic.h) about the C11 memory model. */ 249 /* see comment below (stdatomic.h) about the C11 memory model. */
202 #define ECB_MEMORY_FENCE __atomic_thread_fence (__ATOMIC_SEQ_CST) 250 #define ECB_MEMORY_FENCE __atomic_thread_fence (__ATOMIC_SEQ_CST)
203 #define ECB_MEMORY_FENCE_ACQUIRE __atomic_thread_fence (__ATOMIC_ACQUIRE) 251 #define ECB_MEMORY_FENCE_ACQUIRE __atomic_thread_fence (__ATOMIC_ACQUIRE)
204 #define ECB_MEMORY_FENCE_RELEASE __atomic_thread_fence (__ATOMIC_RELEASE) 252 #define ECB_MEMORY_FENCE_RELEASE __atomic_thread_fence (__ATOMIC_RELEASE)
253 #undef ECB_MEMORY_FENCE_RELAXED
254 #define ECB_MEMORY_FENCE_RELAXED __atomic_thread_fence (__ATOMIC_RELAXED)
205 255
206 #elif ECB_CLANG_EXTENSION(c_atomic) 256 #elif ECB_CLANG_EXTENSION(c_atomic)
207 /* see comment below (stdatomic.h) about the C11 memory model. */ 257 /* see comment below (stdatomic.h) about the C11 memory model. */
208 #define ECB_MEMORY_FENCE __c11_atomic_thread_fence (__ATOMIC_SEQ_CST) 258 #define ECB_MEMORY_FENCE __c11_atomic_thread_fence (__ATOMIC_SEQ_CST)
209 #define ECB_MEMORY_FENCE_ACQUIRE __c11_atomic_thread_fence (__ATOMIC_ACQUIRE) 259 #define ECB_MEMORY_FENCE_ACQUIRE __c11_atomic_thread_fence (__ATOMIC_ACQUIRE)
210 #define ECB_MEMORY_FENCE_RELEASE __c11_atomic_thread_fence (__ATOMIC_RELEASE) 260 #define ECB_MEMORY_FENCE_RELEASE __c11_atomic_thread_fence (__ATOMIC_RELEASE)
261 #undef ECB_MEMORY_FENCE_RELAXED
262 #define ECB_MEMORY_FENCE_RELAXED __c11_atomic_thread_fence (__ATOMIC_RELAXED)
211 263
212 #elif ECB_GCC_VERSION(4,4) || defined __INTEL_COMPILER || defined __clang__ 264 #elif ECB_GCC_VERSION(4,4) || defined __INTEL_COMPILER || defined __clang__
213 #define ECB_MEMORY_FENCE __sync_synchronize () 265 #define ECB_MEMORY_FENCE __sync_synchronize ()
214 #elif _MSC_VER >= 1500 /* VC++ 2008 */ 266 #elif _MSC_VER >= 1500 /* VC++ 2008 */
215 /* apparently, microsoft broke all the memory barrier stuff in Visual Studio 2008... */ 267 /* apparently, microsoft broke all the memory barrier stuff in Visual Studio 2008... */
225 #elif defined _WIN32 277 #elif defined _WIN32
226 #include <WinNT.h> 278 #include <WinNT.h>
227 #define ECB_MEMORY_FENCE MemoryBarrier () /* actually just xchg on x86... scary */ 279 #define ECB_MEMORY_FENCE MemoryBarrier () /* actually just xchg on x86... scary */
228 #elif __SUNPRO_C >= 0x5110 || __SUNPRO_CC >= 0x5110 280 #elif __SUNPRO_C >= 0x5110 || __SUNPRO_CC >= 0x5110
229 #include <mbarrier.h> 281 #include <mbarrier.h>
230 #define ECB_MEMORY_FENCE __machine_rw_barrier () 282 #define ECB_MEMORY_FENCE __machine_rw_barrier ()
231 #define ECB_MEMORY_FENCE_ACQUIRE __machine_r_barrier () 283 #define ECB_MEMORY_FENCE_ACQUIRE __machine_acq_barrier ()
232 #define ECB_MEMORY_FENCE_RELEASE __machine_w_barrier () 284 #define ECB_MEMORY_FENCE_RELEASE __machine_rel_barrier ()
285 #define ECB_MEMORY_FENCE_RELAXED __compiler_barrier ()
233 #elif __xlC__ 286 #elif __xlC__
234 #define ECB_MEMORY_FENCE __sync () 287 #define ECB_MEMORY_FENCE __sync ()
235 #endif 288 #endif
236#endif 289#endif
237 290
238#ifndef ECB_MEMORY_FENCE 291#ifndef ECB_MEMORY_FENCE
239 #if ECB_C11 && !defined __STDC_NO_ATOMICS__ 292 #if ECB_C11 && !defined __STDC_NO_ATOMICS__
240 /* we assume that these memory fences work on all variables/all memory accesses, */ 293 /* we assume that these memory fences work on all variables/all memory accesses, */
241 /* not just C11 atomics and atomic accesses */ 294 /* not just C11 atomics and atomic accesses */
242 #include <stdatomic.h> 295 #include <stdatomic.h>
243 /* Unfortunately, neither gcc 4.7 nor clang 3.1 generate any instructions for */
244 /* any fence other than seq_cst, which isn't very efficient for us. */
245 /* Why that is, we don't know - either the C11 memory model is quite useless */
246 /* for most usages, or gcc and clang have a bug */
247 /* I *currently* lean towards the latter, and inefficiently implement */
248 /* all three of ecb's fences as a seq_cst fence */
249 /* Update, gcc-4.8 generates mfence for all c++ fences, but nothing */
250 /* for all __atomic_thread_fence's except seq_cst */
251 #define ECB_MEMORY_FENCE atomic_thread_fence (memory_order_seq_cst) 296 #define ECB_MEMORY_FENCE atomic_thread_fence (memory_order_seq_cst)
297 #define ECB_MEMORY_FENCE_ACQUIRE atomic_thread_fence (memory_order_acquire)
298 #define ECB_MEMORY_FENCE_RELEASE atomic_thread_fence (memory_order_release)
252 #endif 299 #endif
253#endif 300#endif
254 301
255#ifndef ECB_MEMORY_FENCE 302#ifndef ECB_MEMORY_FENCE
256 #if !ECB_AVOID_PTHREADS 303 #if !ECB_AVOID_PTHREADS
276 323
277#if !defined ECB_MEMORY_FENCE_RELEASE && defined ECB_MEMORY_FENCE 324#if !defined ECB_MEMORY_FENCE_RELEASE && defined ECB_MEMORY_FENCE
278 #define ECB_MEMORY_FENCE_RELEASE ECB_MEMORY_FENCE 325 #define ECB_MEMORY_FENCE_RELEASE ECB_MEMORY_FENCE
279#endif 326#endif
280 327
328#if !defined ECB_MEMORY_FENCE_RELAXED && defined ECB_MEMORY_FENCE
329 #define ECB_MEMORY_FENCE_RELAXED ECB_MEMORY_FENCE /* very heavy-handed */
330#endif
331
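
The fence macros above are meant to be used as statements. A minimal sketch (not part of the header, all names are illustrative) of the usual release/acquire pairing between a producer and a consumer thread:

#include "ecb.h"

static int shared_data;
static volatile int shared_ready;

static void producer (void)
{
  shared_data = 42;            /* write the payload */
  ECB_MEMORY_FENCE_RELEASE;    /* make the payload visible before the flag */
  shared_ready = 1;
}

static int consumer (void)
{
  while (!shared_ready)
    ;                          /* spin until the producer sets the flag */

  ECB_MEMORY_FENCE_ACQUIRE;    /* order the flag read before the payload read */
  return shared_data;
}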
281/*****************************************************************************/ 332/*****************************************************************************/
282 333
283#if ECB_CPP 334#if ECB_CPP
284 #define ecb_inline static inline 335 #define ecb_inline static inline
285#elif ECB_GCC_VERSION(2,5) 336#elif ECB_GCC_VERSION(2,5)
349 #define ecb_deprecated __declspec (deprecated) 400 #define ecb_deprecated __declspec (deprecated)
350#else 401#else
351 #define ecb_deprecated ecb_attribute ((__deprecated__)) 402 #define ecb_deprecated ecb_attribute ((__deprecated__))
352#endif 403#endif
353 404
354#if __MSC_VER >= 1500 405#if _MSC_VER >= 1500
355 #define ecb_deprecated_message(msg) __declspec (deprecated (msg)) 406 #define ecb_deprecated_message(msg) __declspec (deprecated (msg))
356#elif ECB_GCC_VERSION(4,5) 407#elif ECB_GCC_VERSION(4,5)
 357 #define ecb_deprecated_message(msg) ecb_attribute ((__deprecated__ (msg))) 408 #define ecb_deprecated_message(msg) ecb_attribute ((__deprecated__ (msg)))
358#else 409#else
359 #define ecb_deprecated_message(msg) ecb_deprecated 410 #define ecb_deprecated_message(msg) ecb_deprecated
368#define ecb_unused ecb_attribute ((__unused__)) 419#define ecb_unused ecb_attribute ((__unused__))
369#define ecb_const ecb_attribute ((__const__)) 420#define ecb_const ecb_attribute ((__const__))
370#define ecb_pure ecb_attribute ((__pure__)) 421#define ecb_pure ecb_attribute ((__pure__))
371 422
372#if ECB_C11 || __IBMC_NORETURN 423#if ECB_C11 || __IBMC_NORETURN
373 /* http://pic.dhe.ibm.com/infocenter/compbg/v121v141/topic/com.ibm.xlcpp121.bg.doc/language_ref/noreturn.html */ 424 /* http://www-01.ibm.com/support/knowledgecenter/SSGH3R_13.1.0/com.ibm.xlcpp131.aix.doc/language_ref/noreturn.html */
374 #define ecb_noreturn _Noreturn 425 #define ecb_noreturn _Noreturn
375#elif ECB_CPP11 426#elif ECB_CPP11
376 #define ecb_noreturn [[noreturn]] 427 #define ecb_noreturn [[noreturn]]
377#elif _MSC_VER >= 1200 428#elif _MSC_VER >= 1200
378 /* http://msdn.microsoft.com/en-us/library/k6ktzx3s.aspx */ 429 /* http://msdn.microsoft.com/en-us/library/k6ktzx3s.aspx */
415#else 466#else
416 ecb_function_ ecb_const int ecb_ctz32 (uint32_t x); 467 ecb_function_ ecb_const int ecb_ctz32 (uint32_t x);
417 ecb_function_ ecb_const int 468 ecb_function_ ecb_const int
418 ecb_ctz32 (uint32_t x) 469 ecb_ctz32 (uint32_t x)
419 { 470 {
471#if 1400 <= _MSC_VER && (_M_IX86 || _M_X64 || _M_IA64 || _M_ARM)
472 unsigned long r;
473 _BitScanForward (&r, x);
474 return (int)r;
475#else
420 int r = 0; 476 int r = 0;
421 477
422 x &= ~x + 1; /* this isolates the lowest bit */ 478 x &= ~x + 1; /* this isolates the lowest bit */
423 479
424#if ECB_branchless_on_i386 480#if ECB_branchless_on_i386
434 if (x & 0xff00ff00) r += 8; 490 if (x & 0xff00ff00) r += 8;
435 if (x & 0xffff0000) r += 16; 491 if (x & 0xffff0000) r += 16;
436#endif 492#endif
437 493
438 return r; 494 return r;
495#endif
439 } 496 }
440 497
441 ecb_function_ ecb_const int ecb_ctz64 (uint64_t x); 498 ecb_function_ ecb_const int ecb_ctz64 (uint64_t x);
442 ecb_function_ ecb_const int 499 ecb_function_ ecb_const int
443 ecb_ctz64 (uint64_t x) 500 ecb_ctz64 (uint64_t x)
444 { 501 {
502#if 1400 <= _MSC_VER && (_M_X64 || _M_IA64 || _M_ARM)
503 unsigned long r;
504 _BitScanForward64 (&r, x);
505 return (int)r;
506#else
445 int shift = x & 0xffffffffU ? 0 : 32; 507 int shift = x & 0xffffffff ? 0 : 32;
446 return ecb_ctz32 (x >> shift) + shift; 508 return ecb_ctz32 (x >> shift) + shift;
509#endif
447 } 510 }
448 511
449 ecb_function_ ecb_const int ecb_popcount32 (uint32_t x); 512 ecb_function_ ecb_const int ecb_popcount32 (uint32_t x);
450 ecb_function_ ecb_const int 513 ecb_function_ ecb_const int
451 ecb_popcount32 (uint32_t x) 514 ecb_popcount32 (uint32_t x)
459 } 522 }
460 523
461 ecb_function_ ecb_const int ecb_ld32 (uint32_t x); 524 ecb_function_ ecb_const int ecb_ld32 (uint32_t x);
462 ecb_function_ ecb_const int ecb_ld32 (uint32_t x) 525 ecb_function_ ecb_const int ecb_ld32 (uint32_t x)
463 { 526 {
527#if 1400 <= _MSC_VER && (_M_IX86 || _M_X64 || _M_IA64 || _M_ARM)
528 unsigned long r;
529 _BitScanReverse (&r, x);
530 return (int)r;
531#else
464 int r = 0; 532 int r = 0;
465 533
466 if (x >> 16) { x >>= 16; r += 16; } 534 if (x >> 16) { x >>= 16; r += 16; }
467 if (x >> 8) { x >>= 8; r += 8; } 535 if (x >> 8) { x >>= 8; r += 8; }
468 if (x >> 4) { x >>= 4; r += 4; } 536 if (x >> 4) { x >>= 4; r += 4; }
469 if (x >> 2) { x >>= 2; r += 2; } 537 if (x >> 2) { x >>= 2; r += 2; }
470 if (x >> 1) { r += 1; } 538 if (x >> 1) { r += 1; }
471 539
472 return r; 540 return r;
541#endif
473 } 542 }
474 543
475 ecb_function_ ecb_const int ecb_ld64 (uint64_t x); 544 ecb_function_ ecb_const int ecb_ld64 (uint64_t x);
476 ecb_function_ ecb_const int ecb_ld64 (uint64_t x) 545 ecb_function_ ecb_const int ecb_ld64 (uint64_t x)
477 { 546 {
547#if 1400 <= _MSC_VER && (_M_X64 || _M_IA64 || _M_ARM)
548 unsigned long r;
549 _BitScanReverse64 (&r, x);
550 return (int)r;
551#else
478 int r = 0; 552 int r = 0;
479 553
480 if (x >> 32) { x >>= 32; r += 32; } 554 if (x >> 32) { x >>= 32; r += 32; }
481 555
482 return r + ecb_ld32 (x); 556 return r + ecb_ld32 (x);
557#endif
483 } 558 }
484#endif 559#endif
485 560
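A small usage sketch (assuming this header is included as "ecb.h") of the bit-scan and popcount helpers defined above:

#include <stdio.h>
#include "ecb.h"

int main (void)
{
  uint32_t v = 0x00a0;

  printf ("ctz32  = %d\n", ecb_ctz32 (v));       /* 5 - index of the lowest set bit */
  printf ("ld32   = %d\n", ecb_ld32 (v));        /* 7 - index of the highest set bit */
  printf ("popcnt = %d\n", ecb_popcount32 (v));  /* 2 - number of set bits */

  return 0;
}
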
486ecb_function_ ecb_const ecb_bool ecb_is_pot32 (uint32_t x); 561ecb_function_ ecb_const ecb_bool ecb_is_pot32 (uint32_t x);
487ecb_function_ ecb_const ecb_bool ecb_is_pot32 (uint32_t x) { return !(x & (x - 1)); } 562ecb_function_ ecb_const ecb_bool ecb_is_pot32 (uint32_t x) { return !(x & (x - 1)); }
534ecb_inline ecb_const uint32_t ecb_rotl32 (uint32_t x, unsigned int count); 609ecb_inline ecb_const uint32_t ecb_rotl32 (uint32_t x, unsigned int count);
535ecb_inline ecb_const uint32_t ecb_rotr32 (uint32_t x, unsigned int count); 610ecb_inline ecb_const uint32_t ecb_rotr32 (uint32_t x, unsigned int count);
536ecb_inline ecb_const uint64_t ecb_rotl64 (uint64_t x, unsigned int count); 611ecb_inline ecb_const uint64_t ecb_rotl64 (uint64_t x, unsigned int count);
537ecb_inline ecb_const uint64_t ecb_rotr64 (uint64_t x, unsigned int count); 612ecb_inline ecb_const uint64_t ecb_rotr64 (uint64_t x, unsigned int count);
538 613
539ecb_inline ecb_const uint8_t ecb_rotl8 (uint8_t x, unsigned int count) { return (x >> ( 8 - count)) | (x << count); } 614ecb_inline ecb_const uint8_t ecb_rotl8 (uint8_t x, unsigned int count) { return (x >> (-count & 7)) | (x << (count & 7)); }
540ecb_inline ecb_const uint8_t ecb_rotr8 (uint8_t x, unsigned int count) { return (x << ( 8 - count)) | (x >> count); } 615ecb_inline ecb_const uint8_t ecb_rotr8 (uint8_t x, unsigned int count) { return (x << (-count & 7)) | (x >> (count & 7)); }
541ecb_inline ecb_const uint16_t ecb_rotl16 (uint16_t x, unsigned int count) { return (x >> (16 - count)) | (x << count); } 616ecb_inline ecb_const uint16_t ecb_rotl16 (uint16_t x, unsigned int count) { return (x >> (-count & 15)) | (x << (count & 15)); }
542ecb_inline ecb_const uint16_t ecb_rotr16 (uint16_t x, unsigned int count) { return (x << (16 - count)) | (x >> count); } 617ecb_inline ecb_const uint16_t ecb_rotr16 (uint16_t x, unsigned int count) { return (x << (-count & 15)) | (x >> (count & 15)); }
543ecb_inline ecb_const uint32_t ecb_rotl32 (uint32_t x, unsigned int count) { return (x >> (32 - count)) | (x << count); } 618ecb_inline ecb_const uint32_t ecb_rotl32 (uint32_t x, unsigned int count) { return (x >> (-count & 31)) | (x << (count & 31)); }
544ecb_inline ecb_const uint32_t ecb_rotr32 (uint32_t x, unsigned int count) { return (x << (32 - count)) | (x >> count); } 619ecb_inline ecb_const uint32_t ecb_rotr32 (uint32_t x, unsigned int count) { return (x << (-count & 31)) | (x >> (count & 31)); }
545ecb_inline ecb_const uint64_t ecb_rotl64 (uint64_t x, unsigned int count) { return (x >> (64 - count)) | (x << count); } 620ecb_inline ecb_const uint64_t ecb_rotl64 (uint64_t x, unsigned int count) { return (x >> (-count & 63)) | (x << (count & 63)); }
546ecb_inline ecb_const uint64_t ecb_rotr64 (uint64_t x, unsigned int count) { return (x << (64 - count)) | (x >> count); } 621ecb_inline ecb_const uint64_t ecb_rotr64 (uint64_t x, unsigned int count) { return (x << (-count & 63)) | (x >> (count & 63)); }
622
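The rewritten rotate helpers mask the count, so a count of 0 (or a multiple of the width) no longer shifts by the full type width, which would be undefined behaviour. A short sketch (assumption: ecb.h included):

#include <stdio.h>
#include "ecb.h"

int main (void)
{
  uint32_t x = 0x12345678;

  printf ("%08x\n", (unsigned)ecb_rotl32 (x, 8));  /* 34567812 */
  printf ("%08x\n", (unsigned)ecb_rotr32 (x, 8));  /* 78123456 */
  printf ("%08x\n", (unsigned)ecb_rotl32 (x, 0));  /* 12345678 - well-defined even for count 0 */

  return 0;
}
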
623#if ECB_CPP
624
625inline uint8_t ecb_ctz (uint8_t v) { return ecb_ctz32 (v); }
626inline uint16_t ecb_ctz (uint16_t v) { return ecb_ctz32 (v); }
627inline uint32_t ecb_ctz (uint32_t v) { return ecb_ctz32 (v); }
628inline uint64_t ecb_ctz (uint64_t v) { return ecb_ctz64 (v); }
629
630inline bool ecb_is_pot (uint8_t v) { return ecb_is_pot32 (v); }
631inline bool ecb_is_pot (uint16_t v) { return ecb_is_pot32 (v); }
632inline bool ecb_is_pot (uint32_t v) { return ecb_is_pot32 (v); }
633inline bool ecb_is_pot (uint64_t v) { return ecb_is_pot64 (v); }
634
635inline int ecb_ld (uint8_t v) { return ecb_ld32 (v); }
636inline int ecb_ld (uint16_t v) { return ecb_ld32 (v); }
637inline int ecb_ld (uint32_t v) { return ecb_ld32 (v); }
638inline int ecb_ld (uint64_t v) { return ecb_ld64 (v); }
639
640inline int ecb_popcount (uint8_t v) { return ecb_popcount32 (v); }
641inline int ecb_popcount (uint16_t v) { return ecb_popcount32 (v); }
642inline int ecb_popcount (uint32_t v) { return ecb_popcount32 (v); }
643inline int ecb_popcount (uint64_t v) { return ecb_popcount64 (v); }
644
645inline uint8_t ecb_bitrev (uint8_t v) { return ecb_bitrev8 (v); }
646inline uint16_t ecb_bitrev (uint16_t v) { return ecb_bitrev16 (v); }
647inline uint32_t ecb_bitrev (uint32_t v) { return ecb_bitrev32 (v); }
648
649inline uint8_t ecb_rotl (uint8_t v, unsigned int count) { return ecb_rotl8 (v, count); }
650inline uint16_t ecb_rotl (uint16_t v, unsigned int count) { return ecb_rotl16 (v, count); }
651inline uint32_t ecb_rotl (uint32_t v, unsigned int count) { return ecb_rotl32 (v, count); }
652inline uint64_t ecb_rotl (uint64_t v, unsigned int count) { return ecb_rotl64 (v, count); }
653
654inline uint8_t ecb_rotr (uint8_t v, unsigned int count) { return ecb_rotr8 (v, count); }
655inline uint16_t ecb_rotr (uint16_t v, unsigned int count) { return ecb_rotr16 (v, count); }
656inline uint32_t ecb_rotr (uint32_t v, unsigned int count) { return ecb_rotr32 (v, count); }
657inline uint64_t ecb_rotr (uint64_t v, unsigned int count) { return ecb_rotr64 (v, count); }
658
659#endif
547 660
548#if ECB_GCC_VERSION(4,3) || (ECB_CLANG_BUILTIN(__builtin_bswap32) && ECB_CLANG_BUILTIN(__builtin_bswap64)) 661#if ECB_GCC_VERSION(4,3) || (ECB_CLANG_BUILTIN(__builtin_bswap32) && ECB_CLANG_BUILTIN(__builtin_bswap64))
662 #if ECB_GCC_VERSION(4,8) || ECB_CLANG_BUILTIN(__builtin_bswap16)
663 #define ecb_bswap16(x) __builtin_bswap16 (x)
664 #else
549 #define ecb_bswap16(x) (__builtin_bswap32 (x) >> 16) 665 #define ecb_bswap16(x) (__builtin_bswap32 (x) >> 16)
666 #endif
550 #define ecb_bswap32(x) __builtin_bswap32 (x) 667 #define ecb_bswap32(x) __builtin_bswap32 (x)
551 #define ecb_bswap64(x) __builtin_bswap64 (x) 668 #define ecb_bswap64(x) __builtin_bswap64 (x)
669#elif _MSC_VER
670 #include <stdlib.h>
671 #define ecb_bswap16(x) ((uint16_t)_byteswap_ushort ((uint16_t)(x)))
672 #define ecb_bswap32(x) ((uint32_t)_byteswap_ulong ((uint32_t)(x)))
673 #define ecb_bswap64(x) ((uint64_t)_byteswap_uint64 ((uint64_t)(x)))
552#else 674#else
553 ecb_function_ ecb_const uint16_t ecb_bswap16 (uint16_t x); 675 ecb_function_ ecb_const uint16_t ecb_bswap16 (uint16_t x);
554 ecb_function_ ecb_const uint16_t 676 ecb_function_ ecb_const uint16_t
555 ecb_bswap16 (uint16_t x) 677 ecb_bswap16 (uint16_t x)
556 { 678 {
581#endif 703#endif
582 704
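A usage sketch (not part of the header) of the byte-swap helpers selected above:

#include <stdio.h>
#include "ecb.h"

int main (void)
{
  printf ("%04x\n", (unsigned)ecb_bswap16 (0x1234));                              /* 3412 */
  printf ("%08x\n", (unsigned)ecb_bswap32 (0x12345678));                          /* 78563412 */
  printf ("%016llx\n", (unsigned long long)ecb_bswap64 (0x0102030405060708ULL));  /* 0807060504030201 */

  return 0;
}
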
583/* try to tell the compiler that some condition is definitely true */ 705/* try to tell the compiler that some condition is definitely true */
584#define ecb_assume(cond) if (!(cond)) ecb_unreachable (); else 0 706#define ecb_assume(cond) if (!(cond)) ecb_unreachable (); else 0
585 707
586ecb_inline ecb_const unsigned char ecb_byteorder_helper (void); 708ecb_inline ecb_const uint32_t ecb_byteorder_helper (void);
587ecb_inline ecb_const unsigned char 709ecb_inline ecb_const uint32_t
588ecb_byteorder_helper (void) 710ecb_byteorder_helper (void)
589{ 711{
590 /* the union code still generates code under pressure in gcc, */ 712 /* the union code still generates code under pressure in gcc, */
591 /* but less than using pointers, and always seems to */ 713 /* but less than using pointers, and always seems to */
592 /* successfully return a constant. */ 714 /* successfully return a constant. */
593 /* the reason why we have this horrible preprocessor mess */ 715 /* the reason why we have this horrible preprocessor mess */
594 /* is to avoid it in all cases, at least on common architectures */ 716 /* is to avoid it in all cases, at least on common architectures */
595 /* or when using a recent enough gcc version (>= 4.6) */ 717 /* or when using a recent enough gcc version (>= 4.6) */
596#if ((__i386 || __i386__) && !__VOS__) || _M_X86 || __amd64 || __amd64__ || _M_X64
597 return 0x44;
598#elif __BYTE_ORDER__ && __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__ 718#if (defined __BYTE_ORDER__ && __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__) \
719 || ((__i386 || __i386__ || _M_IX86 || ECB_GCC_AMD64 || ECB_MSVC_AMD64) && !__VOS__)
720 #define ECB_LITTLE_ENDIAN 1
599 return 0x44; 721 return 0x44332211;
600#elif __BYTE_ORDER__ && __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__ 722#elif (defined __BYTE_ORDER__ && __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__) \
723 || ((__AARCH64EB__ || __MIPSEB__ || __ARMEB__) && !__VOS__)
724 #define ECB_BIG_ENDIAN 1
601 return 0x11; 725 return 0x11223344;
602#else 726#else
603 union 727 union
604 { 728 {
729 uint8_t c[4];
605 uint32_t i; 730 uint32_t u;
606 uint8_t c;
607 } u = { 0x11223344 }; 731 } u = { 0x11, 0x22, 0x33, 0x44 };
608 return u.c; 732 return u.u;
609#endif 733#endif
610} 734}
611 735
612ecb_inline ecb_const ecb_bool ecb_big_endian (void); 736ecb_inline ecb_const ecb_bool ecb_big_endian (void);
613ecb_inline ecb_const ecb_bool ecb_big_endian (void) { return ecb_byteorder_helper () == 0x11; } 737ecb_inline ecb_const ecb_bool ecb_big_endian (void) { return ecb_byteorder_helper () == 0x11223344; }
614ecb_inline ecb_const ecb_bool ecb_little_endian (void); 738ecb_inline ecb_const ecb_bool ecb_little_endian (void);
615ecb_inline ecb_const ecb_bool ecb_little_endian (void) { return ecb_byteorder_helper () == 0x44; } 739ecb_inline ecb_const ecb_bool ecb_little_endian (void) { return ecb_byteorder_helper () == 0x44332211; }
740
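Sketch (assumption: ecb.h included) of the endianness predicates defined above; with a reasonably recent compiler they fold to compile-time constants:

#include <stdio.h>
#include "ecb.h"

int main (void)
{
  if (ecb_little_endian ())
    printf ("little-endian host\n");
  else if (ecb_big_endian ())
    printf ("big-endian host\n");
  else
    printf ("mixed byte order\n");   /* neither predicate matches, e.g. pdp-endian */

  return 0;
}
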
741/*****************************************************************************/
742/* unaligned load/store */
743
744ecb_inline uint_fast16_t ecb_be_u16_to_host (uint_fast16_t v) { return ecb_little_endian () ? ecb_bswap16 (v) : v; }
745ecb_inline uint_fast32_t ecb_be_u32_to_host (uint_fast32_t v) { return ecb_little_endian () ? ecb_bswap32 (v) : v; }
746ecb_inline uint_fast64_t ecb_be_u64_to_host (uint_fast64_t v) { return ecb_little_endian () ? ecb_bswap64 (v) : v; }
747
748ecb_inline uint_fast16_t ecb_le_u16_to_host (uint_fast16_t v) { return ecb_big_endian () ? ecb_bswap16 (v) : v; }
749ecb_inline uint_fast32_t ecb_le_u32_to_host (uint_fast32_t v) { return ecb_big_endian () ? ecb_bswap32 (v) : v; }
750ecb_inline uint_fast64_t ecb_le_u64_to_host (uint_fast64_t v) { return ecb_big_endian () ? ecb_bswap64 (v) : v; }
751
752ecb_inline uint_fast16_t ecb_peek_u16_u (const void *ptr) { uint16_t v; memcpy (&v, ptr, sizeof (v)); return v; }
753ecb_inline uint_fast32_t ecb_peek_u32_u (const void *ptr) { uint32_t v; memcpy (&v, ptr, sizeof (v)); return v; }
754ecb_inline uint_fast64_t ecb_peek_u64_u (const void *ptr) { uint64_t v; memcpy (&v, ptr, sizeof (v)); return v; }
755
756ecb_inline uint_fast16_t ecb_peek_be_u16_u (const void *ptr) { return ecb_be_u16_to_host (ecb_peek_u16_u (ptr)); }
757ecb_inline uint_fast32_t ecb_peek_be_u32_u (const void *ptr) { return ecb_be_u32_to_host (ecb_peek_u32_u (ptr)); }
758ecb_inline uint_fast64_t ecb_peek_be_u64_u (const void *ptr) { return ecb_be_u64_to_host (ecb_peek_u64_u (ptr)); }
759
760ecb_inline uint_fast16_t ecb_peek_le_u16_u (const void *ptr) { return ecb_le_u16_to_host (ecb_peek_u16_u (ptr)); }
761ecb_inline uint_fast32_t ecb_peek_le_u32_u (const void *ptr) { return ecb_le_u32_to_host (ecb_peek_u32_u (ptr)); }
762ecb_inline uint_fast64_t ecb_peek_le_u64_u (const void *ptr) { return ecb_le_u64_to_host (ecb_peek_u64_u (ptr)); }
763
764ecb_inline uint_fast16_t ecb_host_to_be_u16 (uint_fast16_t v) { return ecb_little_endian () ? ecb_bswap16 (v) : v; }
765ecb_inline uint_fast32_t ecb_host_to_be_u32 (uint_fast32_t v) { return ecb_little_endian () ? ecb_bswap32 (v) : v; }
766ecb_inline uint_fast64_t ecb_host_to_be_u64 (uint_fast64_t v) { return ecb_little_endian () ? ecb_bswap64 (v) : v; }
767
768ecb_inline uint_fast16_t ecb_host_to_le_u16 (uint_fast16_t v) { return ecb_big_endian () ? ecb_bswap16 (v) : v; }
769ecb_inline uint_fast32_t ecb_host_to_le_u32 (uint_fast32_t v) { return ecb_big_endian () ? ecb_bswap32 (v) : v; }
770ecb_inline uint_fast64_t ecb_host_to_le_u64 (uint_fast64_t v) { return ecb_big_endian () ? ecb_bswap64 (v) : v; }
771
772ecb_inline void ecb_poke_u16_u (void *ptr, uint16_t v) { memcpy (ptr, &v, sizeof (v)); }
773ecb_inline void ecb_poke_u32_u (void *ptr, uint32_t v) { memcpy (ptr, &v, sizeof (v)); }
774ecb_inline void ecb_poke_u64_u (void *ptr, uint64_t v) { memcpy (ptr, &v, sizeof (v)); }
775
776ecb_inline void ecb_poke_be_u16_u (void *ptr, uint_fast16_t v) { ecb_poke_u16_u (ptr, ecb_host_to_be_u16 (v)); }
777ecb_inline void ecb_poke_be_u32_u (void *ptr, uint_fast32_t v) { ecb_poke_u32_u (ptr, ecb_host_to_be_u32 (v)); }
778ecb_inline void ecb_poke_be_u64_u (void *ptr, uint_fast64_t v) { ecb_poke_u64_u (ptr, ecb_host_to_be_u64 (v)); }
779
780ecb_inline void ecb_poke_le_u16_u (void *ptr, uint_fast16_t v) { ecb_poke_u16_u (ptr, ecb_host_to_le_u16 (v)); }
781ecb_inline void ecb_poke_le_u32_u (void *ptr, uint_fast32_t v) { ecb_poke_u32_u (ptr, ecb_host_to_le_u32 (v)); }
782ecb_inline void ecb_poke_le_u64_u (void *ptr, uint_fast64_t v) { ecb_poke_u64_u (ptr, ecb_host_to_le_u64 (v)); }
783
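A minimal sketch (assuming ecb.h is included) of the unaligned, endianness-explicit accessors above, e.g. for serialising a value in network byte order:

#include <stdio.h>
#include "ecb.h"

int main (void)
{
  unsigned char buf[4];

  /* store 0xdeadbeef big-endian, regardless of host byte order or alignment */
  ecb_poke_be_u32_u (buf, 0xdeadbeefU);

  printf ("%02x %02x %02x %02x\n", buf[0], buf[1], buf[2], buf[3]);  /* de ad be ef */
  printf ("%08x\n", (unsigned)ecb_peek_be_u32_u (buf));              /* deadbeef */

  return 0;
}
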
784#if ECB_CPP
785
786inline uint8_t ecb_bswap (uint8_t v) { return v; }
787inline uint16_t ecb_bswap (uint16_t v) { return ecb_bswap16 (v); }
788inline uint32_t ecb_bswap (uint32_t v) { return ecb_bswap32 (v); }
789inline uint64_t ecb_bswap (uint64_t v) { return ecb_bswap64 (v); }
790
791template<typename T> inline T ecb_be_to_host (T v) { return ecb_little_endian () ? ecb_bswap (v) : v; }
792template<typename T> inline T ecb_le_to_host (T v) { return ecb_big_endian () ? ecb_bswap (v) : v; }
793template<typename T> inline T ecb_peek (const void *ptr) { return *(const T *)ptr; }
794template<typename T> inline T ecb_peek_be (const void *ptr) { return ecb_be_to_host (ecb_peek <T> (ptr)); }
795template<typename T> inline T ecb_peek_le (const void *ptr) { return ecb_le_to_host (ecb_peek <T> (ptr)); }
796template<typename T> inline T ecb_peek_u (const void *ptr) { T v; memcpy (&v, ptr, sizeof (v)); return v; }
797template<typename T> inline T ecb_peek_be_u (const void *ptr) { return ecb_be_to_host (ecb_peek_u<T> (ptr)); }
798template<typename T> inline T ecb_peek_le_u (const void *ptr) { return ecb_le_to_host (ecb_peek_u<T> (ptr)); }
799
800template<typename T> inline T ecb_host_to_be (T v) { return ecb_little_endian () ? ecb_bswap (v) : v; }
801template<typename T> inline T ecb_host_to_le (T v) { return ecb_big_endian () ? ecb_bswap (v) : v; }
802template<typename T> inline void ecb_poke (void *ptr, T v) { *(T *)ptr = v; }
803template<typename T> inline void ecb_poke_be (void *ptr, T v) { return ecb_poke <T> (ptr, ecb_host_to_be (v)); }
804template<typename T> inline void ecb_poke_le (void *ptr, T v) { return ecb_poke <T> (ptr, ecb_host_to_le (v)); }
805template<typename T> inline void ecb_poke_u (void *ptr, T v) { memcpy (ptr, &v, sizeof (v)); }
806template<typename T> inline void ecb_poke_be_u (void *ptr, T v) { return ecb_poke_u<T> (ptr, ecb_host_to_be (v)); }
807template<typename T> inline void ecb_poke_le_u (void *ptr, T v) { return ecb_poke_u<T> (ptr, ecb_host_to_le (v)); }
808
809#endif
810
811/*****************************************************************************/
812/* pointer/integer hashing */
813
814/* based on hash by Chris Wellons, https://nullprogram.com/blog/2018/07/31/ */
815ecb_function_ uint32_t ecb_mix32 (uint32_t v);
816ecb_function_ uint32_t ecb_mix32 (uint32_t v)
817{
818 v ^= v >> 16; v *= 0x7feb352dU;
819 v ^= v >> 15; v *= 0x846ca68bU;
820 v ^= v >> 16;
821 return v;
822}
823
824ecb_function_ uint32_t ecb_unmix32 (uint32_t v);
825ecb_function_ uint32_t ecb_unmix32 (uint32_t v)
826{
827 v ^= v >> 16 ; v *= 0x43021123U;
828 v ^= v >> 15 ^ v >> 30; v *= 0x1d69e2a5U;
829 v ^= v >> 16 ;
830 return v;
831}
832
 833/* based on splitmix64, by Sebastiano Vigna, https://prng.di.unimi.it/splitmix64.c */
834ecb_function_ uint64_t ecb_mix64 (uint64_t v);
835ecb_function_ uint64_t ecb_mix64 (uint64_t v)
836{
837 v ^= v >> 30; v *= 0xbf58476d1ce4e5b9U;
838 v ^= v >> 27; v *= 0x94d049bb133111ebU;
839 v ^= v >> 31;
840 return v;
841}
842
843ecb_function_ uint64_t ecb_unmix64 (uint64_t v);
844ecb_function_ uint64_t ecb_unmix64 (uint64_t v)
845{
846 v ^= v >> 31 ^ v >> 62; v *= 0x319642b2d24d8ec3U;
847 v ^= v >> 27 ^ v >> 54; v *= 0x96de1b173f119089U;
848 v ^= v >> 30 ^ v >> 60;
849 return v;
850}
851
852ecb_function_ uintptr_t ecb_ptrmix (void *p);
853ecb_function_ uintptr_t ecb_ptrmix (void *p)
854{
855 #if ECB_PTRSIZE <= 4
856 return ecb_mix32 ((uint32_t)p);
857 #else
858 return ecb_mix64 ((uint64_t)p);
859 #endif
860}
861
862ecb_function_ void *ecb_ptrunmix (uintptr_t v);
863ecb_function_ void *ecb_ptrunmix (uintptr_t v)
864{
865 #if ECB_PTRSIZE <= 4
866 return (void *)ecb_unmix32 (v);
867 #else
868 return (void *)ecb_unmix64 (v);
869 #endif
870}
871
872#if ECB_CPP
873
874template<typename T>
875inline uintptr_t ecb_ptrmix (T *p)
876{
877 return ecb_ptrmix (static_cast<void *>(p));
878}
879
880template<typename T>
881inline T *ecb_ptrunmix (uintptr_t v)
882{
883 return static_cast<T *>(ecb_ptrunmix (v));
884}
885
886#endif
887
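Usage sketch (illustrative only, not part of the header): the mix functions are invertible integer hashes, so ecb_unmix* recovers the original value, and ecb_ptrmix scrambles a pointer, e.g. for deriving a hash-table bucket:

#include <stdio.h>
#include <assert.h>
#include "ecb.h"

int main (void)
{
  uint32_t h = ecb_mix32 (12345);
  assert (ecb_unmix32 (h) == 12345);   /* mix32/unmix32 form a bijection */

  int dummy;
  uintptr_t ph = ecb_ptrmix (&dummy);  /* well-mixed pointer bits */

  printf ("hash %08x, bucket %u\n", (unsigned)h, (unsigned)(ph & 255));
  return 0;
}
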
888/*****************************************************************************/
889/* gray code */
890
891ecb_function_ uint_fast8_t ecb_gray8_encode (uint_fast8_t b) { return b ^ (b >> 1); }
892ecb_function_ uint_fast16_t ecb_gray16_encode (uint_fast16_t b) { return b ^ (b >> 1); }
893ecb_function_ uint_fast32_t ecb_gray32_encode (uint_fast32_t b) { return b ^ (b >> 1); }
894ecb_function_ uint_fast64_t ecb_gray64_encode (uint_fast64_t b) { return b ^ (b >> 1); }
895
896ecb_function_ uint8_t ecb_gray8_decode (uint8_t g)
897{
898 g ^= g >> 1;
899 g ^= g >> 2;
900 g ^= g >> 4;
901
902 return g;
903}
904
905ecb_function_ uint16_t ecb_gray16_decode (uint16_t g)
906{
907 g ^= g >> 1;
908 g ^= g >> 2;
909 g ^= g >> 4;
910 g ^= g >> 8;
911
912 return g;
913}
914
915ecb_function_ uint32_t ecb_gray32_decode (uint32_t g)
916{
917 g ^= g >> 1;
918 g ^= g >> 2;
919 g ^= g >> 4;
920 g ^= g >> 8;
921 g ^= g >> 16;
922
923 return g;
924}
925
926ecb_function_ uint64_t ecb_gray64_decode (uint64_t g)
927{
928 g ^= g >> 1;
929 g ^= g >> 2;
930 g ^= g >> 4;
931 g ^= g >> 8;
932 g ^= g >> 16;
933 g ^= g >> 32;
934
935 return g;
936}
937
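A small self-check sketch (assumption: ecb.h included) for the gray-code helpers: decode inverts encode, and consecutive codes differ in exactly one bit:

#include <stdio.h>
#include <assert.h>
#include "ecb.h"

int main (void)
{
  uint32_t i;

  for (i = 0; i < 16; ++i)
    {
      uint32_t g = ecb_gray32_encode (i);

      assert (ecb_gray32_decode (g) == i);

      if (i)
        assert (ecb_popcount32 (g ^ ecb_gray32_encode (i - 1)) == 1);
    }

  printf ("gray code round-trip ok\n");
  return 0;
}
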
938#if ECB_CPP
939
940ecb_function_ uint8_t ecb_gray_encode (uint8_t b) { return ecb_gray8_encode (b); }
941ecb_function_ uint16_t ecb_gray_encode (uint16_t b) { return ecb_gray16_encode (b); }
942ecb_function_ uint32_t ecb_gray_encode (uint32_t b) { return ecb_gray32_encode (b); }
943ecb_function_ uint64_t ecb_gray_encode (uint64_t b) { return ecb_gray64_encode (b); }
944
945ecb_function_ uint8_t ecb_gray_decode (uint8_t g) { return ecb_gray8_decode (g); }
946ecb_function_ uint16_t ecb_gray_decode (uint16_t g) { return ecb_gray16_decode (g); }
947ecb_function_ uint32_t ecb_gray_decode (uint32_t g) { return ecb_gray32_decode (g); }
948ecb_function_ uint64_t ecb_gray_decode (uint64_t g) { return ecb_gray64_decode (g); }
949
950#endif
951
952/*****************************************************************************/
953/* 2d hilbert curves */
954
955/* algorithm from the book Hacker's Delight, modified to not */
956/* run into undefined behaviour for n==16 */
957static uint32_t
958ecb_hilbert2d_index_to_coord32 (int n, uint32_t s)
959{
960 uint32_t comp, swap, cs, t, sr;
961
962 /* pad s on the left (unused) bits with 01 (no change groups) */
963 s |= 0x55555555U << n << n;
964 /* "s shift right" */
965 sr = (s >> 1) & 0x55555555U;
966 /* compute complement and swap info in two-bit groups */
967 cs = ((s & 0x55555555U) + sr) ^ 0x55555555U;
968
969 /* parallel prefix xor op to propagate both complement
970 * and swap info together from left to right (there is
971 * no step "cs ^= cs >> 1", so in effect it computes
972 * two independent parallel prefix operations on two
973 * interleaved sets of sixteen bits).
974 */
975 cs ^= cs >> 2;
976 cs ^= cs >> 4;
977 cs ^= cs >> 8;
978 cs ^= cs >> 16;
979
980 /* separate swap and complement bits */
981 swap = cs & 0x55555555U;
982 comp = (cs >> 1) & 0x55555555U;
983
984 /* calculate coordinates in odd and even bit positions */
985 t = (s & swap) ^ comp;
986 s = s ^ sr ^ t ^ (t << 1);
987
988 /* unpad/clear out any junk on the left */
 989 s = s & ((1U << n << n) - 1);
990
991 /* Now "unshuffle" to separate the x and y bits. */
992 t = (s ^ (s >> 1)) & 0x22222222U; s ^= t ^ (t << 1);
993 t = (s ^ (s >> 2)) & 0x0c0c0c0cU; s ^= t ^ (t << 2);
994 t = (s ^ (s >> 4)) & 0x00f000f0U; s ^= t ^ (t << 4);
995 t = (s ^ (s >> 8)) & 0x0000ff00U; s ^= t ^ (t << 8);
996
997 /* now s contains two 16-bit coordinates */
998 return s;
999}
1000
1001/* 64 bit, a straightforward extension to the 32 bit case */
1002static uint64_t
1003ecb_hilbert2d_index_to_coord64 (int n, uint64_t s)
1004{
1005 uint64_t comp, swap, cs, t, sr;
1006
1007 /* pad s on the left (unused) bits with 01 (no change groups) */
1008 s |= 0x5555555555555555U << n << n;
1009 /* "s shift right" */
1010 sr = (s >> 1) & 0x5555555555555555U;
1011 /* compute complement and swap info in two-bit groups */
1012 cs = ((s & 0x5555555555555555U) + sr) ^ 0x5555555555555555U;
1013
1014 /* parallel prefix xor op to propagate both complement
1015 * and swap info together from left to right (there is
1016 * no step "cs ^= cs >> 1", so in effect it computes
1017 * two independent parallel prefix operations on two
1018 * interleaved sets of thirty-two bits).
1019 */
1020 cs ^= cs >> 2;
1021 cs ^= cs >> 4;
1022 cs ^= cs >> 8;
1023 cs ^= cs >> 16;
1024 cs ^= cs >> 32;
1025
1026 /* separate swap and complement bits */
1027 swap = cs & 0x5555555555555555U;
1028 comp = (cs >> 1) & 0x5555555555555555U;
1029
1030 /* calculate coordinates in odd and even bit positions */
1031 t = (s & swap) ^ comp;
1032 s = s ^ sr ^ t ^ (t << 1);
1033
1034 /* unpad/clear out any junk on the left */
 1035 s = s & (((uint64_t)1 << n << n) - 1);
1036
1037 /* Now "unshuffle" to separate the x and y bits. */
1038 t = (s ^ (s >> 1)) & 0x2222222222222222U; s ^= t ^ (t << 1);
1039 t = (s ^ (s >> 2)) & 0x0c0c0c0c0c0c0c0cU; s ^= t ^ (t << 2);
1040 t = (s ^ (s >> 4)) & 0x00f000f000f000f0U; s ^= t ^ (t << 4);
1041 t = (s ^ (s >> 8)) & 0x0000ff000000ff00U; s ^= t ^ (t << 8);
1042 t = (s ^ (s >> 16)) & 0x00000000ffff0000U; s ^= t ^ (t << 16);
1043
1044 /* now s contains two 32-bit coordinates */
1045 return s;
1046}
1047
1048/* algorithm from the book Hacker's Delight, but a similar algorithm*/
1049/* is given in https://doi.org/10.1002/spe.4380160103 */
1050/* this has been slightly improved over the original version */
1051ecb_function_ uint32_t
1052ecb_hilbert2d_coord_to_index32 (int n, uint32_t xy)
1053{
1054 uint32_t row;
1055 uint32_t state = 0;
1056 uint32_t s = 0;
1057
1058 do
1059 {
1060 --n;
1061
1062 row = 4 * state
1063 | (2 & (xy >> n >> 15))
1064 | (1 & (xy >> n ));
1065
1066 /* these funky constants are lookup tables for two-bit values */
1067 s = (s << 2) | (0x361e9cb4U >> 2 * row) & 3;
1068 state = (0x8fe65831U >> 2 * row) & 3;
1069 }
1070 while (n > 0);
1071
1072 return s;
1073}
1074
1075/* 64 bit, essentially the same as 32 bit */
1076ecb_function_ uint64_t
1077ecb_hilbert2d_coord_to_index64 (int n, uint64_t xy)
1078{
1079 uint32_t row;
1080 uint32_t state = 0;
1081 uint64_t s = 0;
1082
1083 do
1084 {
1085 --n;
1086
1087 row = 4 * state
1088 | (2 & (xy >> n >> 31))
1089 | (1 & (xy >> n ));
1090
1091 /* these funky constants are lookup tables for two-bit values */
1092 s = (s << 2) | (0x361e9cb4U >> 2 * row) & 3;
1093 state = (0x8fe65831U >> 2 * row) & 3;
1094 }
1095 while (n > 0);
1096
1097 return s;
1098}
1099
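A hedged usage sketch of the hilbert helpers above: n is the order (bits per coordinate), and the two n-bit coordinates are packed into the low and high 16-bit halves of the 32-bit value. The two functions are intended to be inverses of each other:

#include <stdio.h>
#include <assert.h>
#include "ecb.h"

int main (void)
{
  int n = 8;   /* 8 bits per coordinate, i.e. a 256x256 grid */
  uint32_t s;

  for (s = 0; s < ((uint32_t)1 << n << n); ++s)
    {
      uint32_t xy = ecb_hilbert2d_index_to_coord32 (n, s);  /* curve index -> (x,y) */

      assert (ecb_hilbert2d_coord_to_index32 (n, xy) == s); /* and back again */
    }

  printf ("hilbert round-trip ok for n=%d\n", n);
  return 0;
}
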
1100/*****************************************************************************/
1101/* division */
616 1102
617#if ECB_GCC_VERSION(3,0) || ECB_C99 1103#if ECB_GCC_VERSION(3,0) || ECB_C99
1104 /* C99 tightened the definition of %, so we can use a more efficient version */
618 #define ecb_mod(m,n) ((m) % (n) + ((m) % (n) < 0 ? (n) : 0)) 1105 #define ecb_mod(m,n) ((m) % (n) + ((m) % (n) < 0 ? (n) : 0))
619#else 1106#else
620 #define ecb_mod(m,n) ((m) < 0 ? ((n) - 1 - ((-1 - (m)) % (n))) : ((m) % (n))) 1107 #define ecb_mod(m,n) ((m) < 0 ? ((n) - 1 - ((-1 - (m)) % (n))) : ((m) % (n)))
621#endif 1108#endif
622 1109
634#else 1121#else
635 #define ecb_div_rd(val,div) ((val) < 0 ? - ((-(val) + (div) - 1) / (div)) : ((val) ) / (div)) 1122 #define ecb_div_rd(val,div) ((val) < 0 ? - ((-(val) + (div) - 1) / (div)) : ((val) ) / (div))
636 #define ecb_div_ru(val,div) ((val) < 0 ? - ((-(val) ) / (div)) : ((val) + (div) - 1) / (div)) 1123 #define ecb_div_ru(val,div) ((val) < 0 ? - ((-(val) ) / (div)) : ((val) + (div) - 1) / (div))
637#endif 1124#endif
638 1125
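Worked example (not part of the header): unlike C's truncating / and %, ecb_mod always yields a non-negative remainder and ecb_div_rd/ecb_div_ru round toward negative/positive infinity:

#include <stdio.h>
#include "ecb.h"

int main (void)
{
  /* in C99, -7 / 3 == -2 and -7 % 3 == -1 (truncation toward zero) */
  printf ("ecb_mod    (-7, 3) = %d\n", ecb_mod    (-7, 3));   /*  2 */
  printf ("ecb_div_rd (-7, 3) = %d\n", ecb_div_rd (-7, 3));   /* -3 */
  printf ("ecb_div_ru (-7, 3) = %d\n", ecb_div_ru (-7, 3));   /* -2 */

  return 0;
}
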
1126/*****************************************************************************/
1127/* array length */
1128
639#if ecb_cplusplus_does_not_suck 1129#if ecb_cplusplus_does_not_suck
640 /* does not work for local types (http://www.open-std.org/jtc1/sc22/wg21/docs/papers/2008/n2657.htm) */ 1130 /* does not work for local types (http://www.open-std.org/jtc1/sc22/wg21/docs/papers/2008/n2657.htm) */
641 template<typename T, int N> 1131 template<typename T, int N>
642 static inline int ecb_array_length (const T (&arr)[N]) 1132 static inline int ecb_array_length (const T (&arr)[N])
643 { 1133 {
645 } 1135 }
646#else 1136#else
647 #define ecb_array_length(name) (sizeof (name) / sizeof (name [0])) 1137 #define ecb_array_length(name) (sizeof (name) / sizeof (name [0]))
648#endif 1138#endif
649 1139
1140/*****************************************************************************/
1141/* IEEE 754-2008 half float conversions */
1142
1143ecb_function_ ecb_const uint32_t ecb_binary16_to_binary32 (uint32_t x);
1144ecb_function_ ecb_const uint32_t
1145ecb_binary16_to_binary32 (uint32_t x)
1146{
1147 unsigned int s = (x & 0x8000) << (31 - 15);
1148 int e = (x >> 10) & 0x001f;
1149 unsigned int m = x & 0x03ff;
1150
1151 if (ecb_expect_false (e == 31))
1152 /* infinity or NaN */
1153 e = 255 - (127 - 15);
1154 else if (ecb_expect_false (!e))
1155 {
1156 if (ecb_expect_true (!m))
1157 /* zero, handled by code below by forcing e to 0 */
1158 e = 0 - (127 - 15);
1159 else
1160 {
1161 /* subnormal, renormalise */
1162 unsigned int s = 10 - ecb_ld32 (m);
1163
1164 m = (m << s) & 0x3ff; /* mask implicit bit */
1165 e -= s - 1;
1166 }
1167 }
1168
1169 /* e and m now are normalised, or zero, (or inf or nan) */
1170 e += 127 - 15;
1171
1172 return s | (e << 23) | (m << (23 - 10));
1173}
1174
1175ecb_function_ ecb_const uint16_t ecb_binary32_to_binary16 (uint32_t x);
1176ecb_function_ ecb_const uint16_t
1177ecb_binary32_to_binary16 (uint32_t x)
1178{
1179 unsigned int s = (x >> 16) & 0x00008000; /* sign bit, the easy part */
1180 int e = ((x >> 23) & 0x000000ff) - (127 - 15); /* the desired exponent */
1181 unsigned int m = x & 0x007fffff;
1182
1183 x &= 0x7fffffff;
1184
1185 /* if it's within range of binary16 normals, use fast path */
1186 if (ecb_expect_true (0x38800000 <= x && x <= 0x477fefff))
1187 {
1188 /* mantissa round-to-even */
1189 m += 0x00000fff + ((m >> (23 - 10)) & 1);
1190
1191 /* handle overflow */
1192 if (ecb_expect_false (m >= 0x00800000))
1193 {
1194 m >>= 1;
1195 e += 1;
1196 }
1197
1198 return s | (e << 10) | (m >> (23 - 10));
1199 }
1200
1201 /* handle large numbers and infinity */
1202 if (ecb_expect_true (0x477fefff < x && x <= 0x7f800000))
1203 return s | 0x7c00;
1204
1205 /* handle zero, subnormals and small numbers */
1206 if (ecb_expect_true (x < 0x38800000))
1207 {
1208 /* zero */
1209 if (ecb_expect_true (!x))
1210 return s;
1211
1212 /* handle subnormals */
1213
1214 /* too small, will be zero */
1215 if (e < (14 - 24)) /* might not be sharp, but is good enough */
1216 return s;
1217
1218 m |= 0x00800000; /* make implicit bit explicit */
1219
1220 /* very tricky - we need to round to the nearest e (+10) bit value */
1221 {
1222 unsigned int bits = 14 - e;
1223 unsigned int half = (1 << (bits - 1)) - 1;
1224 unsigned int even = (m >> bits) & 1;
1225
1226 /* if this overflows, we will end up with a normalised number */
1227 m = (m + half + even) >> bits;
1228 }
1229
1230 return s | m;
1231 }
1232
1233 /* handle NaNs, preserve leftmost nan bits, but make sure we don't turn them into infinities */
1234 m >>= 13;
1235
1236 return s | 0x7c00 | m | !m;
1237}
1238
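A round-trip sketch (assumption: ecb.h included) for the binary16 conversions above, using 1.5, which is exactly representable as a half float:

#include <stdio.h>
#include "ecb.h"

int main (void)
{
  uint32_t f32  = 0x3fc00000U;                      /* IEEE-754 single bit pattern of 1.5f */
  uint16_t f16  = ecb_binary32_to_binary16 (f32);   /* 0x3e00 */
  uint32_t back = ecb_binary16_to_binary32 (f16);   /* 0x3fc00000 again */

  printf ("binary16 = %04x, back = %08x\n", (unsigned)f16, (unsigned)back);
  return 0;
}
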
1239/*******************************************************************************/
1240/* fast integer to ascii */
1241
1242/*
1243 * This code is pretty complicated because it is general. The idea behind it,
1244 * however, is pretty simple: first, the number is multiplied with a scaling
1245 * factor (2**bits / 10**(digits-1)) to convert the integer into a fixed-point
1246 * number with the first digit in the upper bits.
1247 * Then this digit is converted to text and masked out. The resulting number
1248 * is then multiplied by 10, by multiplying the fixed point representation
1249 * by 5 and shifting the (binary) decimal point one to the right, so a 4.28
1250 * format becomes 5.27, 6.26 and so on.
1251 * The rest involves only advancing the pointer if we already generated a
1252 * non-zero digit, so leading zeroes are overwritten.
1253 */
1254
1255/* simply return a mask with "bits" bits set */
1256#define ecb_i2a_mask(type,bits) ((((type)1) << (bits)) - 1)
1257
 1258/* output a single digit. maskvalue is 10**digitidx */
1259#define ecb_i2a_digit(type,bits,digitmask,maskvalue,digitidx) \
1260 if (digitmask >= maskvalue) /* constant, used to decide how many digits to generate */ \
1261 { \
1262 char digit = x >> (bits - digitidx); /* calculate the topmost digit */ \
1263 *ptr = digit + '0'; /* output it */ \
1264 nz = (digitmask == maskvalue) || nz || digit; /* first term == always output last digit */ \
1265 ptr += nz; /* output digit only if non-zero digit seen */ \
1266 x = (x & ecb_i2a_mask (type, bits - digitidx)) * 5; /* *10, but shift decimal point right */ \
1267 }
1268
1269/* convert integer to fixed point format and multiply out digits, highest first */
1270/* requires magic constants: max. digits and number of bits after the decimal point */
1271#define ecb_i2a_def(suffix,ptr,v,type,bits,digitmask,lz) \
1272ecb_inline char *ecb_i2a_ ## suffix (char *ptr, uint32_t u) \
1273{ \
1274 char nz = lz; /* non-zero digit seen? */ \
1275 /* convert to x.bits fixed-point */ \
1276 type x = u * ((ecb_i2a_mask (type, bits) + digitmask) / digitmask); \
1277 /* output up to 10 digits */ \
1278 ecb_i2a_digit (type,bits,digitmask, 1, 0); \
1279 ecb_i2a_digit (type,bits,digitmask, 10, 1); \
1280 ecb_i2a_digit (type,bits,digitmask, 100, 2); \
1281 ecb_i2a_digit (type,bits,digitmask, 1000, 3); \
1282 ecb_i2a_digit (type,bits,digitmask, 10000, 4); \
1283 ecb_i2a_digit (type,bits,digitmask, 100000, 5); \
1284 ecb_i2a_digit (type,bits,digitmask, 1000000, 6); \
1285 ecb_i2a_digit (type,bits,digitmask, 10000000, 7); \
1286 ecb_i2a_digit (type,bits,digitmask, 100000000, 8); \
1287 ecb_i2a_digit (type,bits,digitmask, 1000000000, 9); \
1288 return ptr; \
1289}
1290
1291/* predefined versions of the above, for various digits */
1292/* ecb_i2a_xN = almost N digits, limit defined by macro */
1293/* ecb_i2a_N = up to N digits, leading zeroes suppressed */
1294/* ecb_i2a_0N = exactly N digits, including leading zeroes */
1295
1296/* non-leading-zero versions, limited range */
1297#define ECB_I2A_MAX_X5 59074 /* limit for ecb_i2a_x5 */
1298#define ECB_I2A_MAX_X10 2932500665 /* limit for ecb_i2a_x10 */
1299ecb_i2a_def ( x5, ptr, v, uint32_t, 26, 10000, 0)
1300ecb_i2a_def (x10, ptr, v, uint64_t, 60, 1000000000, 0)
1301
1302/* non-leading zero versions, all digits, 4 and 9 are optimal for 32/64 bit */
1303ecb_i2a_def ( 2, ptr, v, uint32_t, 10, 10, 0)
1304ecb_i2a_def ( 3, ptr, v, uint32_t, 12, 100, 0)
1305ecb_i2a_def ( 4, ptr, v, uint32_t, 26, 1000, 0)
1306ecb_i2a_def ( 5, ptr, v, uint64_t, 30, 10000, 0)
1307ecb_i2a_def ( 6, ptr, v, uint64_t, 36, 100000, 0)
1308ecb_i2a_def ( 7, ptr, v, uint64_t, 44, 1000000, 0)
1309ecb_i2a_def ( 8, ptr, v, uint64_t, 50, 10000000, 0)
1310ecb_i2a_def ( 9, ptr, v, uint64_t, 56, 100000000, 0)
1311
1312/* leading-zero versions, all digits, 04 and 09 are optimal for 32/64 bit */
1313ecb_i2a_def (02, ptr, v, uint32_t, 10, 10, 1)
1314ecb_i2a_def (03, ptr, v, uint32_t, 12, 100, 1)
1315ecb_i2a_def (04, ptr, v, uint32_t, 26, 1000, 1)
1316ecb_i2a_def (05, ptr, v, uint64_t, 30, 10000, 1)
1317ecb_i2a_def (06, ptr, v, uint64_t, 36, 100000, 1)
1318ecb_i2a_def (07, ptr, v, uint64_t, 44, 1000000, 1)
1319ecb_i2a_def (08, ptr, v, uint64_t, 50, 10000000, 1)
1320ecb_i2a_def (09, ptr, v, uint64_t, 56, 100000000, 1)
1321
1322#define ECB_I2A_I32_DIGITS 11
1323#define ECB_I2A_U32_DIGITS 10
1324#define ECB_I2A_I64_DIGITS 20
1325#define ECB_I2A_U64_DIGITS 21
1326#define ECB_I2A_MAX_DIGITS 21
1327
1328ecb_inline char *
1329ecb_i2a_u32 (char *ptr, uint32_t u)
1330{
1331 #if ECB_64BIT_NATIVE
1332 if (ecb_expect_true (u <= ECB_I2A_MAX_X10))
1333 ptr = ecb_i2a_x10 (ptr, u);
1334 else /* x10 almost, but not fully, covers 32 bit */
1335 {
1336 uint32_t u1 = u % 1000000000;
1337 uint32_t u2 = u / 1000000000;
1338
1339 *ptr++ = u2 + '0';
1340 ptr = ecb_i2a_09 (ptr, u1);
1341 }
1342 #else
1343 if (ecb_expect_true (u <= ECB_I2A_MAX_X5))
 1344 ptr = ecb_i2a_x5 (ptr, u);
1345 else if (ecb_expect_true (u <= ECB_I2A_MAX_X5 * 10000))
1346 {
1347 uint32_t u1 = u % 10000;
1348 uint32_t u2 = u / 10000;
1349
1350 ptr = ecb_i2a_x5 (ptr, u2);
1351 ptr = ecb_i2a_04 (ptr, u1);
1352 }
1353 else
1354 {
1355 uint32_t u1 = u % 10000;
1356 uint32_t ua = u / 10000;
1357 uint32_t u2 = ua % 10000;
1358 uint32_t u3 = ua / 10000;
1359
1360 ptr = ecb_i2a_2 (ptr, u3);
1361 ptr = ecb_i2a_04 (ptr, u2);
1362 ptr = ecb_i2a_04 (ptr, u1);
1363 }
1364 #endif
1365
1366 return ptr;
1367}
1368
1369ecb_inline char *
1370ecb_i2a_i32 (char *ptr, int32_t v)
1371{
1372 *ptr = '-'; ptr += v < 0;
1373 uint32_t u = v < 0 ? -(uint32_t)v : v;
1374
1375 #if ECB_64BIT_NATIVE
1376 ptr = ecb_i2a_x10 (ptr, u); /* x10 fully covers 31 bit */
1377 #else
1378 ptr = ecb_i2a_u32 (ptr, u);
1379 #endif
1380
1381 return ptr;
1382}
1383
1384ecb_inline char *
1385ecb_i2a_u64 (char *ptr, uint64_t u)
1386{
1387 #if ECB_64BIT_NATIVE
1388 if (ecb_expect_true (u <= ECB_I2A_MAX_X10))
1389 ptr = ecb_i2a_x10 (ptr, u);
1390 else if (ecb_expect_false (u <= ECB_I2A_MAX_X10 * 1000000000))
1391 {
1392 uint64_t u1 = u % 1000000000;
1393 uint64_t u2 = u / 1000000000;
1394
1395 ptr = ecb_i2a_x10 (ptr, u2);
1396 ptr = ecb_i2a_09 (ptr, u1);
1397 }
1398 else
1399 {
1400 uint64_t u1 = u % 1000000000;
1401 uint64_t ua = u / 1000000000;
1402 uint64_t u2 = ua % 1000000000;
1403 uint64_t u3 = ua / 1000000000;
1404
1405 ptr = ecb_i2a_2 (ptr, u3);
1406 ptr = ecb_i2a_09 (ptr, u2);
1407 ptr = ecb_i2a_09 (ptr, u1);
1408 }
1409 #else
1410 if (ecb_expect_true (u <= ECB_I2A_MAX_X5))
1411 ptr = ecb_i2a_x5 (ptr, u);
1412 else
1413 {
1414 uint64_t u1 = u % 10000;
1415 uint64_t u2 = u / 10000;
1416
1417 ptr = ecb_i2a_u64 (ptr, u2);
1418 ptr = ecb_i2a_04 (ptr, u1);
1419 }
1420 #endif
1421
1422 return ptr;
1423}
1424
1425ecb_inline char *
1426ecb_i2a_i64 (char *ptr, int64_t v)
1427{
1428 *ptr = '-'; ptr += v < 0;
1429 uint64_t u = v < 0 ? -(uint64_t)v : v;
1430
1431 #if ECB_64BIT_NATIVE
1432 if (ecb_expect_true (u <= ECB_I2A_MAX_X10))
1433 ptr = ecb_i2a_x10 (ptr, u);
1434 else if (ecb_expect_false (u <= ECB_I2A_MAX_X10 * 1000000000))
1435 {
1436 uint64_t u1 = u % 1000000000;
1437 uint64_t u2 = u / 1000000000;
1438
1439 ptr = ecb_i2a_x10 (ptr, u2);
1440 ptr = ecb_i2a_09 (ptr, u1);
1441 }
1442 else
1443 {
1444 uint64_t u1 = u % 1000000000;
1445 uint64_t ua = u / 1000000000;
1446 uint64_t u2 = ua % 1000000000;
1447 uint64_t u3 = ua / 1000000000;
1448
 1449 /* 2**63 is 19 digits, so the top is exactly one digit */
1450 *ptr++ = u3 + '0';
1451 ptr = ecb_i2a_09 (ptr, u2);
1452 ptr = ecb_i2a_09 (ptr, u1);
1453 }
1454 #else
1455 ptr = ecb_i2a_u64 (ptr, u);
1456 #endif
1457
1458 return ptr;
1459}
1460
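Usage sketch (not part of the header): the i2a converters write digits at ptr, suppress leading zeroes (except for the 0-prefixed variants), do not NUL-terminate, and return a pointer past the last character written:

#include <stdio.h>
#include "ecb.h"

int main (void)
{
  char buf[ECB_I2A_MAX_DIGITS + 1];
  char *end = ecb_i2a_i32 (buf, -12345);   /* returns one past the last digit */

  *end = 0;                                /* terminate it ourselves */
  printf ("%s (%d chars)\n", buf, (int)(end - buf));   /* -12345 (6 chars) */

  return 0;
}
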
650/*******************************************************************************/ 1461/*******************************************************************************/
651/* floating point stuff, can be disabled by defining ECB_NO_LIBM */ 1462/* floating point stuff, can be disabled by defining ECB_NO_LIBM */
652 1463
653/* basically, everything uses "ieee pure-endian" floating point numbers */ 1464/* basically, everything uses "ieee pure-endian" floating point numbers */
654/* the only noteworthy exception is ancient armle, which uses order 43218765 */ 1465/* the only noteworthy exception is ancient armle, which uses order 43218765 */
655#if 0 \ 1466#if 0 \
656 || __i386 || __i386__ \ 1467 || __i386 || __i386__ \
657 || __amd64 || __amd64__ || __x86_64 || __x86_64__ \ 1468 || ECB_GCC_AMD64 \
658 || __powerpc__ || __ppc__ || __powerpc64__ || __ppc64__ \ 1469 || __powerpc__ || __ppc__ || __powerpc64__ || __ppc64__ \
659 || defined __s390__ || defined __s390x__ \ 1470 || defined __s390__ || defined __s390x__ \
660 || defined __mips__ \ 1471 || defined __mips__ \
661 || defined __alpha__ \ 1472 || defined __alpha__ \
662 || defined __hppa__ \ 1473 || defined __hppa__ \
663 || defined __ia64__ \ 1474 || defined __ia64__ \
664 || defined __m68k__ \ 1475 || defined __m68k__ \
665 || defined __m88k__ \ 1476 || defined __m88k__ \
666 || defined __sh__ \ 1477 || defined __sh__ \
 667 || defined _M_IX86 || defined _M_AMD64 || defined _M_IA64 \ 1478 || defined _M_IX86 || ECB_MSVC_AMD64 || defined _M_IA64 \
668 || (defined __arm__ && (defined __ARM_EABI__ || defined __EABI__ || defined __VFP_FP__ || defined _WIN32_WCE || defined __ANDROID__)) \ 1479 || (defined __arm__ && (defined __ARM_EABI__ || defined __EABI__ || defined __VFP_FP__ || defined _WIN32_WCE || defined __ANDROID__)) \
669 || defined __aarch64__ 1480 || defined __aarch64__
670 #define ECB_STDFP 1 1481 #define ECB_STDFP 1
671 #include <string.h> /* for memcpy */
672#else 1482#else
673 #define ECB_STDFP 0 1483 #define ECB_STDFP 0
674#endif 1484#endif
675 1485
676#ifndef ECB_NO_LIBM 1486#ifndef ECB_NO_LIBM
690 #define ECB_NAN ECB_INFINITY 1500 #define ECB_NAN ECB_INFINITY
691 #endif 1501 #endif
692 1502
693 #if ECB_C99 || _XOPEN_VERSION >= 600 || _POSIX_VERSION >= 200112L 1503 #if ECB_C99 || _XOPEN_VERSION >= 600 || _POSIX_VERSION >= 200112L
694 #define ecb_ldexpf(x,e) ldexpf ((x), (e)) 1504 #define ecb_ldexpf(x,e) ldexpf ((x), (e))
1505 #define ecb_frexpf(x,e) frexpf ((x), (e))
695 #else 1506 #else
696 #define ecb_ldexpf(x,e) (float) ldexp ((x), (e)) 1507 #define ecb_ldexpf(x,e) (float) ldexp ((double) (x), (e))
1508 #define ecb_frexpf(x,e) (float) frexp ((double) (x), (e))
697 #endif 1509 #endif
698
699 /* converts an ieee half/binary16 to a float */
700 ecb_function_ ecb_const float ecb_binary16_to_float (uint16_t x);
701 ecb_function_ ecb_const float
702 ecb_binary16_to_float (uint16_t x)
703 {
704 int e = (x >> 10) & 0x1f;
705 int m = x & 0x3ff;
706 float r;
707
708 if (!e ) r = ecb_ldexpf (m , -24);
709 else if (e != 31) r = ecb_ldexpf (m + 0x400, e - 25);
710 else if (m ) r = ECB_NAN;
711 else r = ECB_INFINITY;
712
713 return x & 0x8000 ? -r : r;
714 }
715 1510
716 /* convert a float to ieee single/binary32 */ 1511 /* convert a float to ieee single/binary32 */
717 ecb_function_ ecb_const uint32_t ecb_float_to_binary32 (float x); 1512 ecb_function_ ecb_const uint32_t ecb_float_to_binary32 (float x);
718 ecb_function_ ecb_const uint32_t 1513 ecb_function_ ecb_const uint32_t
719 ecb_float_to_binary32 (float x) 1514 ecb_float_to_binary32 (float x)
730 if (x == 0e0f ) return 0x00000000U; 1525 if (x == 0e0f ) return 0x00000000U;
731 if (x > +3.40282346638528860e+38f) return 0x7f800000U; 1526 if (x > +3.40282346638528860e+38f) return 0x7f800000U;
732 if (x < -3.40282346638528860e+38f) return 0xff800000U; 1527 if (x < -3.40282346638528860e+38f) return 0xff800000U;
733 if (x != x ) return 0x7fbfffffU; 1528 if (x != x ) return 0x7fbfffffU;
734 1529
735 m = frexpf (x, &e) * 0x1000000U; 1530 m = ecb_frexpf (x, &e) * 0x1000000U;
736 1531
737 r = m & 0x80000000U; 1532 r = m & 0x80000000U;
738 1533
739 if (r) 1534 if (r)
740 m = -m; 1535 m = -m;
851 #endif 1646 #endif
852 1647
853 return r; 1648 return r;
854 } 1649 }
855 1650
856#endif 1651 /* convert a float to ieee half/binary16 */
1652 ecb_function_ ecb_const uint16_t ecb_float_to_binary16 (float x);
1653 ecb_function_ ecb_const uint16_t
1654 ecb_float_to_binary16 (float x)
1655 {
1656 return ecb_binary32_to_binary16 (ecb_float_to_binary32 (x));
1657 }
857 1658
858#endif 1659 /* convert an ieee half/binary16 to float */
1660 ecb_function_ ecb_const float ecb_binary16_to_float (uint16_t x);
1661 ecb_function_ ecb_const float
1662 ecb_binary16_to_float (uint16_t x)
1663 {
1664 return ecb_binary32_to_float (ecb_binary16_to_binary32 (x));
1665 }
859 1666
1667#endif
1668
1669#endif
1670
