/*
 * libecb - http://software.schmorp.de/pkg/libecb
 *
 * Copyright (©) 2009-2015,2018-2021 Marc Alexander Lehmann <libecb@schmorp.de>
 * Copyright (©) 2011 Emanuele Giaquinta
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without modifica-
 * tion, are permitted provided that the following conditions are met:
…

#ifndef ECB_H
#define ECB_H

/* 16 bits major, 16 bits minor */
#define ECB_VERSION 0x0001000c

#include <string.h> /* for memcpy */

#if defined (_WIN32) && !defined (__MINGW32__)
typedef signed char int8_t;
typedef unsigned char uint8_t;
typedef signed char int_fast8_t;
typedef unsigned char uint_fast8_t;
typedef signed short int16_t;
typedef unsigned short uint16_t;
typedef signed int int_fast16_t;
typedef unsigned int uint_fast16_t;
typedef signed int int32_t;
typedef unsigned int uint32_t;
typedef signed int int_fast32_t;
typedef unsigned int uint_fast32_t;
#if __GNUC__
typedef signed long long int64_t;
typedef unsigned long long uint64_t;
#else /* _MSC_VER || __BORLANDC__ */
typedef signed __int64 int64_t;
typedef unsigned __int64 uint64_t;
#endif
typedef int64_t int_fast64_t;
typedef uint64_t uint_fast64_t;
#ifdef _WIN64
#define ECB_PTRSIZE 8
typedef uint64_t uintptr_t;
typedef int64_t intptr_t;
#else
…
#endif

#define ECB_GCC_AMD64 (__amd64 || __amd64__ || __x86_64 || __x86_64__)
#define ECB_MSVC_AMD64 (_M_AMD64 || _M_X64)

#ifndef ECB_OPTIMIZE_SIZE
#if __OPTIMIZE_SIZE__
#define ECB_OPTIMIZE_SIZE 1
#else
#define ECB_OPTIMIZE_SIZE 0
#endif
#endif

/* work around x32 idiocy by defining proper macros */
#if ECB_GCC_AMD64 || ECB_MSVC_AMD64
#if _ILP32
#define ECB_AMD64_X32 1
#else
#define ECB_AMD64 1
#endif
#endif

#if ECB_PTRSIZE >= 8 || ECB_AMD64_X32
#define ECB_64BIT_NATIVE 1
#else
#define ECB_64BIT_NATIVE 0
#endif

/* many compilers define __GNUC__ to some versions but then only implement
 * what their idiot authors think are the "more important" extensions,
 * causing enormous grief in return for some better fake benchmark numbers.
…
#if ECB_GCC_VERSION(4,7)
/* see comment below (stdatomic.h) about the C11 memory model. */
#define ECB_MEMORY_FENCE __atomic_thread_fence (__ATOMIC_SEQ_CST)
#define ECB_MEMORY_FENCE_ACQUIRE __atomic_thread_fence (__ATOMIC_ACQUIRE)
#define ECB_MEMORY_FENCE_RELEASE __atomic_thread_fence (__ATOMIC_RELEASE)
#undef ECB_MEMORY_FENCE_RELAXED
#define ECB_MEMORY_FENCE_RELAXED __atomic_thread_fence (__ATOMIC_RELAXED)

#elif ECB_CLANG_EXTENSION(c_atomic)
/* see comment below (stdatomic.h) about the C11 memory model. */
#define ECB_MEMORY_FENCE __c11_atomic_thread_fence (__ATOMIC_SEQ_CST)
#define ECB_MEMORY_FENCE_ACQUIRE __c11_atomic_thread_fence (__ATOMIC_ACQUIRE)
#define ECB_MEMORY_FENCE_RELEASE __c11_atomic_thread_fence (__ATOMIC_RELEASE)
#undef ECB_MEMORY_FENCE_RELAXED
#define ECB_MEMORY_FENCE_RELAXED __c11_atomic_thread_fence (__ATOMIC_RELAXED)

#elif ECB_GCC_VERSION(4,4) || defined __INTEL_COMPILER || defined __clang__
#define ECB_MEMORY_FENCE __sync_synchronize ()
#elif _MSC_VER >= 1500 /* VC++ 2008 */
…
#define ECB_CONCAT(a, b) ECB_CONCAT_(a, b)
#define ECB_STRINGIFY_(a) # a
#define ECB_STRINGIFY(a) ECB_STRINGIFY_(a)
#define ECB_STRINGIFY_EXPR(expr) ((expr), ECB_STRINGIFY_ (expr))

/* This marks larger functions that do not necessarily need to be inlined */
/* The idea is to possibly compile the header twice, */
/* once exposing only the declarations, another time to define external functions */
/* TODO: possibly static would be best for these at the moment? */
#define ecb_function_ ecb_inline

#if ECB_GCC_VERSION(3,1) || ECB_CLANG_VERSION(2,8)
#define ecb_attribute(attrlist) __attribute__ (attrlist)
#else
…
/* count trailing zero bits and count # of one bits */
#if ECB_GCC_VERSION(3,4) \
    || (ECB_CLANG_BUILTIN(__builtin_clz) && ECB_CLANG_BUILTIN(__builtin_clzll) \
        && ECB_CLANG_BUILTIN(__builtin_ctz) && ECB_CLANG_BUILTIN(__builtin_ctzll) \
        && ECB_CLANG_BUILTIN(__builtin_popcount))
#define ecb_ctz32(x) __builtin_ctz (x)
#define ecb_ctz64(x) (__SIZEOF_LONG__ == 64 ? __builtin_ctzl (x) : __builtin_ctzll (x))
#define ecb_clz32(x) __builtin_clz (x)
#define ecb_clz64(x) (__SIZEOF_LONG__ == 64 ? __builtin_clzl (x) : __builtin_clzll (x))
#define ecb_ld32(x) (ecb_clz32 (x) ^ 31)
#define ecb_ld64(x) (ecb_clz64 (x) ^ 63)
#define ecb_popcount32(x) __builtin_popcount (x)
/* ecb_popcount64 is more difficult, see below */
#else
ecb_function_ ecb_const int ecb_ctz32 (uint32_t x);
ecb_function_ ecb_const int ecb_ctz32 (uint32_t x)
{
#if 1400 <= _MSC_VER && (_M_IX86 || _M_X64 || _M_IA64 || _M_ARM)
  unsigned long r;
  _BitScanForward (&r, x);
  return (int)r;
#else
  int r;

  x &= ~x + 1; /* this isolates the lowest bit */

#if 1
  /* David Seal's algorithm, Message-ID: <32975@armltd.uucp> from 1994 */
  /* This happens to return 32 for x == 0, but the API does not support this */

  /* -0 marks unused entries */
  static unsigned char table[64] =
    {
      32, 0, 1, 12, 2, 6, -0, 13, 3, -0, 7, -0, -0, -0, -0, 14,
      10, 4, -0, -0, 8, -0, -0, 25, -0, -0, -0, -0, -0, 21, 27, 15,
      31, 11, 5, -0, -0, -0, -0, -0, 9, -0, -0, 24, -0, -0, 20, 26,
      30, -0, -0, -0, -0, 23, -0, 19, 29, -0, 22, 18, 28, 17, 16, -0
    };

  /* magic constant results in 33 unique values in the upper 6 bits */
  x *= 0x0450fbafU; /* == 17 * 65 * 65535 */

  r = table [x >> 26];
#elif 0 /* branchless on i386, typically */
  r = 0;
  r += !!(x & 0xaaaaaaaa) << 0;
  r += !!(x & 0xcccccccc) << 1;
  r += !!(x & 0xf0f0f0f0) << 2;
  r += !!(x & 0xff00ff00) << 3;
  r += !!(x & 0xffff0000) << 4;
#else /* branchless on modern compilers, typically */
  r = 0;
  if (x & 0xaaaaaaaa) r += 1;
  if (x & 0xcccccccc) r += 2;
  if (x & 0xf0f0f0f0) r += 4;
  if (x & 0xff00ff00) r += 8;
  if (x & 0xffff0000) r += 16;
…
  return r;
#endif
}

ecb_function_ ecb_const int ecb_ctz64 (uint64_t x);
ecb_function_ ecb_const int ecb_ctz64 (uint64_t x)
{
#if 1400 <= _MSC_VER && (_M_X64 || _M_IA64 || _M_ARM)
  unsigned long r;
  _BitScanForward64 (&r, x);
  return (int)r;
#else
  int shift = x & 0xffffffff ? 0 : 32;
  return ecb_ctz32 (x >> shift) + shift;
#endif
}

ecb_function_ ecb_const int ecb_clz32 (uint32_t x);
ecb_function_ ecb_const int ecb_clz32 (uint32_t x)
{
#if 1400 <= _MSC_VER && (_M_IX86 || _M_X64 || _M_IA64 || _M_ARM)
  unsigned long r;
  _BitScanReverse (&r, x);
  return (int)r;
#else

  /* Robert Harley's algorithm from comp.arch 1996-12-07 */
  /* This happens to return 32 for x == 0, but the API does not support this */

  /* -0 marks unused table elements */
  static unsigned char table[64] =
    {
      32, 31, -0, 16, -0, 30, 3, -0, 15, -0, -0, -0, 29, 10, 2, -0,
      -0, -0, 12, 14, 21, -0, 19, -0, -0, 28, -0, 25, -0, 9, 1, -0,
      17, -0, 4, -0, -0, -0, 11, -0, 13, 22, 20, -0, 26, -0, -0, 18,
      5, -0, -0, 23, -0, 27, -0, 6, -0, 24, 7, -0, 8, -0, 0, -0
    };

  /* propagate leftmost 1 bit to the right */
  x |= x >> 1;
  x |= x >> 2;
  x |= x >> 4;
  x |= x >> 8;
  x |= x >> 16;

  /* magic constant results in 33 unique values in the upper 6 bits */
  x *= 0x06EB14F9U; /* == 7 * 255 * 255 * 255 */

  return table [x >> 26];
#endif
}

ecb_function_ ecb_const int ecb_clz64 (uint64_t x);
ecb_function_ ecb_const int ecb_clz64 (uint64_t x)
{
#if 1400 <= _MSC_VER && (_M_X64 || _M_IA64 || _M_ARM)
  unsigned long r;
  _BitScanReverse64 (&r, x);
  return (int)r;
#else
  uint32_t l = x >> 32;
  int shift = l ? 0 : 32;
  return ecb_clz32 (l ? l : x) + shift;
#endif
}

ecb_function_ ecb_const int ecb_popcount32 (uint32_t x);
ecb_function_ ecb_const int
…
  x = ( x >> 16 ) | ( x << 16);

  return x;
}

ecb_function_ ecb_const int ecb_popcount64 (uint64_t x);
ecb_function_ ecb_const int ecb_popcount64 (uint64_t x)
{
  /* popcount64 is only available on 64 bit cpus as gcc builtin. */
  /* also, gcc/clang make this surprisingly difficult to use */
#if (__SIZEOF_LONG__ == 8) && (ECB_GCC_VERSION(3,4) || ECB_CLANG_BUILTIN (__builtin_popcountl))
  return __builtin_popcountl (x);
#else
  return ecb_popcount32 (x) + ecb_popcount32 (x >> 32);
#endif
}
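
/* Illustrative usage sketch (not part of the library itself): the values below
 * follow directly from the definitions above, i.e. ecb_ctz32 counts trailing
 * zero bits, ecb_ld32 yields the index of the highest set bit, and
 * ecb_popcount32/ecb_popcount64 count the set bits:
 *
 *   ecb_ctz32 (0x00000008)                 == 3
 *   ecb_ld32 (0x00000008)                  == 3
 *   ecb_popcount32 (0x000000ff)            == 8
 *   ecb_popcount64 (0xffffffff00000001ULL) == 33
 */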

ecb_inline ecb_const uint8_t ecb_rotl8 (uint8_t x, unsigned int count);
ecb_inline ecb_const uint8_t ecb_rotr8 (uint8_t x, unsigned int count);
ecb_inline ecb_const uint16_t ecb_rotl16 (uint16_t x, unsigned int count);
…
ecb_inline ecb_const uint32_t ecb_rotl32 (uint32_t x, unsigned int count);
ecb_inline ecb_const uint32_t ecb_rotr32 (uint32_t x, unsigned int count);
ecb_inline ecb_const uint64_t ecb_rotl64 (uint64_t x, unsigned int count);
ecb_inline ecb_const uint64_t ecb_rotr64 (uint64_t x, unsigned int count);

ecb_inline ecb_const uint8_t ecb_rotl8 (uint8_t x, unsigned int count) { return (x >> (-count & 7)) | (x << (count & 7)); }
ecb_inline ecb_const uint8_t ecb_rotr8 (uint8_t x, unsigned int count) { return (x << (-count & 7)) | (x >> (count & 7)); }
ecb_inline ecb_const uint16_t ecb_rotl16 (uint16_t x, unsigned int count) { return (x >> (-count & 15)) | (x << (count & 15)); }
ecb_inline ecb_const uint16_t ecb_rotr16 (uint16_t x, unsigned int count) { return (x << (-count & 15)) | (x >> (count & 15)); }
ecb_inline ecb_const uint32_t ecb_rotl32 (uint32_t x, unsigned int count) { return (x >> (-count & 31)) | (x << (count & 31)); }
ecb_inline ecb_const uint32_t ecb_rotr32 (uint32_t x, unsigned int count) { return (x << (-count & 31)) | (x >> (count & 31)); }
ecb_inline ecb_const uint64_t ecb_rotl64 (uint64_t x, unsigned int count) { return (x >> (-count & 63)) | (x << (count & 63)); }
ecb_inline ecb_const uint64_t ecb_rotr64 (uint64_t x, unsigned int count) { return (x << (-count & 63)) | (x >> (count & 63)); }
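
/* Usage sketch (illustrative only): the rotate helpers above give, e.g.
 *
 *   ecb_rotl32 (0x12345678, 8) == 0x34567812
 *   ecb_rotr32 (0x12345678, 8) == 0x78123456
 *   ecb_rotl8  (0x81, 1)       == 0x03
 */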
#if ECB_CPP

inline uint8_t ecb_ctz (uint8_t v) { return ecb_ctz32 (v); }
inline uint16_t ecb_ctz (uint16_t v) { return ecb_ctz32 (v); }
inline uint32_t ecb_ctz (uint32_t v) { return ecb_ctz32 (v); }
inline uint64_t ecb_ctz (uint64_t v) { return ecb_ctz64 (v); }

inline bool ecb_is_pot (uint8_t v) { return ecb_is_pot32 (v); }
inline bool ecb_is_pot (uint16_t v) { return ecb_is_pot32 (v); }
inline bool ecb_is_pot (uint32_t v) { return ecb_is_pot32 (v); }
inline bool ecb_is_pot (uint64_t v) { return ecb_is_pot64 (v); }

inline int ecb_ld (uint8_t v) { return ecb_ld32 (v); }
inline int ecb_ld (uint16_t v) { return ecb_ld32 (v); }
inline int ecb_ld (uint32_t v) { return ecb_ld32 (v); }
inline int ecb_ld (uint64_t v) { return ecb_ld64 (v); }

inline int ecb_popcount (uint8_t v) { return ecb_popcount32 (v); }
inline int ecb_popcount (uint16_t v) { return ecb_popcount32 (v); }
inline int ecb_popcount (uint32_t v) { return ecb_popcount32 (v); }
inline int ecb_popcount (uint64_t v) { return ecb_popcount64 (v); }

inline uint8_t ecb_bitrev (uint8_t v) { return ecb_bitrev8 (v); }
inline uint16_t ecb_bitrev (uint16_t v) { return ecb_bitrev16 (v); }
inline uint32_t ecb_bitrev (uint32_t v) { return ecb_bitrev32 (v); }

inline uint8_t ecb_rotl (uint8_t v, unsigned int count) { return ecb_rotl8 (v, count); }
inline uint16_t ecb_rotl (uint16_t v, unsigned int count) { return ecb_rotl16 (v, count); }
inline uint32_t ecb_rotl (uint32_t v, unsigned int count) { return ecb_rotl32 (v, count); }
inline uint64_t ecb_rotl (uint64_t v, unsigned int count) { return ecb_rotl64 (v, count); }

inline uint8_t ecb_rotr (uint8_t v, unsigned int count) { return ecb_rotr8 (v, count); }
inline uint16_t ecb_rotr (uint16_t v, unsigned int count) { return ecb_rotr16 (v, count); }
inline uint32_t ecb_rotr (uint32_t v, unsigned int count) { return ecb_rotr32 (v, count); }
inline uint64_t ecb_rotr (uint64_t v, unsigned int count) { return ecb_rotr64 (v, count); }

#endif

#if ECB_GCC_VERSION(4,3) || (ECB_CLANG_BUILTIN(__builtin_bswap32) && ECB_CLANG_BUILTIN(__builtin_bswap64))
#if ECB_GCC_VERSION(4,8) || ECB_CLANG_BUILTIN(__builtin_bswap16)
#define ecb_bswap16(x) __builtin_bswap16 (x)
#else
…
#define ecb_bswap16(x) ((uint16_t)_byteswap_ushort ((uint16_t)(x)))
#define ecb_bswap32(x) ((uint32_t)_byteswap_ulong ((uint32_t)(x)))
#define ecb_bswap64(x) ((uint64_t)_byteswap_uint64 ((uint64_t)(x)))
#else
ecb_function_ ecb_const uint16_t ecb_bswap16 (uint16_t x);
ecb_function_ ecb_const uint16_t ecb_bswap16 (uint16_t x)
{
  return ecb_rotl16 (x, 8);
}

ecb_function_ ecb_const uint32_t ecb_bswap32 (uint32_t x);
ecb_function_ ecb_const uint32_t ecb_bswap32 (uint32_t x)
{
  return (((uint32_t)ecb_bswap16 (x)) << 16) | ecb_bswap16 (x >> 16);
}

ecb_function_ ecb_const uint64_t ecb_bswap64 (uint64_t x);
ecb_function_ ecb_const uint64_t ecb_bswap64 (uint64_t x)
{
  return (((uint64_t)ecb_bswap32 (x)) << 32) | ecb_bswap32 (x >> 32);
}
#endif
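
/* Byte-swap usage sketch (illustrative only):
 *
 *   ecb_bswap16 (0x1122)                 == 0x2211
 *   ecb_bswap32 (0x11223344)             == 0x44332211
 *   ecb_bswap64 (0x1122334455667788ULL)  == 0x8877665544332211ULL
 */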

…

/* try to tell the compiler that some condition is definitely true */
#define ecb_assume(cond) if (!(cond)) ecb_unreachable (); else 0

ecb_inline ecb_const uint32_t ecb_byteorder_helper (void);
ecb_inline ecb_const uint32_t ecb_byteorder_helper (void)
{
  /* the union code still generates code under pressure in gcc, */
  /* but less than using pointers, and always seems to */
  /* successfully return a constant. */
  /* the reason why we have this horrible preprocessor mess */
…
ecb_inline ecb_const ecb_bool ecb_big_endian (void);
ecb_inline ecb_const ecb_bool ecb_big_endian (void) { return ecb_byteorder_helper () == 0x11223344; }
ecb_inline ecb_const ecb_bool ecb_little_endian (void);
ecb_inline ecb_const ecb_bool ecb_little_endian (void) { return ecb_byteorder_helper () == 0x44332211; }

/*****************************************************************************/
/* unaligned load/store */

ecb_inline uint_fast16_t ecb_be_u16_to_host (uint_fast16_t v) { return ecb_little_endian () ? ecb_bswap16 (v) : v; }
ecb_inline uint_fast32_t ecb_be_u32_to_host (uint_fast32_t v) { return ecb_little_endian () ? ecb_bswap32 (v) : v; }
ecb_inline uint_fast64_t ecb_be_u64_to_host (uint_fast64_t v) { return ecb_little_endian () ? ecb_bswap64 (v) : v; }

ecb_inline uint_fast16_t ecb_le_u16_to_host (uint_fast16_t v) { return ecb_big_endian () ? ecb_bswap16 (v) : v; }
ecb_inline uint_fast32_t ecb_le_u32_to_host (uint_fast32_t v) { return ecb_big_endian () ? ecb_bswap32 (v) : v; }
ecb_inline uint_fast64_t ecb_le_u64_to_host (uint_fast64_t v) { return ecb_big_endian () ? ecb_bswap64 (v) : v; }

ecb_inline uint_fast16_t ecb_peek_u16_u (const void *ptr) { uint16_t v; memcpy (&v, ptr, sizeof (v)); return v; }
ecb_inline uint_fast32_t ecb_peek_u32_u (const void *ptr) { uint32_t v; memcpy (&v, ptr, sizeof (v)); return v; }
ecb_inline uint_fast64_t ecb_peek_u64_u (const void *ptr) { uint64_t v; memcpy (&v, ptr, sizeof (v)); return v; }

ecb_inline uint_fast16_t ecb_peek_be_u16_u (const void *ptr) { return ecb_be_u16_to_host (ecb_peek_u16_u (ptr)); }
ecb_inline uint_fast32_t ecb_peek_be_u32_u (const void *ptr) { return ecb_be_u32_to_host (ecb_peek_u32_u (ptr)); }
ecb_inline uint_fast64_t ecb_peek_be_u64_u (const void *ptr) { return ecb_be_u64_to_host (ecb_peek_u64_u (ptr)); }

ecb_inline uint_fast16_t ecb_peek_le_u16_u (const void *ptr) { return ecb_le_u16_to_host (ecb_peek_u16_u (ptr)); }
ecb_inline uint_fast32_t ecb_peek_le_u32_u (const void *ptr) { return ecb_le_u32_to_host (ecb_peek_u32_u (ptr)); }
ecb_inline uint_fast64_t ecb_peek_le_u64_u (const void *ptr) { return ecb_le_u64_to_host (ecb_peek_u64_u (ptr)); }

ecb_inline uint_fast16_t ecb_host_to_be_u16 (uint_fast16_t v) { return ecb_little_endian () ? ecb_bswap16 (v) : v; }
ecb_inline uint_fast32_t ecb_host_to_be_u32 (uint_fast32_t v) { return ecb_little_endian () ? ecb_bswap32 (v) : v; }
ecb_inline uint_fast64_t ecb_host_to_be_u64 (uint_fast64_t v) { return ecb_little_endian () ? ecb_bswap64 (v) : v; }

ecb_inline uint_fast16_t ecb_host_to_le_u16 (uint_fast16_t v) { return ecb_big_endian () ? ecb_bswap16 (v) : v; }
ecb_inline uint_fast32_t ecb_host_to_le_u32 (uint_fast32_t v) { return ecb_big_endian () ? ecb_bswap32 (v) : v; }
ecb_inline uint_fast64_t ecb_host_to_le_u64 (uint_fast64_t v) { return ecb_big_endian () ? ecb_bswap64 (v) : v; }

ecb_inline void ecb_poke_u16_u (void *ptr, uint16_t v) { memcpy (ptr, &v, sizeof (v)); }
ecb_inline void ecb_poke_u32_u (void *ptr, uint32_t v) { memcpy (ptr, &v, sizeof (v)); }
ecb_inline void ecb_poke_u64_u (void *ptr, uint64_t v) { memcpy (ptr, &v, sizeof (v)); }

ecb_inline void ecb_poke_be_u16_u (void *ptr, uint_fast16_t v) { ecb_poke_u16_u (ptr, ecb_host_to_be_u16 (v)); }
ecb_inline void ecb_poke_be_u32_u (void *ptr, uint_fast32_t v) { ecb_poke_u32_u (ptr, ecb_host_to_be_u32 (v)); }
ecb_inline void ecb_poke_be_u64_u (void *ptr, uint_fast64_t v) { ecb_poke_u64_u (ptr, ecb_host_to_be_u64 (v)); }

ecb_inline void ecb_poke_le_u16_u (void *ptr, uint_fast16_t v) { ecb_poke_u16_u (ptr, ecb_host_to_le_u16 (v)); }
ecb_inline void ecb_poke_le_u32_u (void *ptr, uint_fast32_t v) { ecb_poke_u32_u (ptr, ecb_host_to_le_u32 (v)); }
ecb_inline void ecb_poke_le_u64_u (void *ptr, uint_fast64_t v) { ecb_poke_u64_u (ptr, ecb_host_to_le_u64 (v)); }
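
/* Usage sketch for the unaligned helpers above (illustrative only; "buf" and
 * "out" are hypothetical caller-side buffers):
 *
 *   const unsigned char buf[4] = { 0x11, 0x22, 0x33, 0x44 };
 *   ecb_peek_be_u32_u (buf) == 0x11223344   // big-endian read
 *   ecb_peek_le_u32_u (buf) == 0x44332211   // little-endian read
 *
 *   unsigned char out[2];
 *   ecb_poke_be_u16_u (out, 0xabcd);        // out now holds { 0xab, 0xcd }
 */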

#if ECB_CPP

inline uint8_t ecb_bswap (uint8_t v) { return v; }
inline uint16_t ecb_bswap (uint16_t v) { return ecb_bswap16 (v); }
inline uint32_t ecb_bswap (uint32_t v) { return ecb_bswap32 (v); }
inline uint64_t ecb_bswap (uint64_t v) { return ecb_bswap64 (v); }

template<typename T> inline T ecb_be_to_host (T v) { return ecb_little_endian () ? ecb_bswap (v) : v; }
template<typename T> inline T ecb_le_to_host (T v) { return ecb_big_endian () ? ecb_bswap (v) : v; }
template<typename T> inline T ecb_peek (const void *ptr) { return *(const T *)ptr; }
template<typename T> inline T ecb_peek_be (const void *ptr) { return ecb_be_to_host (ecb_peek <T> (ptr)); }
template<typename T> inline T ecb_peek_le (const void *ptr) { return ecb_le_to_host (ecb_peek <T> (ptr)); }
template<typename T> inline T ecb_peek_u (const void *ptr) { T v; memcpy (&v, ptr, sizeof (v)); return v; }
template<typename T> inline T ecb_peek_be_u (const void *ptr) { return ecb_be_to_host (ecb_peek_u<T> (ptr)); }
template<typename T> inline T ecb_peek_le_u (const void *ptr) { return ecb_le_to_host (ecb_peek_u<T> (ptr)); }

template<typename T> inline T ecb_host_to_be (T v) { return ecb_little_endian () ? ecb_bswap (v) : v; }
template<typename T> inline T ecb_host_to_le (T v) { return ecb_big_endian () ? ecb_bswap (v) : v; }
template<typename T> inline void ecb_poke (void *ptr, T v) { *(T *)ptr = v; }
template<typename T> inline void ecb_poke_be (void *ptr, T v) { return ecb_poke <T> (ptr, ecb_host_to_be (v)); }
template<typename T> inline void ecb_poke_le (void *ptr, T v) { return ecb_poke <T> (ptr, ecb_host_to_le (v)); }
template<typename T> inline void ecb_poke_u (void *ptr, T v) { memcpy (ptr, &v, sizeof (v)); }
template<typename T> inline void ecb_poke_be_u (void *ptr, T v) { return ecb_poke_u<T> (ptr, ecb_host_to_be (v)); }
template<typename T> inline void ecb_poke_le_u (void *ptr, T v) { return ecb_poke_u<T> (ptr, ecb_host_to_le (v)); }

#endif

/*****************************************************************************/
/* pointer/integer hashing */

/* based on hash by Chris Wellons, https://nullprogram.com/blog/2018/07/31/ */
ecb_function_ uint32_t ecb_mix32 (uint32_t v);
ecb_function_ uint32_t ecb_mix32 (uint32_t v)
{
  v ^= v >> 16; v *= 0x7feb352dU;
  v ^= v >> 15; v *= 0x846ca68bU;
  v ^= v >> 16;
  return v;
}

ecb_function_ uint32_t ecb_unmix32 (uint32_t v);
ecb_function_ uint32_t ecb_unmix32 (uint32_t v)
{
  v ^= v >> 16          ; v *= 0x43021123U;
  v ^= v >> 15 ^ v >> 30; v *= 0x1d69e2a5U;
  v ^= v >> 16          ;
  return v;
}

/* based on splitmix64, by Sebastiano Vigna, https://prng.di.unimi.it/splitmix64.c */
ecb_function_ uint64_t ecb_mix64 (uint64_t v);
ecb_function_ uint64_t ecb_mix64 (uint64_t v)
{
  v ^= v >> 30; v *= 0xbf58476d1ce4e5b9U;
  v ^= v >> 27; v *= 0x94d049bb133111ebU;
  v ^= v >> 31;
  return v;
}

ecb_function_ uint64_t ecb_unmix64 (uint64_t v);
ecb_function_ uint64_t ecb_unmix64 (uint64_t v)
{
  v ^= v >> 31 ^ v >> 62; v *= 0x319642b2d24d8ec3U;
  v ^= v >> 27 ^ v >> 54; v *= 0x96de1b173f119089U;
  v ^= v >> 30 ^ v >> 60;
  return v;
}

ecb_function_ uintptr_t ecb_ptrmix (void *p);
ecb_function_ uintptr_t ecb_ptrmix (void *p)
{
#if ECB_PTRSIZE <= 4
  return ecb_mix32 ((uint32_t)p);
#else
  return ecb_mix64 ((uint64_t)p);
#endif
}

ecb_function_ void *ecb_ptrunmix (uintptr_t v);
ecb_function_ void *ecb_ptrunmix (uintptr_t v)
{
#if ECB_PTRSIZE <= 4
  return (void *)ecb_unmix32 (v);
#else
  return (void *)ecb_unmix64 (v);
#endif
}
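
/* Usage sketch (illustrative only): ecb_mix32/ecb_mix64 are invertible bit
 * mixers and ecb_unmix32/ecb_unmix64 are intended to be their exact inverses,
 * so for any value v and pointer p:
 *
 *   ecb_unmix32 (ecb_mix32 (v)) == v
 *   ecb_unmix64 (ecb_mix64 (v)) == v
 *   ecb_ptrunmix (ecb_ptrmix (p)) == p
 *
 * which makes them usable as cheap hash functions for hash tables keyed on
 * integers or pointers.
 */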

#if ECB_CPP

template<typename T>
inline uintptr_t ecb_ptrmix (T *p)
{
  return ecb_ptrmix (static_cast<void *>(p));
}

template<typename T>
inline T *ecb_ptrunmix (uintptr_t v)
{
  return static_cast<T *>(ecb_ptrunmix (v));
}

#endif

/*****************************************************************************/
/* gray code */

ecb_inline uint_fast8_t ecb_gray_encode8 (uint_fast8_t b) { return b ^ (b >> 1); }
ecb_inline uint_fast16_t ecb_gray_encode16 (uint_fast16_t b) { return b ^ (b >> 1); }
ecb_inline uint_fast32_t ecb_gray_encode32 (uint_fast32_t b) { return b ^ (b >> 1); }
ecb_inline uint_fast64_t ecb_gray_encode64 (uint_fast64_t b) { return b ^ (b >> 1); }

ecb_function_ uint8_t ecb_gray_decode8 (uint8_t g);
ecb_function_ uint8_t ecb_gray_decode8 (uint8_t g)
{
  g ^= g >> 1;
  g ^= g >> 2;
  g ^= g >> 4;

  return g;
}

ecb_function_ uint16_t ecb_gray_decode16 (uint16_t g);
ecb_function_ uint16_t ecb_gray_decode16 (uint16_t g)
{
  g ^= g >> 1;
  g ^= g >> 2;
  g ^= g >> 4;
  g ^= g >> 8;

  return g;
}

ecb_function_ uint32_t ecb_gray_decode32 (uint32_t g);
ecb_function_ uint32_t ecb_gray_decode32 (uint32_t g)
{
  g ^= g >> 1;
  g ^= g >> 2;
  g ^= g >> 4;
  g ^= g >> 8;
  g ^= g >> 16;

  return g;
}

ecb_function_ uint64_t ecb_gray_decode64 (uint64_t g);
ecb_function_ uint64_t ecb_gray_decode64 (uint64_t g)
{
  g ^= g >> 1;
  g ^= g >> 2;
  g ^= g >> 4;
  g ^= g >> 8;
  g ^= g >> 16;
  g ^= g >> 32;

  return g;
}
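
/* Gray code usage sketch (illustrative only):
 *
 *   ecb_gray_encode8 (5) == 7   // 5 ^ (5 >> 1)
 *   ecb_gray_decode8 (7) == 5   // round-trips the encoding
 *
 * consecutive values always encode to gray codes that differ in exactly one bit.
 */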

#if ECB_CPP

ecb_inline uint8_t ecb_gray_encode (uint8_t b) { return ecb_gray_encode8 (b); }
ecb_inline uint16_t ecb_gray_encode (uint16_t b) { return ecb_gray_encode16 (b); }
ecb_inline uint32_t ecb_gray_encode (uint32_t b) { return ecb_gray_encode32 (b); }
ecb_inline uint64_t ecb_gray_encode (uint64_t b) { return ecb_gray_encode64 (b); }

ecb_inline uint8_t ecb_gray_decode (uint8_t g) { return ecb_gray_decode8 (g); }
ecb_inline uint16_t ecb_gray_decode (uint16_t g) { return ecb_gray_decode16 (g); }
ecb_inline uint32_t ecb_gray_decode (uint32_t g) { return ecb_gray_decode32 (g); }
ecb_inline uint64_t ecb_gray_decode (uint64_t g) { return ecb_gray_decode64 (g); }

#endif

/*****************************************************************************/
/* 2d hilbert curves */

/* algorithm from the book Hacker's Delight, modified to not */
/* run into undefined behaviour for n==16 */
static uint32_t ecb_hilbert2d_index_to_coord32 (int n, uint32_t s);
static uint32_t ecb_hilbert2d_index_to_coord32 (int n, uint32_t s)
{
  uint32_t comp, swap, cs, t, sr;

  /* pad s on the left (unused) bits with 01 (no change groups) */
  s |= 0x55555555U << n << n;
  /* "s shift right" */
  sr = (s >> 1) & 0x55555555U;
  /* compute complement and swap info in two-bit groups */
  cs = ((s & 0x55555555U) + sr) ^ 0x55555555U;

  /* parallel prefix xor op to propagate both complement
   * and swap info together from left to right (there is
   * no step "cs ^= cs >> 1", so in effect it computes
   * two independent parallel prefix operations on two
   * interleaved sets of sixteen bits).
   */
  cs ^= cs >> 2;
  cs ^= cs >> 4;
  cs ^= cs >> 8;
  cs ^= cs >> 16;

  /* separate swap and complement bits */
  swap = cs & 0x55555555U;
  comp = (cs >> 1) & 0x55555555U;

  /* calculate coordinates in odd and even bit positions */
  t = (s & swap) ^ comp;
  s = s ^ sr ^ t ^ (t << 1);

  /* unpad/clear out any junk on the left */
  s = s & (((uint32_t)1 << n << n) - 1);

  /* Now "unshuffle" to separate the x and y bits. */
  t = (s ^ (s >> 1)) & 0x22222222U; s ^= t ^ (t << 1);
  t = (s ^ (s >> 2)) & 0x0c0c0c0cU; s ^= t ^ (t << 2);
  t = (s ^ (s >> 4)) & 0x00f000f0U; s ^= t ^ (t << 4);
  t = (s ^ (s >> 8)) & 0x0000ff00U; s ^= t ^ (t << 8);

  /* now s contains two 16-bit coordinates */
  return s;
}

/* 64 bit, a straightforward extension to the 32 bit case */
static uint64_t ecb_hilbert2d_index_to_coord64 (int n, uint64_t s);
static uint64_t ecb_hilbert2d_index_to_coord64 (int n, uint64_t s)
{
  uint64_t comp, swap, cs, t, sr;

  /* pad s on the left (unused) bits with 01 (no change groups) */
  s |= 0x5555555555555555U << n << n;
  /* "s shift right" */
  sr = (s >> 1) & 0x5555555555555555U;
  /* compute complement and swap info in two-bit groups */
  cs = ((s & 0x5555555555555555U) + sr) ^ 0x5555555555555555U;

  /* parallel prefix xor op to propagate both complement
   * and swap info together from left to right (there is
   * no step "cs ^= cs >> 1", so in effect it computes
   * two independent parallel prefix operations on two
   * interleaved sets of thirty-two bits).
   */
  cs ^= cs >> 2;
  cs ^= cs >> 4;
  cs ^= cs >> 8;
  cs ^= cs >> 16;
  cs ^= cs >> 32;

  /* separate swap and complement bits */
  swap = cs & 0x5555555555555555U;
  comp = (cs >> 1) & 0x5555555555555555U;

  /* calculate coordinates in odd and even bit positions */
  t = (s & swap) ^ comp;
  s = s ^ sr ^ t ^ (t << 1);

  /* unpad/clear out any junk on the left */
  s = s & (((uint64_t)1 << n << n) - 1);

  /* Now "unshuffle" to separate the x and y bits. */
  t = (s ^ (s >> 1)) & 0x2222222222222222U; s ^= t ^ (t << 1);
  t = (s ^ (s >> 2)) & 0x0c0c0c0c0c0c0c0cU; s ^= t ^ (t << 2);
  t = (s ^ (s >> 4)) & 0x00f000f000f000f0U; s ^= t ^ (t << 4);
  t = (s ^ (s >> 8)) & 0x0000ff000000ff00U; s ^= t ^ (t << 8);
  t = (s ^ (s >> 16)) & 0x00000000ffff0000U; s ^= t ^ (t << 16);

  /* now s contains two 32-bit coordinates */
  return s;
}

/* algorithm from the book Hacker's Delight, but a similar algorithm */
/* is given in https://doi.org/10.1002/spe.4380160103 */
/* this has been slightly improved over the original version */
ecb_function_ uint32_t ecb_hilbert2d_coord_to_index32 (int n, uint32_t xy);
ecb_function_ uint32_t ecb_hilbert2d_coord_to_index32 (int n, uint32_t xy)
{
  uint32_t row;
  uint32_t state = 0;
  uint32_t s = 0;

  do
    {
      --n;

      row = 4 * state
          | (2 & (xy >> n >> 15))
          | (1 & (xy >> n      ));

      /* these funky constants are lookup tables for two-bit values */
      s = (s << 2) | (0x361e9cb4U >> 2 * row) & 3;
      state = (0x8fe65831U >> 2 * row) & 3;
    }
  while (n > 0);

  return s;
}

/* 64 bit, essentially the same as 32 bit */
ecb_function_ uint64_t ecb_hilbert2d_coord_to_index64 (int n, uint64_t xy);
ecb_function_ uint64_t ecb_hilbert2d_coord_to_index64 (int n, uint64_t xy)
{
  uint32_t row;
  uint32_t state = 0;
  uint64_t s = 0;

  do
    {
      --n;

      row = 4 * state
          | (2 & (xy >> n >> 31))
          | (1 & (xy >> n      ));

      /* these funky constants are lookup tables for two-bit values */
      s = (s << 2) | (0x361e9cb4U >> 2 * row) & 3;
      state = (0x8fe65831U >> 2 * row) & 3;
    }
  while (n > 0);

  return s;
}
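
/* Usage sketch (illustrative only): for an order-n curve (1 <= n <= 16 for the
 * 32 bit variants), ecb_hilbert2d_index_to_coord32 turns a curve index
 * 0 <= s < 4**n into a pair of 16-bit coordinates packed into one 32-bit word
 * (one coordinate in the lower half, the other in the upper half), and
 * ecb_hilbert2d_coord_to_index32 is intended to be its inverse:
 *
 *   uint32_t xy = ecb_hilbert2d_index_to_coord32 (n, s);
 *   ecb_hilbert2d_coord_to_index32 (n, xy) == s
 */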

/*****************************************************************************/
/* division */

#if ECB_GCC_VERSION(3,0) || ECB_C99
/* C99 tightened the definition of %, so we can use a more efficient version */
#define ecb_mod(m,n) ((m) % (n) + ((m) % (n) < 0 ? (n) : 0))
#else
#define ecb_mod(m,n) ((m) < 0 ? ((n) - 1 - ((-1 - (m)) % (n))) : ((m) % (n)))
#endif
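
/* Usage sketch (illustrative only): unlike the C % operator, ecb_mod always
 * yields the mathematical (non-negative) remainder for positive n:
 *
 *   ecb_mod ( 7, 5) == 2
 *   ecb_mod (-7, 5) == 3   // -7 % 5 would be -2 in C99
 */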

…
}
#else
#define ecb_div_rd(val,div) ((val) < 0 ? - ((-(val) + (div) - 1) / (div)) : ((val)              ) / (div))
#define ecb_div_ru(val,div) ((val) < 0 ? - ((-(val)              ) / (div)) : ((val) + (div) - 1) / (div))
#endif
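
/* Usage sketch (illustrative only): ecb_div_rd rounds the quotient towards
 * negative infinity, ecb_div_ru towards positive infinity:
 *
 *   ecb_div_rd (-7, 3) == -3    ecb_div_ru (-7, 3) == -2
 *   ecb_div_rd ( 7, 3) ==  2    ecb_div_ru ( 7, 3) ==  3
 */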

/*****************************************************************************/
/* array length */

#if ecb_cplusplus_does_not_suck
/* does not work for local types (http://www.open-std.org/jtc1/sc22/wg21/docs/papers/2008/n2657.htm) */
template<typename T, int N>
static inline int ecb_array_length (const T (&arr)[N])
…
}
#else
#define ecb_array_length(name) (sizeof (name) / sizeof (name [0]))
#endif

/*****************************************************************************/
/* IEEE 754-2008 half float conversions */

ecb_function_ ecb_const uint32_t ecb_binary16_to_binary32 (uint32_t x);
ecb_function_ ecb_const uint32_t ecb_binary16_to_binary32 (uint32_t x)
{
  unsigned int s = (x & 0x8000) << (31 - 15);
  int e = (x >> 10) & 0x001f;
  unsigned int m = x & 0x03ff;

…

  return s | (e << 23) | (m << (23 - 10));
}

ecb_function_ ecb_const uint16_t ecb_binary32_to_binary16 (uint32_t x);
ecb_function_ ecb_const uint16_t ecb_binary32_to_binary16 (uint32_t x)
{
  unsigned int s = (x >> 16) & 0x00008000; /* sign bit, the easy part */
  int e = ((x >> 23) & 0x000000ff) - (127 - 15); /* the desired exponent */
  unsigned int m = x & 0x007fffff;

  x &= 0x7fffffff;

  /* if it's within range of binary16 normals, use fast path */
…

  /* handle NaNs, preserve leftmost nan bits, but make sure we don't turn them into infinities */
  m >>= 13;

  return s | 0x7c00 | m | !m;
}
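
/* Half-float usage sketch (illustrative only; 0x3c00 is 1.0 in binary16,
 * 0x3f800000 is 1.0f in binary32):
 *
 *   ecb_binary16_to_binary32 (0x3c00)     == 0x3f800000
 *   ecb_binary32_to_binary16 (0x3f800000) == 0x3c00
 */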

/*******************************************************************************/
/* fast integer to ascii */

/*
 * This code is pretty complicated because it is general. The idea behind it,
 * however, is pretty simple: first, the number is multiplied with a scaling
 * factor (2**bits / 10**(digits-1)) to convert the integer into a fixed-point
 * number with the first digit in the upper bits.
 * Then this digit is converted to text and masked out. The resulting number
 * is then multiplied by 10, by multiplying the fixed point representation
 * by 5 and shifting the (binary) decimal point one to the right, so a 4.28
 * format becomes 5.27, 6.26 and so on.
 * The rest involves only advancing the pointer if we already generated a
 * non-zero digit, so leading zeroes are overwritten.
 */

/* simply return a mask with "bits" bits set */
#define ecb_i2a_mask(type,bits) ((((type)1) << (bits)) - 1)

/* output a single digit. maskvalue is 10**digitidx */
#define ecb_i2a_digit(type,bits,digitmask,maskvalue,digitidx) \
  if (digitmask >= maskvalue) /* constant, used to decide how many digits to generate */ \
    { \
      char digit = x >> (bits - digitidx); /* calculate the topmost digit */ \
      *ptr = digit + '0'; /* output it */ \
      nz = (digitmask == maskvalue) || nz || digit; /* first term == always output last digit */ \
      ptr += nz; /* output digit only if non-zero digit seen */ \
      x = (x & ecb_i2a_mask (type, bits - digitidx)) * 5; /* *10, but shift decimal point right */ \
    }

/* convert integer to fixed point format and multiply out digits, highest first */
/* requires magic constants: max. digits and number of bits after the decimal point */
#define ecb_i2a_def(suffix,ptr,v,type,bits,digitmask,lz) \
  ecb_inline char *ecb_i2a_ ## suffix (char *ptr, uint32_t u) \
  { \
    char nz = lz; /* non-zero digit seen? */ \
    /* convert to x.bits fixed-point */ \
    type x = u * ((ecb_i2a_mask (type, bits) + digitmask) / digitmask); \
    /* output up to 10 digits */ \
    ecb_i2a_digit (type,bits,digitmask,          1, 0); \
    ecb_i2a_digit (type,bits,digitmask,         10, 1); \
    ecb_i2a_digit (type,bits,digitmask,        100, 2); \
    ecb_i2a_digit (type,bits,digitmask,       1000, 3); \
    ecb_i2a_digit (type,bits,digitmask,      10000, 4); \
    ecb_i2a_digit (type,bits,digitmask,     100000, 5); \
    ecb_i2a_digit (type,bits,digitmask,    1000000, 6); \
    ecb_i2a_digit (type,bits,digitmask,   10000000, 7); \
    ecb_i2a_digit (type,bits,digitmask,  100000000, 8); \
    ecb_i2a_digit (type,bits,digitmask, 1000000000, 9); \
    return ptr; \
  }

/* predefined versions of the above, for various digits */
/* ecb_i2a_xN = almost N digits, limit defined by macro */
/* ecb_i2a_N = up to N digits, leading zeroes suppressed */
/* ecb_i2a_0N = exactly N digits, including leading zeroes */

/* non-leading-zero versions, limited range */
#define ECB_I2A_MAX_X5 59074 /* limit for ecb_i2a_x5 */
#define ECB_I2A_MAX_X10 2932500665 /* limit for ecb_i2a_x10 */
ecb_i2a_def ( x5, ptr, v, uint32_t, 26, 10000, 0)
ecb_i2a_def (x10, ptr, v, uint64_t, 60, 1000000000, 0)

/* non-leading zero versions, all digits, 4 and 9 are optimal for 32/64 bit */
ecb_i2a_def ( 2, ptr, v, uint32_t, 10, 10, 0)
ecb_i2a_def ( 3, ptr, v, uint32_t, 12, 100, 0)
ecb_i2a_def ( 4, ptr, v, uint32_t, 26, 1000, 0)
ecb_i2a_def ( 5, ptr, v, uint64_t, 30, 10000, 0)
ecb_i2a_def ( 6, ptr, v, uint64_t, 36, 100000, 0)
ecb_i2a_def ( 7, ptr, v, uint64_t, 44, 1000000, 0)
ecb_i2a_def ( 8, ptr, v, uint64_t, 50, 10000000, 0)
ecb_i2a_def ( 9, ptr, v, uint64_t, 56, 100000000, 0)

/* leading-zero versions, all digits, 04 and 09 are optimal for 32/64 bit */
ecb_i2a_def (02, ptr, v, uint32_t, 10, 10, 1)
ecb_i2a_def (03, ptr, v, uint32_t, 12, 100, 1)
ecb_i2a_def (04, ptr, v, uint32_t, 26, 1000, 1)
ecb_i2a_def (05, ptr, v, uint64_t, 30, 10000, 1)
ecb_i2a_def (06, ptr, v, uint64_t, 36, 100000, 1)
ecb_i2a_def (07, ptr, v, uint64_t, 44, 1000000, 1)
ecb_i2a_def (08, ptr, v, uint64_t, 50, 10000000, 1)
ecb_i2a_def (09, ptr, v, uint64_t, 56, 100000000, 1)

#define ECB_I2A_I32_DIGITS 11
#define ECB_I2A_U32_DIGITS 10
#define ECB_I2A_I64_DIGITS 20
#define ECB_I2A_U64_DIGITS 21
#define ECB_I2A_MAX_DIGITS 21

ecb_function_ char * ecb_i2a_u32 (char *ptr, uint32_t u);
ecb_function_ char * ecb_i2a_u32 (char *ptr, uint32_t u)
{
#if ECB_64BIT_NATIVE
  if (ecb_expect_true (u <= ECB_I2A_MAX_X10))
    ptr = ecb_i2a_x10 (ptr, u);
  else /* x10 almost, but not fully, covers 32 bit */
    {
      uint32_t u1 = u % 1000000000;
      uint32_t u2 = u / 1000000000;

      *ptr++ = u2 + '0';
      ptr = ecb_i2a_09 (ptr, u1);
    }
#else
  if (ecb_expect_true (u <= ECB_I2A_MAX_X5))
    ptr = ecb_i2a_x5 (ptr, u);
  else if (ecb_expect_true (u <= ECB_I2A_MAX_X5 * 10000))
    {
      uint32_t u1 = u % 10000;
      uint32_t u2 = u / 10000;

      ptr = ecb_i2a_x5 (ptr, u2);
      ptr = ecb_i2a_04 (ptr, u1);
    }
  else
    {
      uint32_t u1 = u % 10000;
      uint32_t ua = u / 10000;
      uint32_t u2 = ua % 10000;
      uint32_t u3 = ua / 10000;

      ptr = ecb_i2a_2 (ptr, u3);
      ptr = ecb_i2a_04 (ptr, u2);
      ptr = ecb_i2a_04 (ptr, u1);
    }
#endif

  return ptr;
}

ecb_function_ char * ecb_i2a_i32 (char *ptr, int32_t v);
ecb_function_ char * ecb_i2a_i32 (char *ptr, int32_t v)
{
  *ptr = '-'; ptr += v < 0;
  uint32_t u = v < 0 ? -(uint32_t)v : v;

#if ECB_64BIT_NATIVE
  ptr = ecb_i2a_x10 (ptr, u); /* x10 fully covers 31 bit */
#else
  ptr = ecb_i2a_u32 (ptr, u);
#endif

  return ptr;
}

ecb_function_ char * ecb_i2a_u64 (char *ptr, uint64_t u);
ecb_function_ char * ecb_i2a_u64 (char *ptr, uint64_t u)
{
#if ECB_64BIT_NATIVE
  if (ecb_expect_true (u <= ECB_I2A_MAX_X10))
    ptr = ecb_i2a_x10 (ptr, u);
  else if (ecb_expect_false (u <= ECB_I2A_MAX_X10 * 1000000000))
    {
      uint64_t u1 = u % 1000000000;
      uint64_t u2 = u / 1000000000;

      ptr = ecb_i2a_x10 (ptr, u2);
      ptr = ecb_i2a_09 (ptr, u1);
    }
  else
    {
      uint64_t u1 = u % 1000000000;
      uint64_t ua = u / 1000000000;
      uint64_t u2 = ua % 1000000000;
      uint64_t u3 = ua / 1000000000;

      ptr = ecb_i2a_2 (ptr, u3);
      ptr = ecb_i2a_09 (ptr, u2);
      ptr = ecb_i2a_09 (ptr, u1);
    }
#else
  if (ecb_expect_true (u <= ECB_I2A_MAX_X5))
    ptr = ecb_i2a_x5 (ptr, u);
  else
    {
      uint64_t u1 = u % 10000;
      uint64_t u2 = u / 10000;

      ptr = ecb_i2a_u64 (ptr, u2);
      ptr = ecb_i2a_04 (ptr, u1);
    }
#endif

  return ptr;
}

ecb_function_ char * ecb_i2a_i64 (char *ptr, int64_t v);
ecb_function_ char * ecb_i2a_i64 (char *ptr, int64_t v)
{
  *ptr = '-'; ptr += v < 0;
  uint64_t u = v < 0 ? -(uint64_t)v : v;

#if ECB_64BIT_NATIVE
  if (ecb_expect_true (u <= ECB_I2A_MAX_X10))
    ptr = ecb_i2a_x10 (ptr, u);
  else if (ecb_expect_false (u <= ECB_I2A_MAX_X10 * 1000000000))
    {
      uint64_t u1 = u % 1000000000;
      uint64_t u2 = u / 1000000000;

      ptr = ecb_i2a_x10 (ptr, u2);
      ptr = ecb_i2a_09 (ptr, u1);
    }
  else
    {
      uint64_t u1 = u % 1000000000;
      uint64_t ua = u / 1000000000;
      uint64_t u2 = ua % 1000000000;
      uint64_t u3 = ua / 1000000000;

      /* 2**63 is 19 digits, so the top is exactly one digit */
      *ptr++ = u3 + '0';
      ptr = ecb_i2a_09 (ptr, u2);
      ptr = ecb_i2a_09 (ptr, u1);
    }
#else
  ptr = ecb_i2a_u64 (ptr, u);
#endif

  return ptr;
}
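
/* Usage sketch (illustrative only): the ecb_i2a_* functions write the digits
 * (and a possible sign) into a caller-supplied buffer, do not write a
 * terminating NUL, and return a pointer just past the last character:
 *
 *   char buf[ECB_I2A_MAX_DIGITS + 1];
 *   char *end = ecb_i2a_i64 (buf, -4711);
 *   *end = 0;   // buf now contains the string "-4711"
 */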

/*******************************************************************************/
/* floating point stuff, can be disabled by defining ECB_NO_LIBM */

…
  || defined __sh__ \
  || defined _M_IX86 || defined ECB_MSVC_AMD64 || defined _M_IA64 \
  || (defined __arm__ && (defined __ARM_EABI__ || defined __EABI__ || defined __VFP_FP__ || defined _WIN32_WCE || defined __ANDROID__)) \
  || defined __aarch64__
#define ECB_STDFP 1
#else
#define ECB_STDFP 0
#endif

#ifndef ECB_NO_LIBM
…
#define ecb_frexpf(x,e) (float) frexp ((double) (x), (e))
#endif

/* convert a float to ieee single/binary32 */
ecb_function_ ecb_const uint32_t ecb_float_to_binary32 (float x);
ecb_function_ ecb_const uint32_t ecb_float_to_binary32 (float x)
{
  uint32_t r;

#if ECB_STDFP
  memcpy (&r, &x, 4);
…
  return r;
}

/* converts an ieee single/binary32 to a float */
ecb_function_ ecb_const float ecb_binary32_to_float (uint32_t x);
ecb_function_ ecb_const float ecb_binary32_to_float (uint32_t x)
{
  float r;

#if ECB_STDFP
  memcpy (&r, &x, 4);
…
  return r;
}

/* convert a double to ieee double/binary64 */
ecb_function_ ecb_const uint64_t ecb_double_to_binary64 (double x);
ecb_function_ ecb_const uint64_t ecb_double_to_binary64 (double x)
{
  uint64_t r;

#if ECB_STDFP
  memcpy (&r, &x, 8);
…
  return r;
}

/* converts an ieee double/binary64 to a double */
ecb_function_ ecb_const double ecb_binary64_to_double (uint64_t x);
ecb_function_ ecb_const double ecb_binary64_to_double (uint64_t x)
{
  double r;

#if ECB_STDFP
  memcpy (&r, &x, 8);
…
  return r;
}

/* convert a float to ieee half/binary16 */
ecb_function_ ecb_const uint16_t ecb_float_to_binary16 (float x);
ecb_function_ ecb_const uint16_t ecb_float_to_binary16 (float x)
{
  return ecb_binary32_to_binary16 (ecb_float_to_binary32 (x));
}

/* convert an ieee half/binary16 to float */
ecb_function_ ecb_const float ecb_binary16_to_float (uint16_t x);
ecb_function_ ecb_const float ecb_binary16_to_float (uint16_t x)
{
  return ecb_binary32_to_float (ecb_binary16_to_binary32 (x));
}

#endif