
Comparing libecb/ecb.h (file contents):
Revision 1.197 by root, Sat Jul 31 14:39:16 2021 UTC vs.
Revision 1.212 by root, Fri Mar 25 15:31:22 2022 UTC

40 40
41#ifndef ECB_H 41#ifndef ECB_H
42#define ECB_H 42#define ECB_H
43 43
44/* 16 bits major, 16 bits minor */ 44/* 16 bits major, 16 bits minor */
45#define ECB_VERSION 0x00010009 45#define ECB_VERSION 0x0001000c
46 46
47#include <string.h> /* for memcpy */ 47#include <string.h> /* for memcpy */
48 48
49#if defined (_WIN32) && !defined (__MINGW32__) 49#if defined (_WIN32) && !defined (__MINGW32__)
50 typedef signed char int8_t; 50 typedef signed char int8_t;
355#define ECB_CONCAT(a, b) ECB_CONCAT_(a, b) 355#define ECB_CONCAT(a, b) ECB_CONCAT_(a, b)
356#define ECB_STRINGIFY_(a) # a 356#define ECB_STRINGIFY_(a) # a
357#define ECB_STRINGIFY(a) ECB_STRINGIFY_(a) 357#define ECB_STRINGIFY(a) ECB_STRINGIFY_(a)
358#define ECB_STRINGIFY_EXPR(expr) ((expr), ECB_STRINGIFY_ (expr)) 358#define ECB_STRINGIFY_EXPR(expr) ((expr), ECB_STRINGIFY_ (expr))
359 359
360/* This marks larger functions that do not necessarily need to be inlined */
361/* The idea is to possibly compile the header twice, */
362/* once exposing only the declarations, another time to define external functions */
363/* TODO: possibly static would be best for these at the moment? */
360#define ecb_function_ ecb_inline 364#define ecb_function_ ecb_inline
361 365
362#if ECB_GCC_VERSION(3,1) || ECB_CLANG_VERSION(2,8) 366#if ECB_GCC_VERSION(3,1) || ECB_CLANG_VERSION(2,8)
363 #define ecb_attribute(attrlist) __attribute__ (attrlist) 367 #define ecb_attribute(attrlist) __attribute__ (attrlist)
364#else 368#else
454/* count trailing zero bits and count # of one bits */ 458/* count trailing zero bits and count # of one bits */
455#if ECB_GCC_VERSION(3,4) \ 459#if ECB_GCC_VERSION(3,4) \
456 || (ECB_CLANG_BUILTIN(__builtin_clz) && ECB_CLANG_BUILTIN(__builtin_clzll) \ 460 || (ECB_CLANG_BUILTIN(__builtin_clz) && ECB_CLANG_BUILTIN(__builtin_clzll) \
457 && ECB_CLANG_BUILTIN(__builtin_ctz) && ECB_CLANG_BUILTIN(__builtin_ctzll) \ 461 && ECB_CLANG_BUILTIN(__builtin_ctz) && ECB_CLANG_BUILTIN(__builtin_ctzll) \
458 && ECB_CLANG_BUILTIN(__builtin_popcount)) 462 && ECB_CLANG_BUILTIN(__builtin_popcount))
459 /* we assume int == 32 bit, long == 32 or 64 bit and long long == 64 bit */
460 #define ecb_ld32(x) (__builtin_clz (x) ^ 31)
461 #define ecb_ld64(x) (__builtin_clzll (x) ^ 63)
462 #define ecb_ctz32(x) __builtin_ctz (x) 463 #define ecb_ctz32(x) __builtin_ctz (x)
464 #define ecb_ctz64(x) (__SIZEOF_LONG__ == 64 ? __builtin_ctzl (x) : __builtin_ctzll (x))
463 #define ecb_ctz64(x) __builtin_ctzll (x) 465 #define ecb_clz32(x) __builtin_clz (x)
466 #define ecb_clz64(x) (__SIZEOF_LONG__ == 64 ? __builtin_clzl (x) : __builtin_clzll (x))
467 #define ecb_ld32(x) (ecb_clz32 (x) ^ 31)
468 #define ecb_ld64(x) (ecb_clz64 (x) ^ 63)
464 #define ecb_popcount32(x) __builtin_popcount (x) 469 #define ecb_popcount32(x) __builtin_popcount (x)
465 /* no popcountll */ 470 /* ecb_popcount64 is more difficult, see below */
466#else 471#else
467 ecb_function_ ecb_const int ecb_ctz32 (uint32_t x); 472 ecb_function_ ecb_const int ecb_ctz32 (uint32_t x);
468 ecb_function_ ecb_const int 473 ecb_function_ ecb_const int
469 ecb_ctz32 (uint32_t x) 474 ecb_ctz32 (uint32_t x)
470 { 475 {
471#if 1400 <= _MSC_VER && (_M_IX86 || _M_X64 || _M_IA64 || _M_ARM) 476#if 1400 <= _MSC_VER && (_M_IX86 || _M_X64 || _M_IA64 || _M_ARM)
472 unsigned long r; 477 unsigned long r;
473 _BitScanForward (&r, x); 478 _BitScanForward (&r, x);
474 return (int)r; 479 return (int)r;
475#else 480#else
476 int r = 0; 481 int r;
477 482
478 x &= ~x + 1; /* this isolates the lowest bit */ 483 x &= ~x + 1; /* this isolates the lowest bit */
479 484
480#if ECB_branchless_on_i386 485 #if 1
486 /* David Seal's algorithm, Message-ID: <32975@armltd.uucp> from 1994 */
487 /* This happens to return 32 for x == 0, but the API does not support this */
488
489 /* -0 marks unused entries */
490 static unsigned char table[64] =
491 {
492 32, 0, 1, 12, 2, 6, -0, 13, 3, -0, 7, -0, -0, -0, -0, 14,
493 10, 4, -0, -0, 8, -0, -0, 25, -0, -0, -0, -0, -0, 21, 27, 15,
494 31, 11, 5, -0, -0, -0, -0, -0, 9, -0, -0, 24, -0, -0, 20, 26,
495 30, -0, -0, -0, -0, 23, -0, 19, 29, -0, 22, 18, 28, 17, 16, -0
496 };
497
498 /* magic constant results in 33 unique values in the upper 6 bits */
499 x *= 0x0450fbafU; /* == 17 * 65 * 65535 */
500
501 r = table [x >> 26];
502 #elif 0 /* branchless on i386, typically */
503 r = 0;
481 r += !!(x & 0xaaaaaaaa) << 0; 504 r += !!(x & 0xaaaaaaaa) << 0;
482 r += !!(x & 0xcccccccc) << 1; 505 r += !!(x & 0xcccccccc) << 1;
483 r += !!(x & 0xf0f0f0f0) << 2; 506 r += !!(x & 0xf0f0f0f0) << 2;
484 r += !!(x & 0xff00ff00) << 3; 507 r += !!(x & 0xff00ff00) << 3;
485 r += !!(x & 0xffff0000) << 4; 508 r += !!(x & 0xffff0000) << 4;
486#else 509 #else /* branchless on modern compilers, typically */
510 r = 0;
487 if (x & 0xaaaaaaaa) r += 1; 511 if (x & 0xaaaaaaaa) r += 1;
488 if (x & 0xcccccccc) r += 2; 512 if (x & 0xcccccccc) r += 2;
489 if (x & 0xf0f0f0f0) r += 4; 513 if (x & 0xf0f0f0f0) r += 4;
490 if (x & 0xff00ff00) r += 8; 514 if (x & 0xff00ff00) r += 8;
491 if (x & 0xffff0000) r += 16; 515 if (x & 0xffff0000) r += 16;
507 int shift = x & 0xffffffff ? 0 : 32; 531 int shift = x & 0xffffffff ? 0 : 32;
508 return ecb_ctz32 (x >> shift) + shift; 532 return ecb_ctz32 (x >> shift) + shift;
509#endif 533#endif
510 } 534 }
511 535
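The fallback ecb_ctz32 above first isolates the lowest set bit with x &= ~x + 1 and then finds its position either through David Seal's multiply-and-table trick or by accumulating mask tests. A minimal standalone sketch (not part of ecb.h, helper names are ad hoc) that cross-checks the mask-accumulation variant against a naive shift loop:

#include <stdint.h>
#include <stdio.h>

/* naive reference: count trailing zero bits of a non-zero value */
static int ctz32_ref (uint32_t x)
{
  int r = 0;

  while (!(x & 1))
    {
      x >>= 1;
      ++r;
    }

  return r;
}

/* the mask-accumulation fallback, same steps as in ecb_ctz32 above */
static int ctz32_masks (uint32_t x)
{
  int r = 0;

  x &= ~x + 1; /* isolate the lowest set bit */

  if (x & 0xaaaaaaaa) r += 1;
  if (x & 0xcccccccc) r += 2;
  if (x & 0xf0f0f0f0) r += 4;
  if (x & 0xff00ff00) r += 8;
  if (x & 0xffff0000) r += 16;

  return r;
}

int main (void)
{
  int i;

  for (i = 0; i < 32; ++i)
    {
      uint32_t x = ((uint32_t)1 << i) | 0x80000000u; /* lowest set bit is bit i */

      if (ctz32_masks (x) != ctz32_ref (x))
        printf ("mismatch at bit %d\n", i);
    }

  printf ("done\n");
  return 0;
}
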
536 ecb_function_ ecb_const int ecb_clz32 (uint32_t x);
537 ecb_function_ ecb_const int
538 ecb_clz32 (uint32_t x)
539 {
540#if 1400 <= _MSC_VER && (_M_IX86 || _M_X64 || _M_IA64 || _M_ARM)
541 unsigned long r;
542 _BitScanReverse (&r, x);
543 return (int)r;
544#else
545
546 /* Robert Harley's algorithm from comp.arch 1996-12-07 */
547 /* This happens to return 32 for x == 0, but the API does not support this */
548
549 /* -0 marks unused table elements */
550 static unsigned char table[64] =
551 {
552 32, 31, -0, 16, -0, 30, 3, -0, 15, -0, -0, -0, 29, 10, 2, -0,
553 -0, -0, 12, 14, 21, -0, 19, -0, -0, 28, -0, 25, -0, 9, 1, -0,
554 17, -0, 4, -0, -0, -0, 11, -0, 13, 22, 20, -0, 26, -0, -0, 18,
555 5, -0, -0, 23, -0, 27, -0, 6, -0, 24, 7, -0, 8, -0, 0, -0
556 };
557
558 /* propagate leftmost 1 bit to the right */
559 x |= x >> 1;
560 x |= x >> 2;
561 x |= x >> 4;
562 x |= x >> 8;
563 x |= x >> 16;
564
565 /* magic constant results in 33 unique values in the upper 6 bits */
566 x *= 0x06EB14F9U; /* == 7 * 255 * 255 * 255 */
567
568 return table [x >> 26];
569#endif
570 }
571
572 ecb_function_ ecb_const int ecb_clz64 (uint64_t x);
573 ecb_function_ ecb_const int
574 ecb_clz64 (uint64_t x)
575 {
576#if 1400 <= _MSC_VER && (_M_X64 || _M_IA64 || _M_ARM)
577 unsigned long r;
578 _BitScanReverse64 (&r, x);
579 return (int)r;
580#else
581 uint32_t l = x >> 32;
582 int shift = l ? 0 : 32;
583 return ecb_clz32 (l ? l : x) + shift;
584#endif
585 }
586
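The ecb_clz32 fallback (Robert Harley's method) first smears the highest set bit into every lower position, then uses a multiply-and-table lookup to read off the position without a loop. A standalone sketch (not part of ecb.h) showing just the smear step:

#include <stdint.h>
#include <stdio.h>

int main (void)
{
  uint32_t x = 0x00012345u; /* highest set bit is bit 16 */

  /* the smear step: propagate the leftmost 1 bit into every lower position */
  x |= x >> 1;
  x |= x >> 2;
  x |= x >> 4;
  x |= x >> 8;
  x |= x >> 16;

  printf ("0x%08x\n", x); /* prints 0x0001ffff, a solid run of ones up to bit 16 */

  /* only 33 distinct values can result (one per highest-bit position, plus 0), */
  /* which is what the multiply-and-table step maps to the leading-zero count   */
  return 0;
}
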
512 ecb_function_ ecb_const int ecb_popcount32 (uint32_t x); 587 ecb_function_ ecb_const int ecb_popcount32 (uint32_t x);
513 ecb_function_ ecb_const int 588 ecb_function_ ecb_const int
514 ecb_popcount32 (uint32_t x) 589 ecb_popcount32 (uint32_t x)
515 { 590 {
516 x -= (x >> 1) & 0x55555555; 591 x -= (x >> 1) & 0x55555555;
591 x = ( x >> 16 ) | ( x << 16); 666 x = ( x >> 16 ) | ( x << 16);
592 667
593 return x; 668 return x;
594} 669}
595 670
596/* popcount64 is only available on 64 bit cpus as gcc builtin */
597/* so for this version we are lazy */
598ecb_function_ ecb_const int ecb_popcount64 (uint64_t x); 671ecb_function_ ecb_const int ecb_popcount64 (uint64_t x);
599ecb_function_ ecb_const int 672ecb_function_ ecb_const int
600ecb_popcount64 (uint64_t x) 673ecb_popcount64 (uint64_t x)
601{ 674{
675 /* popcount64 is only available on 64 bit cpus as gcc builtin. */
676 /* also, gcc/clang make this surprisingly difficult to use */
677#if (__SIZEOF_LONG__ == 8) && (ECB_GCC_VERSION(3,4) || ECB_CLANG_BUILTIN (__builtin_popcountl))
678 return __builtin_popcountl (x);
679#else
602 return ecb_popcount32 (x) + ecb_popcount32 (x >> 32); 680 return ecb_popcount32 (x) + ecb_popcount32 (x >> 32);
681#endif
603} 682}
604 683
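ecb_popcount64 either defers to __builtin_popcountl (when long is 64 bit) or adds the popcounts of the two 32-bit halves. A standalone sketch (not part of ecb.h) of the halves decomposition, using the classic SWAR popcount that ecb_popcount32 is built on (only its first step is visible in this hunk):

#include <stdint.h>
#include <stdio.h>

/* classic SWAR popcount for 32 bits, same approach as ecb_popcount32 */
static int popcount32 (uint32_t x)
{
  x -= (x >> 1) & 0x55555555;
  x  = (x & 0x33333333) + ((x >> 2) & 0x33333333);
  x  = (x + (x >> 4)) & 0x0f0f0f0f;
  return (x * 0x01010101) >> 24; /* sum of all bytes ends up in the top byte */
}

int main (void)
{
  uint64_t v = 0xf0f0f0f0f0f0f0f0ULL; /* 32 set bits */

  /* 64-bit popcount as the sum of both 32-bit halves */
  int n = popcount32 ((uint32_t)v) + popcount32 ((uint32_t)(v >> 32));

  printf ("%d\n", n); /* prints 32 */
  return 0;
}
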
605ecb_inline ecb_const uint8_t ecb_rotl8 (uint8_t x, unsigned int count); 684ecb_inline ecb_const uint8_t ecb_rotl8 (uint8_t x, unsigned int count);
606ecb_inline ecb_const uint8_t ecb_rotr8 (uint8_t x, unsigned int count); 685ecb_inline ecb_const uint8_t ecb_rotr8 (uint8_t x, unsigned int count);
607ecb_inline ecb_const uint16_t ecb_rotl16 (uint16_t x, unsigned int count); 686ecb_inline ecb_const uint16_t ecb_rotl16 (uint16_t x, unsigned int count);
609ecb_inline ecb_const uint32_t ecb_rotl32 (uint32_t x, unsigned int count); 688ecb_inline ecb_const uint32_t ecb_rotl32 (uint32_t x, unsigned int count);
610ecb_inline ecb_const uint32_t ecb_rotr32 (uint32_t x, unsigned int count); 689ecb_inline ecb_const uint32_t ecb_rotr32 (uint32_t x, unsigned int count);
611ecb_inline ecb_const uint64_t ecb_rotl64 (uint64_t x, unsigned int count); 690ecb_inline ecb_const uint64_t ecb_rotl64 (uint64_t x, unsigned int count);
612ecb_inline ecb_const uint64_t ecb_rotr64 (uint64_t x, unsigned int count); 691ecb_inline ecb_const uint64_t ecb_rotr64 (uint64_t x, unsigned int count);
613 692
614ecb_inline ecb_const uint8_t ecb_rotl8 (uint8_t x, unsigned int count) { return (x >> ( 8 - count)) | (x << count); } 693ecb_inline ecb_const uint8_t ecb_rotl8 (uint8_t x, unsigned int count) { return (x >> (-count & 7)) | (x << (count & 7)); }
615ecb_inline ecb_const uint8_t ecb_rotr8 (uint8_t x, unsigned int count) { return (x << ( 8 - count)) | (x >> count); } 694ecb_inline ecb_const uint8_t ecb_rotr8 (uint8_t x, unsigned int count) { return (x << (-count & 7)) | (x >> (count & 7)); }
616ecb_inline ecb_const uint16_t ecb_rotl16 (uint16_t x, unsigned int count) { return (x >> (16 - count)) | (x << count); } 695ecb_inline ecb_const uint16_t ecb_rotl16 (uint16_t x, unsigned int count) { return (x >> (-count & 15)) | (x << (count & 15)); }
617ecb_inline ecb_const uint16_t ecb_rotr16 (uint16_t x, unsigned int count) { return (x << (16 - count)) | (x >> count); } 696ecb_inline ecb_const uint16_t ecb_rotr16 (uint16_t x, unsigned int count) { return (x << (-count & 15)) | (x >> (count & 15)); }
618ecb_inline ecb_const uint32_t ecb_rotl32 (uint32_t x, unsigned int count) { return (x >> (32 - count)) | (x << count); } 697ecb_inline ecb_const uint32_t ecb_rotl32 (uint32_t x, unsigned int count) { return (x >> (-count & 31)) | (x << (count & 31)); }
619ecb_inline ecb_const uint32_t ecb_rotr32 (uint32_t x, unsigned int count) { return (x << (32 - count)) | (x >> count); } 698ecb_inline ecb_const uint32_t ecb_rotr32 (uint32_t x, unsigned int count) { return (x << (-count & 31)) | (x >> (count & 31)); }
620ecb_inline ecb_const uint64_t ecb_rotl64 (uint64_t x, unsigned int count) { return (x >> (64 - count)) | (x << count); } 699ecb_inline ecb_const uint64_t ecb_rotl64 (uint64_t x, unsigned int count) { return (x >> (-count & 63)) | (x << (count & 63)); }
621ecb_inline ecb_const uint64_t ecb_rotr64 (uint64_t x, unsigned int count) { return (x << (64 - count)) | (x >> count); } 700ecb_inline ecb_const uint64_t ecb_rotr64 (uint64_t x, unsigned int count) { return (x << (-count & 63)) | (x >> (count & 63)); }
622 701
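The rewritten rotate helpers replace (x >> (width - count)) | (x << count) with (x >> (-count & (width - 1))) | (x << (count & (width - 1))). The masked form reduces both shift amounts modulo the width, so a count of 0 (or any multiple of the width) no longer produces a full-width shift, which is undefined behaviour in C for the 32- and 64-bit variants. A standalone sketch (not part of ecb.h) of the 32-bit case:

#include <stdint.h>
#include <stdio.h>

/* rotate-left in the new, wrap-around style */
static uint32_t rotl32 (uint32_t x, unsigned int count)
{
  return (x >> (-count & 31)) | (x << (count & 31));
}

int main (void)
{
  printf ("0x%08x\n", rotl32 (0x80000001u, 1));  /* 0x00000003 */
  printf ("0x%08x\n", rotl32 (0x80000001u, 0));  /* 0x80000001: count 0 is now well-defined */
  printf ("0x%08x\n", rotl32 (0x80000001u, 32)); /* 0x80000001: counts wrap modulo 32 */
  return 0;
}

The same masking applies to the 8-, 16- and 64-bit variants with masks 7, 15 and 63.
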
623#if ECB_CPP 702#if ECB_CPP
624 703
625inline uint8_t ecb_ctz (uint8_t v) { return ecb_ctz32 (v); } 704inline uint8_t ecb_ctz (uint8_t v) { return ecb_ctz32 (v); }
626inline uint16_t ecb_ctz (uint16_t v) { return ecb_ctz32 (v); } 705inline uint16_t ecb_ctz (uint16_t v) { return ecb_ctz32 (v); }
807template<typename T> inline void ecb_poke_le_u (void *ptr, T v) { return ecb_poke_u<T> (ptr, ecb_host_to_le (v)); } 886template<typename T> inline void ecb_poke_le_u (void *ptr, T v) { return ecb_poke_u<T> (ptr, ecb_host_to_le (v)); }
808 887
809#endif 888#endif
810 889
811/*****************************************************************************/ 890/*****************************************************************************/
891/* pointer/integer hashing */
892
893/* based on hash by Chris Wellons, https://nullprogram.com/blog/2018/07/31/ */
894ecb_function_ uint32_t ecb_mix32 (uint32_t v);
895ecb_function_ uint32_t ecb_mix32 (uint32_t v)
896{
897 v ^= v >> 16; v *= 0x7feb352dU;
898 v ^= v >> 15; v *= 0x846ca68bU;
899 v ^= v >> 16;
900 return v;
901}
902
903ecb_function_ uint32_t ecb_unmix32 (uint32_t v);
904ecb_function_ uint32_t ecb_unmix32 (uint32_t v)
905{
906 v ^= v >> 16 ; v *= 0x43021123U;
907 v ^= v >> 15 ^ v >> 30; v *= 0x1d69e2a5U;
908 v ^= v >> 16 ;
909 return v;
910}
911
912/* based on splitmix64, by Sebastiano Vigna, https://prng.di.unimi.it/splitmix64.c */
913ecb_function_ uint64_t ecb_mix64 (uint64_t v);
914ecb_function_ uint64_t ecb_mix64 (uint64_t v)
915{
916 v ^= v >> 30; v *= 0xbf58476d1ce4e5b9U;
917 v ^= v >> 27; v *= 0x94d049bb133111ebU;
918 v ^= v >> 31;
919 return v;
920}
921
922ecb_function_ uint64_t ecb_unmix64 (uint64_t v);
923ecb_function_ uint64_t ecb_unmix64 (uint64_t v)
924{
925 v ^= v >> 31 ^ v >> 62; v *= 0x319642b2d24d8ec3U;
926 v ^= v >> 27 ^ v >> 54; v *= 0x96de1b173f119089U;
927 v ^= v >> 30 ^ v >> 60;
928 return v;
929}
930
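ecb_unmix32 and ecb_unmix64 are intended as exact inverses of ecb_mix32 and ecb_mix64: each multiplier is meant to be the modular inverse of the corresponding mix multiplier, and each xorshift step is undone by xoring in the higher-order shifted copies. A standalone round-trip check (not part of ecb.h; it merely re-verifies the 32-bit pair shown above):

#include <stdint.h>
#include <stdio.h>

/* copies of the mix32/unmix32 pair above, for a self-contained check */
static uint32_t mix32 (uint32_t v)
{
  v ^= v >> 16; v *= 0x7feb352dU;
  v ^= v >> 15; v *= 0x846ca68bU;
  v ^= v >> 16;
  return v;
}

static uint32_t unmix32 (uint32_t v)
{
  v ^= v >> 16           ; v *= 0x43021123U;
  v ^= v >> 15 ^ v >> 30; v *= 0x1d69e2a5U;
  v ^= v >> 16           ;
  return v;
}

int main (void)
{
  uint32_t v;
  int ok = 1;

  for (v = 0; v < 100000; ++v)
    if (unmix32 (mix32 (v)) != v || unmix32 (mix32 (~v)) != ~v)
      ok = 0;

  printf ("mix32/unmix32 round trip %s\n", ok ? "ok" : "FAILED");
  return 0;
}
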
931ecb_function_ uintptr_t ecb_ptrmix (void *p);
932ecb_function_ uintptr_t ecb_ptrmix (void *p)
933{
934 #if ECB_PTRSIZE <= 4
935 return ecb_mix32 ((uint32_t)p);
936 #else
937 return ecb_mix64 ((uint64_t)p);
938 #endif
939}
940
941ecb_function_ void *ecb_ptrunmix (uintptr_t v);
942ecb_function_ void *ecb_ptrunmix (uintptr_t v)
943{
944 #if ECB_PTRSIZE <= 4
945 return (void *)ecb_unmix32 (v);
946 #else
947 return (void *)ecb_unmix64 (v);
948 #endif
949}
950
951#if ECB_CPP
952
953template<typename T>
954inline uintptr_t ecb_ptrmix (T *p)
955{
956 return ecb_ptrmix (static_cast<void *>(p));
957}
958
959template<typename T>
960inline T *ecb_ptrunmix (uintptr_t v)
961{
962 return static_cast<T *>(ecb_ptrunmix (v));
963}
964
965#endif
966
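ecb_ptrmix turns a pointer into a well-distributed integer, e.g. for picking a hash-table bucket, and ecb_ptrunmix maps that value back to the original pointer. A standalone usage sketch (not part of ecb.h) that stands in the 64-bit splitmix64 finaliser for ecb_ptrmix, assuming a typical 64-bit platform and a power-of-two table size:

#include <stdint.h>
#include <stdio.h>

/* standalone stand-in for ecb_ptrmix on a 64-bit platform */
static uintptr_t ptrmix (void *p)
{
  uint64_t v = (uint64_t)(uintptr_t)p;

  v ^= v >> 30; v *= 0xbf58476d1ce4e5b9U;
  v ^= v >> 27; v *= 0x94d049bb133111ebU;
  v ^= v >> 31;

  return (uintptr_t)v;
}

int main (void)
{
  int objects[4];
  size_t table_size = 64; /* power of two, so masking selects a bucket */
  int i;

  for (i = 0; i < 4; ++i)
    printf ("%p -> bucket %zu\n",
            (void *)&objects[i],
            (size_t)(ptrmix (&objects[i]) & (table_size - 1)));

  return 0;
}
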
967/*****************************************************************************/
968/* gray code */
969
970ecb_inline uint_fast8_t ecb_gray_encode8 (uint_fast8_t b) { return b ^ (b >> 1); }
971ecb_inline uint_fast16_t ecb_gray_encode16 (uint_fast16_t b) { return b ^ (b >> 1); }
972ecb_inline uint_fast32_t ecb_gray_encode32 (uint_fast32_t b) { return b ^ (b >> 1); }
973ecb_inline uint_fast64_t ecb_gray_encode64 (uint_fast64_t b) { return b ^ (b >> 1); }
974
975ecb_function_ uint8_t ecb_gray_decode8 (uint8_t g);
976ecb_function_ uint8_t ecb_gray_decode8 (uint8_t g)
977{
978 g ^= g >> 1;
979 g ^= g >> 2;
980 g ^= g >> 4;
981
982 return g;
983}
984
985ecb_function_ uint16_t ecb_gray_decode16 (uint16_t g);
986ecb_function_ uint16_t ecb_gray_decode16 (uint16_t g)
987{
988 g ^= g >> 1;
989 g ^= g >> 2;
990 g ^= g >> 4;
991 g ^= g >> 8;
992
993 return g;
994}
995
996ecb_function_ uint32_t ecb_gray_decode32 (uint32_t g);
997ecb_function_ uint32_t ecb_gray_decode32 (uint32_t g)
998{
999 g ^= g >> 1;
1000 g ^= g >> 2;
1001 g ^= g >> 4;
1002 g ^= g >> 8;
1003 g ^= g >> 16;
1004
1005 return g;
1006}
1007
1008ecb_function_ uint64_t ecb_gray_decode64 (uint64_t g);
1009ecb_function_ uint64_t ecb_gray_decode64 (uint64_t g)
1010{
1011 g ^= g >> 1;
1012 g ^= g >> 2;
1013 g ^= g >> 4;
1014 g ^= g >> 8;
1015 g ^= g >> 16;
1016 g ^= g >> 32;
1017
1018 return g;
1019}
1020
1021#if ECB_CPP
1022
1023ecb_inline uint8_t ecb_gray_encode (uint8_t b) { return ecb_gray_encode8 (b); }
1024ecb_inline uint16_t ecb_gray_encode (uint16_t b) { return ecb_gray_encode16 (b); }
1025ecb_inline uint32_t ecb_gray_encode (uint32_t b) { return ecb_gray_encode32 (b); }
1026ecb_inline uint64_t ecb_gray_encode (uint64_t b) { return ecb_gray_encode64 (b); }
1027
1028ecb_inline uint8_t ecb_gray_decode (uint8_t g) { return ecb_gray_decode8 (g); }
1029ecb_inline uint16_t ecb_gray_decode (uint16_t g) { return ecb_gray_decode16 (g); }
1030ecb_inline uint32_t ecb_gray_decode (uint32_t g) { return ecb_gray_decode32 (g); }
1031ecb_inline uint64_t ecb_gray_decode (uint64_t g) { return ecb_gray_decode64 (g); }
1032
1033#endif
1034
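Gray encoding b ^ (b >> 1) maps consecutive integers to codes that differ in exactly one bit; decoding xors together all the right-shifted copies, which the ecb_gray_decode* functions compute in a logarithmic number of steps. A standalone sketch (not part of ecb.h) that checks both properties for the 8-bit case:

#include <stdint.h>
#include <stdio.h>

static uint8_t gray_encode8 (uint8_t b) { return b ^ (b >> 1); }

static uint8_t gray_decode8 (uint8_t g)
{
  g ^= g >> 1;
  g ^= g >> 2;
  g ^= g >> 4;
  return g;
}

/* portable popcount, just for the check */
static int bits_set (unsigned int v)
{
  int n = 0;

  while (v)
    {
      v &= v - 1; /* clear the lowest set bit */
      ++n;
    }

  return n;
}

int main (void)
{
  int i, ok = 1;

  for (i = 0; i < 256; ++i)
    {
      uint8_t g = gray_encode8 ((uint8_t)i);

      /* decoding restores the original value */
      if (gray_decode8 (g) != (uint8_t)i)
        ok = 0;

      /* consecutive codes differ in exactly one bit */
      if (i > 0 && bits_set (g ^ gray_encode8 ((uint8_t)(i - 1))) != 1)
        ok = 0;
    }

  printf ("gray code round trip and single-bit steps %s\n", ok ? "hold" : "FAILED");
  return 0;
}
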
1035/*****************************************************************************/
1036/* 2d hilbert curves */
1037
1038/* algorithm from the book Hacker's Delight, modified to not */
1039/* run into undefined behaviour for n==16 */
1040static uint32_t ecb_hilbert2d_index_to_coord32 (int n, uint32_t s);
1041static uint32_t ecb_hilbert2d_index_to_coord32 (int n, uint32_t s)
1042{
1043 uint32_t comp, swap, cs, t, sr;
1044
1045 /* pad s on the left (unused) bits with 01 (no change groups) */
1046 s |= 0x55555555U << n << n;
1047 /* "s shift right" */
1048 sr = (s >> 1) & 0x55555555U;
1049 /* compute complement and swap info in two-bit groups */
1050 cs = ((s & 0x55555555U) + sr) ^ 0x55555555U;
1051
1052 /* parallel prefix xor op to propagate both complement
1053 * and swap info together from left to right (there is
1054 * no step "cs ^= cs >> 1", so in effect it computes
1055 * two independent parallel prefix operations on two
1056 * interleaved sets of sixteen bits).
1057 */
1058 cs ^= cs >> 2;
1059 cs ^= cs >> 4;
1060 cs ^= cs >> 8;
1061 cs ^= cs >> 16;
1062
1063 /* separate swap and complement bits */
1064 swap = cs & 0x55555555U;
1065 comp = (cs >> 1) & 0x55555555U;
1066
1067 /* calculate coordinates in odd and even bit positions */
1068 t = (s & swap) ^ comp;
1069 s = s ^ sr ^ t ^ (t << 1);
1070
1071 /* unpad/clear out any junk on the left */
1072 s = s & ((1 << n << n) - 1);
1073
1074 /* Now "unshuffle" to separate the x and y bits. */
1075 t = (s ^ (s >> 1)) & 0x22222222U; s ^= t ^ (t << 1);
1076 t = (s ^ (s >> 2)) & 0x0c0c0c0cU; s ^= t ^ (t << 2);
1077 t = (s ^ (s >> 4)) & 0x00f000f0U; s ^= t ^ (t << 4);
1078 t = (s ^ (s >> 8)) & 0x0000ff00U; s ^= t ^ (t << 8);
1079
1080 /* now s contains two 16-bit coordinates */
1081 return s;
1082}
1083
1084/* 64 bit, a straightforward extension to the 32 bit case */
1085static uint64_t ecb_hilbert2d_index_to_coord64 (int n, uint64_t s);
1086static uint64_t ecb_hilbert2d_index_to_coord64 (int n, uint64_t s)
1087{
1088 uint64_t comp, swap, cs, t, sr;
1089
1090 /* pad s on the left (unused) bits with 01 (no change groups) */
1091 s |= 0x5555555555555555U << n << n;
1092 /* "s shift right" */
1093 sr = (s >> 1) & 0x5555555555555555U;
1094 /* compute complement and swap info in two-bit groups */
1095 cs = ((s & 0x5555555555555555U) + sr) ^ 0x5555555555555555U;
1096
1097 /* parallel prefix xor op to propagate both complement
1098 * and swap info together from left to right (there is
1099 * no step "cs ^= cs >> 1", so in effect it computes
1100 * two independent parallel prefix operations on two
1101 * interleaved sets of thirty-two bits).
1102 */
1103 cs ^= cs >> 2;
1104 cs ^= cs >> 4;
1105 cs ^= cs >> 8;
1106 cs ^= cs >> 16;
1107 cs ^= cs >> 32;
1108
1109 /* separate swap and complement bits */
1110 swap = cs & 0x5555555555555555U;
1111 comp = (cs >> 1) & 0x5555555555555555U;
1112
1113 /* calculate coordinates in odd and even bit positions */
1114 t = (s & swap) ^ comp;
1115 s = s ^ sr ^ t ^ (t << 1);
1116
1117 /* unpad/clear out any junk on the left */
1118 s = s & ((1 << n << n) - 1);
1119
1120 /* Now "unshuffle" to separate the x and y bits. */
1121 t = (s ^ (s >> 1)) & 0x2222222222222222U; s ^= t ^ (t << 1);
1122 t = (s ^ (s >> 2)) & 0x0c0c0c0c0c0c0c0cU; s ^= t ^ (t << 2);
1123 t = (s ^ (s >> 4)) & 0x00f000f000f000f0U; s ^= t ^ (t << 4);
1124 t = (s ^ (s >> 8)) & 0x0000ff000000ff00U; s ^= t ^ (t << 8);
1125 t = (s ^ (s >> 16)) & 0x00000000ffff0000U; s ^= t ^ (t << 16);
1126
1127 /* now s contains two 32-bit coordinates */
1128 return s;
1129}
1130
1131/* algorithm from the book Hacker's Delight, but a similar algorithm */
1132/* is given in https://doi.org/10.1002/spe.4380160103 */
1133/* this has been slightly improved over the original version */
1134ecb_function_ uint32_t ecb_hilbert2d_coord_to_index32 (int n, uint32_t xy);
1135ecb_function_ uint32_t ecb_hilbert2d_coord_to_index32 (int n, uint32_t xy)
1136{
1137 uint32_t row;
1138 uint32_t state = 0;
1139 uint32_t s = 0;
1140
1141 do
1142 {
1143 --n;
1144
1145 row = 4 * state
1146 | (2 & (xy >> n >> 15))
1147 | (1 & (xy >> n ));
1148
1149 /* these funky constants are lookup tables for two-bit values */
1150 s = (s << 2) | (0x361e9cb4U >> 2 * row) & 3;
1151 state = (0x8fe65831U >> 2 * row) & 3;
1152 }
1153 while (n > 0);
1154
1155 return s;
1156}
1157
1158/* 64 bit, essentially the same as 32 bit */
1159ecb_function_ uint64_t ecb_hilbert2d_coord_to_index64 (int n, uint64_t xy);
1160ecb_function_ uint64_t ecb_hilbert2d_coord_to_index64 (int n, uint64_t xy)
1161{
1162 uint32_t row;
1163 uint32_t state = 0;
1164 uint64_t s = 0;
1165
1166 do
1167 {
1168 --n;
1169
1170 row = 4 * state
1171 | (2 & (xy >> n >> 31))
1172 | (1 & (xy >> n ));
1173
1174 /* these funky constants are lookup tables for two-bit values */
1175 s = (s << 2) | (0x361e9cb4U >> 2 * row) & 3;
1176 state = (0x8fe65831U >> 2 * row) & 3;
1177 }
1178 while (n > 0);
1179
1180 return s;
1181}
1182
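ecb_hilbert2d_coord_to_index32/64 are meant to invert ecb_hilbert2d_index_to_coord32/64 for an order-n curve (n bits per axis, 2n bits of index, both coordinates packed into one word). A minimal round-trip check, assuming this revision's ecb.h is on the include path:

#include <stdint.h>
#include <stdio.h>

#include "ecb.h"

int main (void)
{
  int n = 4; /* order-4 curve: 16x16 cells, indices 0 .. 255 */
  uint32_t s;
  int ok = 1;

  for (s = 0; s < (uint32_t)1 << (2 * n); ++s)
    {
      uint32_t xy = ecb_hilbert2d_index_to_coord32 (n, s);

      /* mapping a curve index to coordinates and back must be the identity */
      if (ecb_hilbert2d_coord_to_index32 (n, xy) != s)
        ok = 0;
    }

  printf ("hilbert round trip %s\n", ok ? "ok" : "FAILED");
  return 0;
}
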
1183/*****************************************************************************/
812/* division */ 1184/* division */
813 1185
814#if ECB_GCC_VERSION(3,0) || ECB_C99 1186#if ECB_GCC_VERSION(3,0) || ECB_C99
815 /* C99 tightened the definition of %, so we can use a more efficient version */ 1187 /* C99 tightened the definition of %, so we can use a more efficient version */
816 #define ecb_mod(m,n) ((m) % (n) + ((m) % (n) < 0 ? (n) : 0)) 1188 #define ecb_mod(m,n) ((m) % (n) + ((m) % (n) < 0 ? (n) : 0))
961 * format becomes 5.27, 6.26 and so on. 1333 * format becomes 5.27, 6.26 and so on.
962 * The rest involves only advancing the pointer if we already generated a 1334 * The rest involves only advancing the pointer if we already generated a
963 * non-zero digit, so leading zeroes are overwritten. 1335 * non-zero digit, so leading zeroes are overwritten.
964 */ 1336 */
965 1337
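The digit macros below implement the fixed-point conversion described in the comment above: scale the value so its leading decimal digit sits in the integer part, emit that digit, mask it off, and bring up the next one (in the header via "*5 and move the binary point one to the right"). A simplified standalone sketch of the same idea (not part of ecb.h), keeping the binary point fixed and multiplying by 10 instead:

#include <stdint.h>
#include <stdio.h>

int main (void)
{
  uint32_t u = 4711; /* value to convert, 0 .. 9999 */
  char buf[5], *ptr = buf;
  int i;

  /* scale into 6.26 fixed point so the thousands digit lands in the integer part; */
  /* rounding the scale factor up makes the approximation err slightly high, and   */
  /* the accumulated error stays far below one digit step over four digits         */
  uint32_t x = u * ((((uint32_t)1 << 26) + 999) / 1000);

  for (i = 0; i < 4; ++i)
    {
      *ptr++ = (char)('0' + (x >> 26));            /* integer part is the next digit */
      x = (x & (((uint32_t)1 << 26) - 1)) * 10;    /* drop it, bring up the next one */
    }

  *ptr = 0;
  printf ("%s\n", buf); /* prints 4711; smaller values keep their leading zeroes */
  return 0;
}

The real macros additionally track whether a non-zero digit has been seen, so the ecb_i2a_N variants can suppress leading zeroes while the ecb_i2a_0N variants keep them.
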
966// simply return a mask with "bits" bits set 1338/* simply return a mask with "bits" bits set */
967#define ecb_i2a_mask(type,bits) ((((type)1) << (bits)) - 1) 1339#define ecb_i2a_mask(type,bits) ((((type)1) << (bits)) - 1)
968 1340
969// output a single digit. maskvalue is 10**digitidx 1341/* output a single digit. maskvalue is 10**digitidx */
970#define ecb_i2a_digit(type,bits,digitmask,maskvalue,digitidx) \ 1342#define ecb_i2a_digit(type,bits,digitmask,maskvalue,digitidx) \
971 if (digitmask >= maskvalue) /* constant, used to decide how many digits to generate */ \ 1343 if (digitmask >= maskvalue) /* constant, used to decide how many digits to generate */ \
972 { \ 1344 { \
973 char digit = x >> (bits - digitidx); /* calculate the topmost digit */ \ 1345 char digit = x >> (bits - digitidx); /* calculate the topmost digit */ \
974 *ptr = digit + '0'; /* output it */ \ 1346 *ptr = digit + '0'; /* output it */ \
975 nz = (digitmask == maskvalue) || nz || digit; /* first term == always output last digit */ \ 1347 nz = (digitmask == maskvalue) || nz || digit; /* first term == always output last digit */ \
976 ptr += nz; /* output digit only if non-zero digit seen */ \ 1348 ptr += nz; /* output digit only if non-zero digit seen */ \
977 x = (x & ecb_i2a_mask (type, bits - digitidx)) * 5; /* *10, but shift decimal point right */ \ 1349 x = (x & ecb_i2a_mask (type, bits - digitidx)) * 5; /* *10, but shift decimal point right */ \
978 } 1350 }
979 1351
980// convert integer to fixed point format and multiply out digits, highest first 1352/* convert integer to fixed point format and multiply out digits, highest first */
981// requires magic constants: max. digits and number of bits after the decimal point 1353/* requires magic constants: max. digits and number of bits after the decimal point */
982#define ecb_i2a_def(suffix,ptr,v,type,bits,digitmask,lz) \ 1354#define ecb_i2a_def(suffix,ptr,v,type,bits,digitmask,lz) \
983ecb_inline char *ecb_i2a_ ## suffix (char *ptr, uint32_t u) \ 1355ecb_inline char *ecb_i2a_ ## suffix (char *ptr, uint32_t u) \
984{ \ 1356{ \
985 char nz = lz; /* non-zero digit seen? */ \ 1357 char nz = lz; /* non-zero digit seen? */ \
986 /* convert to x.bits fixed-point */ \ 1358 /* convert to x.bits fixed-point */ \
997 ecb_i2a_digit (type,bits,digitmask, 100000000, 8); \ 1369 ecb_i2a_digit (type,bits,digitmask, 100000000, 8); \
998 ecb_i2a_digit (type,bits,digitmask, 1000000000, 9); \ 1370 ecb_i2a_digit (type,bits,digitmask, 1000000000, 9); \
999 return ptr; \ 1371 return ptr; \
1000} 1372}
1001 1373
1002// predefined versions of the above, for various digits 1374/* predefined versions of the above, for various digits */
1003// ecb_i2a_xN = almost N digits, limit defined by macro 1375/* ecb_i2a_xN = almost N digits, limit defined by macro */
1004// ecb_i2a_N = up to N digits, leading zeroes suppressed 1376/* ecb_i2a_N = up to N digits, leading zeroes suppressed */
1005// ecb_i2a_0N = exactly N digits, including leading zeroes 1377/* ecb_i2a_0N = exactly N digits, including leading zeroes */
1006 1378
1007// non-leading-zero versions, limited range 1379/* non-leading-zero versions, limited range */
1008#define ECB_I2A_MAX_X5 59074 // limit for ecb_i2a_x5 1380#define ECB_I2A_MAX_X5 59074 /* limit for ecb_i2a_x5 */
1009#define ECB_I2A_MAX_X10 2932500665 // limit for ecb_i2a_x10 1381#define ECB_I2A_MAX_X10 2932500665 /* limit for ecb_i2a_x10 */
1010ecb_i2a_def ( x5, ptr, v, uint32_t, 26, 10000, 0) 1382ecb_i2a_def ( x5, ptr, v, uint32_t, 26, 10000, 0)
1011ecb_i2a_def (x10, ptr, v, uint64_t, 60, 1000000000, 0) 1383ecb_i2a_def (x10, ptr, v, uint64_t, 60, 1000000000, 0)
1012 1384
1013// non-leading zero versions, all digits, 4 and 9 are optimal for 32/64 bit 1385/* non-leading zero versions, all digits, 4 and 9 are optimal for 32/64 bit */
1014ecb_i2a_def ( 2, ptr, v, uint32_t, 10, 10, 0) 1386ecb_i2a_def ( 2, ptr, v, uint32_t, 10, 10, 0)
1015ecb_i2a_def ( 3, ptr, v, uint32_t, 12, 100, 0) 1387ecb_i2a_def ( 3, ptr, v, uint32_t, 12, 100, 0)
1016ecb_i2a_def ( 4, ptr, v, uint32_t, 26, 1000, 0) 1388ecb_i2a_def ( 4, ptr, v, uint32_t, 26, 1000, 0)
1017ecb_i2a_def ( 5, ptr, v, uint64_t, 30, 10000, 0) 1389ecb_i2a_def ( 5, ptr, v, uint64_t, 30, 10000, 0)
1018ecb_i2a_def ( 6, ptr, v, uint64_t, 36, 100000, 0) 1390ecb_i2a_def ( 6, ptr, v, uint64_t, 36, 100000, 0)
1019ecb_i2a_def ( 7, ptr, v, uint64_t, 44, 1000000, 0) 1391ecb_i2a_def ( 7, ptr, v, uint64_t, 44, 1000000, 0)
1020ecb_i2a_def ( 8, ptr, v, uint64_t, 50, 10000000, 0) 1392ecb_i2a_def ( 8, ptr, v, uint64_t, 50, 10000000, 0)
1021ecb_i2a_def ( 9, ptr, v, uint64_t, 56, 100000000, 0) 1393ecb_i2a_def ( 9, ptr, v, uint64_t, 56, 100000000, 0)
1022 1394
1023// leading-zero versions, all digits, 04 and 09 are optimal for 32/64 bit 1395/* leading-zero versions, all digits, 04 and 09 are optimal for 32/64 bit */
1024ecb_i2a_def (02, ptr, v, uint32_t, 10, 10, 1) 1396ecb_i2a_def (02, ptr, v, uint32_t, 10, 10, 1)
1025ecb_i2a_def (03, ptr, v, uint32_t, 12, 100, 1) 1397ecb_i2a_def (03, ptr, v, uint32_t, 12, 100, 1)
1026ecb_i2a_def (04, ptr, v, uint32_t, 26, 1000, 1) 1398ecb_i2a_def (04, ptr, v, uint32_t, 26, 1000, 1)
1027ecb_i2a_def (05, ptr, v, uint64_t, 30, 10000, 1) 1399ecb_i2a_def (05, ptr, v, uint64_t, 30, 10000, 1)
1028ecb_i2a_def (06, ptr, v, uint64_t, 36, 100000, 1) 1400ecb_i2a_def (06, ptr, v, uint64_t, 36, 100000, 1)
1040ecb_i2a_u32 (char *ptr, uint32_t u) 1412ecb_i2a_u32 (char *ptr, uint32_t u)
1041{ 1413{
1042 #if ECB_64BIT_NATIVE 1414 #if ECB_64BIT_NATIVE
1043 if (ecb_expect_true (u <= ECB_I2A_MAX_X10)) 1415 if (ecb_expect_true (u <= ECB_I2A_MAX_X10))
1044 ptr = ecb_i2a_x10 (ptr, u); 1416 ptr = ecb_i2a_x10 (ptr, u);
1045 else // x10 almost, but not fully, covers 32 bit 1417 else /* x10 almost, but not fully, covers 32 bit */
1046 { 1418 {
1047 uint32_t u1 = u % 1000000000; 1419 uint32_t u1 = u % 1000000000;
1048 uint32_t u2 = u / 1000000000; 1420 uint32_t u2 = u / 1000000000;
1049 1421
1050 *ptr++ = u2 + '0'; 1422 *ptr++ = u2 + '0';
1082{ 1454{
1083 *ptr = '-'; ptr += v < 0; 1455 *ptr = '-'; ptr += v < 0;
1084 uint32_t u = v < 0 ? -(uint32_t)v : v; 1456 uint32_t u = v < 0 ? -(uint32_t)v : v;
1085 1457
1086 #if ECB_64BIT_NATIVE 1458 #if ECB_64BIT_NATIVE
1087 ptr = ecb_i2a_x10 (ptr, u); // x10 fully covers 31 bit 1459 ptr = ecb_i2a_x10 (ptr, u); /* x10 fully covers 31 bit */
1088 #else 1460 #else
1089 ptr = ecb_i2a_u32 (ptr, u); 1461 ptr = ecb_i2a_u32 (ptr, u);
1090 #endif 1462 #endif
1091 1463
1092 return ptr; 1464 return ptr;
1155 uint64_t u1 = u % 1000000000; 1527 uint64_t u1 = u % 1000000000;
1156 uint64_t ua = u / 1000000000; 1528 uint64_t ua = u / 1000000000;
1157 uint64_t u2 = ua % 1000000000; 1529 uint64_t u2 = ua % 1000000000;
1158 uint64_t u3 = ua / 1000000000; 1530 uint64_t u3 = ua / 1000000000;
1159 1531
1160 // 2**31 is 19 digits, so the top is exactly one digit 1532 /* 2**31 is 19 digits, so the top is exactly one digit */
1161 *ptr++ = u3 + '0'; 1533 *ptr++ = u3 + '0';
1162 ptr = ecb_i2a_09 (ptr, u2); 1534 ptr = ecb_i2a_09 (ptr, u2);
1163 ptr = ecb_i2a_09 (ptr, u1); 1535 ptr = ecb_i2a_09 (ptr, u1);
1164 } 1536 }
1165 #else 1537 #else
