…

#ifndef ECB_H
#define ECB_H

/* 16 bits major, 16 bits minor */
#define ECB_VERSION 0x0001000c

#include <string.h> /* for memcpy */

#if defined (_WIN32) && !defined (__MINGW32__)
  typedef signed char int8_t;
…
ecb_inline ecb_const uint32_t ecb_rotl32 (uint32_t x, unsigned int count);
ecb_inline ecb_const uint32_t ecb_rotr32 (uint32_t x, unsigned int count);
ecb_inline ecb_const uint64_t ecb_rotl64 (uint64_t x, unsigned int count);
ecb_inline ecb_const uint64_t ecb_rotr64 (uint64_t x, unsigned int count);

ecb_inline ecb_const uint8_t  ecb_rotl8  (uint8_t  x, unsigned int count) { return (x >> (-count &  7)) | (x << (count &  7)); }
ecb_inline ecb_const uint8_t  ecb_rotr8  (uint8_t  x, unsigned int count) { return (x << (-count &  7)) | (x >> (count &  7)); }
ecb_inline ecb_const uint16_t ecb_rotl16 (uint16_t x, unsigned int count) { return (x >> (-count & 15)) | (x << (count & 15)); }
ecb_inline ecb_const uint16_t ecb_rotr16 (uint16_t x, unsigned int count) { return (x << (-count & 15)) | (x >> (count & 15)); }
ecb_inline ecb_const uint32_t ecb_rotl32 (uint32_t x, unsigned int count) { return (x >> (-count & 31)) | (x << (count & 31)); }
ecb_inline ecb_const uint32_t ecb_rotr32 (uint32_t x, unsigned int count) { return (x << (-count & 31)) | (x >> (count & 31)); }
ecb_inline ecb_const uint64_t ecb_rotl64 (uint64_t x, unsigned int count) { return (x >> (-count & 63)) | (x << (count & 63)); }
ecb_inline ecb_const uint64_t ecb_rotr64 (uint64_t x, unsigned int count) { return (x << (-count & 63)) | (x >> (count & 63)); }

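/* Illustrative usage sketch (editor's addition, not part of ecb.h): the
 * (-count & mask) form above keeps a rotate count of 0 well defined, unlike
 * the previous (x >> (width - count)) | (x << count) formulation, which
 * shifted by the full word width in that case.
 */
#include <assert.h>

static void
example_rotate (void)
{
  assert (ecb_rotl32 (0x80000001u, 1) == 0x00000003u);
  assert (ecb_rotr32 (0x00000003u, 1) == 0x80000001u);
  assert (ecb_rotl32 (0xdeadbeefu, 0) == 0xdeadbeefu); /* count == 0 is safe */
}
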
#if ECB_CPP

inline uint8_t  ecb_ctz (uint8_t  v) { return ecb_ctz32 (v); }
inline uint16_t ecb_ctz (uint16_t v) { return ecb_ctz32 (v); }
…
ecb_inline void ecb_poke_u64_u (void *ptr, uint64_t v) { memcpy (ptr, &v, sizeof (v)); }

ecb_inline void ecb_poke_be_u16_u (void *ptr, uint_fast16_t v) { ecb_poke_u16_u (ptr, ecb_host_to_be_u16 (v)); }
ecb_inline void ecb_poke_be_u32_u (void *ptr, uint_fast32_t v) { ecb_poke_u32_u (ptr, ecb_host_to_be_u32 (v)); }
ecb_inline void ecb_poke_be_u64_u (void *ptr, uint_fast64_t v) { ecb_poke_u64_u (ptr, ecb_host_to_be_u64 (v)); }

ecb_inline void ecb_poke_le_u16_u (void *ptr, uint_fast16_t v) { ecb_poke_u16_u (ptr, ecb_host_to_le_u16 (v)); }
ecb_inline void ecb_poke_le_u32_u (void *ptr, uint_fast32_t v) { ecb_poke_u32_u (ptr, ecb_host_to_le_u32 (v)); }
ecb_inline void ecb_poke_le_u64_u (void *ptr, uint_fast64_t v) { ecb_poke_u64_u (ptr, ecb_host_to_le_u64 (v)); }

#if ECB_CPP
…
template<typename T> inline void ecb_poke_u    (void *ptr, T v) { memcpy (ptr, &v, sizeof (v)); }
template<typename T> inline void ecb_poke_be_u (void *ptr, T v) { return ecb_poke_u<T> (ptr, ecb_host_to_be (v)); }
template<typename T> inline void ecb_poke_le_u (void *ptr, T v) { return ecb_poke_u<T> (ptr, ecb_host_to_le (v)); }

#endif
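
/* Illustrative usage sketch (editor's addition, not part of ecb.h): store a
 * 32 bit length field in big-endian (network) byte order at an unaligned
 * offset; the memcpy-based poke helpers make the unaligned store portable.
 * The buffer pkt is assumed to hold at least five bytes.
 */
static void
example_poke (unsigned char *pkt)
{
  ecb_poke_be_u32_u (pkt + 1, 0x11223344u); /* pkt + 1 need not be aligned */
  /* pkt[1..4] now contain 0x11 0x22 0x33 0x44 */
}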

/*****************************************************************************/
/* pointer/integer hashing */

/* based on hash by Chris Wellons, https://nullprogram.com/blog/2018/07/31/ */
ecb_function_ uint32_t ecb_mix32 (uint32_t v);
ecb_function_ uint32_t ecb_mix32 (uint32_t v)
{
  v ^= v >> 16; v *= 0x7feb352dU;
  v ^= v >> 15; v *= 0x846ca68bU;
  v ^= v >> 16;
  return v;
}

ecb_function_ uint32_t ecb_unmix32 (uint32_t v);
ecb_function_ uint32_t ecb_unmix32 (uint32_t v)
{
  v ^= v >> 16          ; v *= 0x43021123U;
  v ^= v >> 15 ^ v >> 30; v *= 0x1d69e2a5U;
  v ^= v >> 16          ;
  return v;
}

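/* Illustrative usage sketch (editor's addition, not part of ecb.h): ecb_mix32
 * is a bijective integer hash and ecb_unmix32 is intended as its exact
 * inverse, so a key can be scrambled for hashing and recovered again.
 */
#include <assert.h>

static void
example_mix32 (void)
{
  uint32_t key  = 12345;
  uint32_t hash = ecb_mix32 (key); /* well-distributed value for e.g. bucket selection */

  assert (ecb_unmix32 (hash) == key); /* round-trips exactly */
}
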
/* based on splitmix64, by Sebastiano Vigna, https://prng.di.unimi.it/splitmix64.c */
ecb_function_ uint64_t ecb_mix64 (uint64_t v);
ecb_function_ uint64_t ecb_mix64 (uint64_t v)
{
  v ^= v >> 30; v *= 0xbf58476d1ce4e5b9U;
  v ^= v >> 27; v *= 0x94d049bb133111ebU;
  v ^= v >> 31;
  return v;
}

ecb_function_ uint64_t ecb_unmix64 (uint64_t v);
ecb_function_ uint64_t ecb_unmix64 (uint64_t v)
{
  v ^= v >> 31 ^ v >> 62; v *= 0x319642b2d24d8ec3U;
  v ^= v >> 27 ^ v >> 54; v *= 0x96de1b173f119089U;
  v ^= v >> 30 ^ v >> 60;
  return v;
}

ecb_function_ uintptr_t ecb_ptrmix (void *p);
ecb_function_ uintptr_t ecb_ptrmix (void *p)
{
#if ECB_PTRSIZE <= 4
  return ecb_mix32 ((uint32_t)p);
#else
  return ecb_mix64 ((uint64_t)p);
#endif
}

ecb_function_ void *ecb_ptrunmix (uintptr_t v);
ecb_function_ void *ecb_ptrunmix (uintptr_t v)
{
#if ECB_PTRSIZE <= 4
  return (void *)ecb_unmix32 (v);
#else
  return (void *)ecb_unmix64 (v);
#endif
}

#if ECB_CPP

template<typename T>
inline uintptr_t ecb_ptrmix (T *p)
{
  return ecb_ptrmix (static_cast<void *>(p));
}

template<typename T>
inline T *ecb_ptrunmix (uintptr_t v)
{
  return static_cast<T *>(ecb_ptrunmix (v));
}

#endif

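/* Illustrative usage sketch (editor's addition, not part of ecb.h): raw
 * pointers make poor hash keys because their low bits are mostly alignment
 * zeroes; ecb_ptrmix spreads them out before reduction to a bucket index.
 * n_buckets is assumed to be a power of two here.
 */
static uintptr_t
example_ptr_bucket (void *p, uintptr_t n_buckets)
{
  return ecb_ptrmix (p) & (n_buckets - 1);
}
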
/*****************************************************************************/
/* gray code */

ecb_function_ uint_fast8_t  ecb_gray8_encode  (uint_fast8_t  b) { return b ^ (b >> 1); }
ecb_function_ uint_fast16_t ecb_gray16_encode (uint_fast16_t b) { return b ^ (b >> 1); }
ecb_function_ uint_fast32_t ecb_gray32_encode (uint_fast32_t b) { return b ^ (b >> 1); }
ecb_function_ uint_fast64_t ecb_gray64_encode (uint_fast64_t b) { return b ^ (b >> 1); }

ecb_function_ uint8_t ecb_gray8_decode (uint8_t g)
{
  g ^= g >> 1;
  g ^= g >> 2;
  g ^= g >> 4;

  return g;
}

ecb_function_ uint16_t ecb_gray16_decode (uint16_t g)
{
  g ^= g >> 1;
  g ^= g >> 2;
  g ^= g >> 4;
  g ^= g >> 8;

  return g;
}

ecb_function_ uint32_t ecb_gray32_decode (uint32_t g)
{
  g ^= g >> 1;
  g ^= g >> 2;
  g ^= g >> 4;
  g ^= g >> 8;
  g ^= g >> 16;

  return g;
}

ecb_function_ uint64_t ecb_gray64_decode (uint64_t g)
{
  g ^= g >> 1;
  g ^= g >> 2;
  g ^= g >> 4;
  g ^= g >> 8;
  g ^= g >> 16;
  g ^= g >> 32;

  return g;
}

#if ECB_CPP

ecb_function_ uint8_t  ecb_gray_encode (uint8_t  b) { return ecb_gray8_encode  (b); }
ecb_function_ uint16_t ecb_gray_encode (uint16_t b) { return ecb_gray16_encode (b); }
ecb_function_ uint32_t ecb_gray_encode (uint32_t b) { return ecb_gray32_encode (b); }
ecb_function_ uint64_t ecb_gray_encode (uint64_t b) { return ecb_gray64_encode (b); }

ecb_function_ uint8_t  ecb_gray_decode (uint8_t  g) { return ecb_gray8_decode  (g); }
ecb_function_ uint16_t ecb_gray_decode (uint16_t g) { return ecb_gray16_decode (g); }
ecb_function_ uint32_t ecb_gray_decode (uint32_t g) { return ecb_gray32_decode (g); }
ecb_function_ uint64_t ecb_gray_decode (uint64_t g) { return ecb_gray64_decode (g); }

#endif

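/* Illustrative usage sketch (editor's addition, not part of ecb.h): gray
 * codes of consecutive integers differ in exactly one bit (useful e.g. when
 * reading rotary encoders), and decode inverts encode exactly.
 */
#include <assert.h>

static void
example_gray (void)
{
  uint8_t i = 11;
  uint8_t a = ecb_gray8_encode (i);
  uint8_t b = ecb_gray8_encode (i + 1);
  uint8_t d = a ^ b;

  assert (d != 0 && (d & (d - 1)) == 0); /* exactly one bit differs */
  assert (ecb_gray8_decode (a) == i);    /* decode is the exact inverse */
}
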
/*****************************************************************************/
/* 2d hilbert curves */

/* algorithm from the book Hacker's Delight, modified to not */
/* run into undefined behaviour for n==16 */
static uint32_t
ecb_hilbert2d_index_to_coord32 (int n, uint32_t s)
{
  uint32_t comp, swap, cs, t, sr;

  /* pad s on the left (unused) bits with 01 (no change groups) */
  s |= 0x55555555U << n << n;
  /* "s shift right" */
  sr = (s >> 1) & 0x55555555U;
  /* compute complement and swap info in two-bit groups */
  cs = ((s & 0x55555555U) + sr) ^ 0x55555555U;

  /* parallel prefix xor op to propagate both complement
   * and swap info together from left to right (there is
   * no step "cs ^= cs >> 1", so in effect it computes
   * two independent parallel prefix operations on two
   * interleaved sets of sixteen bits).
   */
  cs ^= cs >> 2;
  cs ^= cs >> 4;
  cs ^= cs >> 8;
  cs ^= cs >> 16;

  /* separate swap and complement bits */
  swap =  cs       & 0x55555555U;
  comp = (cs >> 1) & 0x55555555U;

  /* calculate coordinates in odd and even bit positions */
  t = (s & swap) ^ comp;
  s = s ^ sr ^ t ^ (t << 1);

  /* unpad/clear out any junk on the left */
  s = s & ((1U << n << n) - 1);

  /* Now "unshuffle" to separate the x and y bits. */
  t = (s ^ (s >> 1)) & 0x22222222U; s ^= t ^ (t << 1);
  t = (s ^ (s >> 2)) & 0x0c0c0c0cU; s ^= t ^ (t << 2);
  t = (s ^ (s >> 4)) & 0x00f000f0U; s ^= t ^ (t << 4);
  t = (s ^ (s >> 8)) & 0x0000ff00U; s ^= t ^ (t << 8);

  /* now s contains two 16-bit coordinates */
  return s;
}

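/* Illustrative usage sketch (editor's addition, not part of ecb.h): for an
 * order-n curve (a 2**n by 2**n grid, index below 2**(2*n)) the two 16 bit
 * coordinates come back packed into one 32 bit value, one in the low half
 * and one in the high half.
 */
static void
example_hilbert_unpack (uint32_t idx)
{
  uint32_t xy = ecb_hilbert2d_index_to_coord32 (8, idx); /* order 8, so idx < 65536 */
  uint32_t c0 = xy & 0xffffU; /* one coordinate */
  uint32_t c1 = xy >> 16;     /* the other one  */

  (void)c0; (void)c1; /* use the coordinates here */
}
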
/* 64 bit, a straightforward extension to the 32 bit case */
static uint64_t
ecb_hilbert2d_index_to_coord64 (int n, uint64_t s)
{
  uint64_t comp, swap, cs, t, sr;

  /* pad s on the left (unused) bits with 01 (no change groups) */
  s |= 0x5555555555555555U << n << n;
  /* "s shift right" */
  sr = (s >> 1) & 0x5555555555555555U;
  /* compute complement and swap info in two-bit groups */
  cs = ((s & 0x5555555555555555U) + sr) ^ 0x5555555555555555U;

  /* parallel prefix xor op to propagate both complement
   * and swap info together from left to right (there is
   * no step "cs ^= cs >> 1", so in effect it computes
   * two independent parallel prefix operations on two
   * interleaved sets of thirty-two bits).
   */
  cs ^= cs >> 2;
  cs ^= cs >> 4;
  cs ^= cs >> 8;
  cs ^= cs >> 16;
  cs ^= cs >> 32;

  /* separate swap and complement bits */
  swap =  cs       & 0x5555555555555555U;
  comp = (cs >> 1) & 0x5555555555555555U;

  /* calculate coordinates in odd and even bit positions */
  t = (s & swap) ^ comp;
  s = s ^ sr ^ t ^ (t << 1);

  /* unpad/clear out any junk on the left */
  s = s & (((uint64_t)1 << n << n) - 1);

  /* Now "unshuffle" to separate the x and y bits. */
  t = (s ^ (s >>  1)) & 0x2222222222222222U; s ^= t ^ (t <<  1);
  t = (s ^ (s >>  2)) & 0x0c0c0c0c0c0c0c0cU; s ^= t ^ (t <<  2);
  t = (s ^ (s >>  4)) & 0x00f000f000f000f0U; s ^= t ^ (t <<  4);
  t = (s ^ (s >>  8)) & 0x0000ff000000ff00U; s ^= t ^ (t <<  8);
  t = (s ^ (s >> 16)) & 0x00000000ffff0000U; s ^= t ^ (t << 16);

  /* now s contains two 32-bit coordinates */
  return s;
}

/* algorithm from the book Hacker's Delight, but a similar algorithm */
/* is given in https://doi.org/10.1002/spe.4380160103 */
/* this has been slightly improved over the original version */
ecb_function_ uint32_t
ecb_hilbert2d_coord_to_index32 (int n, uint32_t xy)
{
  uint32_t row;
  uint32_t state = 0;
  uint32_t s = 0;

  do
    {
      --n;

      row = 4 * state
          | (2 & (xy >> n >> 15))
          | (1 & (xy >> n      ));

      /* these funky constants are lookup tables for two-bit values */
      s = (s << 2) | (0x361e9cb4U >> 2 * row) & 3;
      state = (0x8fe65831U >> 2 * row) & 3;
    }
  while (n > 0);

  return s;
}

/* 64 bit, essentially the same as 32 bit */
ecb_function_ uint64_t
ecb_hilbert2d_coord_to_index64 (int n, uint64_t xy)
{
  uint32_t row;
  uint32_t state = 0;
  uint64_t s = 0;

  do
    {
      --n;

      row = 4 * state
          | (2 & (xy >> n >> 31))
          | (1 & (xy >> n      ));

      /* these funky constants are lookup tables for two-bit values */
      s = (s << 2) | (0x361e9cb4U >> 2 * row) & 3;
      state = (0x8fe65831U >> 2 * row) & 3;
    }
  while (n > 0);

  return s;
}
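
/* Illustrative round-trip sketch (editor's addition, not part of ecb.h):
 * index -> coordinates -> index is expected to reproduce the original curve
 * position, as coord_to_index is the inverse mapping of index_to_coord.
 */
#include <assert.h>

static void
example_hilbert_roundtrip (void)
{
  int n = 8;            /* order-8 curve, 2**8 x 2**8 grid */
  uint32_t idx = 12345; /* any index below 2**(2*n)        */
  uint32_t xy  = ecb_hilbert2d_index_to_coord32 (n, idx);

  assert (ecb_hilbert2d_coord_to_index32 (n, xy) == idx);
}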

/*****************************************************************************/
/* division */

#if ECB_GCC_VERSION(3,0) || ECB_C99
…
}

/*******************************************************************************/
/* fast integer to ascii */

/*
 * This code is pretty complicated because it is general. The idea behind it,
 * however, is pretty simple: first, the number is multiplied by a scaling
 * factor (2**bits / 10**(digits-1)) to convert the integer into a fixed-point
 * number with the first digit in the upper bits.
 * Then this digit is converted to text and masked out. The resulting number
 * is then multiplied by 10, by multiplying the fixed point representation
 * by 5 and shifting the (binary) decimal point one to the right, so a 4.28
 * format becomes 5.27, 6.26 and so on.
 * The rest involves only advancing the pointer if we already generated a
 * non-zero digit, so leading zeroes are overwritten.
 */

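/* Worked example of the idea above (editor's addition, not part of ecb.h):
 * emit the four digits of u = 4711 via a 4.28 fixed-point value. The exact
 * scaling expression of the real macros is elided above; this sketch simply
 * uses ceil (2**28 / 10**3), and multiplies by ten directly instead of
 * multiplying by five and letting the binary point move right.
 * ptr must have room for four characters.
 */
static void
example_i2a_idea (char *ptr)
{
  uint32_t u = 4711;
  uint32_t x = u * 268436; /* 268436 == ceil (2**28 / 1000), so x is u/1000 in 4.28 fixed point */
  int i;

  for (i = 0; i < 4; ++i)
    {
      *ptr++ = (char)(x >> 28) + '0'; /* topmost digit: '4', '7', '1', '1' */
      x = (x & 0x0fffffffU) * 10;     /* drop that digit, move the next one up */
    }
}
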
/* simply return a mask with "bits" bits set */
#define ecb_i2a_mask(type,bits) ((((type)1) << (bits)) - 1)

/* output a single digit. maskvalue is 10**digitidx */
#define ecb_i2a_digit(type,bits,digitmask,maskvalue,digitidx) \
  if (digitmask >= maskvalue) /* constant, used to decide how many digits to generate */ \
    { \
      char digit = x >> (bits - digitidx); /* calculate the topmost digit */ \
      *ptr = digit + '0'; /* output it */ \
      nz = (digitmask == maskvalue) || nz || digit; /* first term == always output last digit */ \
      ptr += nz; /* output digit only if non-zero digit seen */ \
      x = (x & ecb_i2a_mask (type, bits - digitidx)) * 5; /* *10, but shift decimal point right */ \
    }

/* convert integer to fixed point format and multiply out digits, highest first */
/* requires magic constants: max. digits and number of bits after the decimal point */
#define ecb_i2a_def(suffix,ptr,v,type,bits,digitmask,lz) \
ecb_inline char *ecb_i2a_ ## suffix (char *ptr, uint32_t u) \
{ \
  char nz = lz; /* non-zero digit seen? */ \
  /* convert to x.bits fixed-point */ \
…
  ecb_i2a_digit (type,bits,digitmask,  100000000, 8); \
  ecb_i2a_digit (type,bits,digitmask, 1000000000, 9); \
  return ptr; \
}

/* predefined versions of the above, for various digits */
/* ecb_i2a_xN = almost N digits, limit defined by macro */
/* ecb_i2a_N  = up to N digits, leading zeroes suppressed */
/* ecb_i2a_0N = exactly N digits, including leading zeroes */

/* non-leading-zero versions, limited range */
#define ECB_I2A_MAX_X5       59074 /* limit for ecb_i2a_x5 */
#define ECB_I2A_MAX_X10 2932500665 /* limit for ecb_i2a_x10 */
ecb_i2a_def ( x5, ptr, v, uint32_t, 26,      10000, 0)
ecb_i2a_def (x10, ptr, v, uint64_t, 60, 1000000000, 0)

/* non-leading zero versions, all digits, 4 and 9 are optimal for 32/64 bit */
ecb_i2a_def ( 2, ptr, v, uint32_t, 10,        10, 0)
ecb_i2a_def ( 3, ptr, v, uint32_t, 12,       100, 0)
ecb_i2a_def ( 4, ptr, v, uint32_t, 26,      1000, 0)
ecb_i2a_def ( 5, ptr, v, uint64_t, 30,     10000, 0)
ecb_i2a_def ( 6, ptr, v, uint64_t, 36,    100000, 0)
ecb_i2a_def ( 7, ptr, v, uint64_t, 44,   1000000, 0)
ecb_i2a_def ( 8, ptr, v, uint64_t, 50,  10000000, 0)
ecb_i2a_def ( 9, ptr, v, uint64_t, 56, 100000000, 0)

/* leading-zero versions, all digits, 04 and 09 are optimal for 32/64 bit */
ecb_i2a_def (02, ptr, v, uint32_t, 10,        10, 1)
ecb_i2a_def (03, ptr, v, uint32_t, 12,       100, 1)
ecb_i2a_def (04, ptr, v, uint32_t, 26,      1000, 1)
ecb_i2a_def (05, ptr, v, uint64_t, 30,     10000, 1)
ecb_i2a_def (06, ptr, v, uint64_t, 36,    100000, 1)
ecb_i2a_def (07, ptr, v, uint64_t, 44,   1000000, 1)
ecb_i2a_def (08, ptr, v, uint64_t, 50,  10000000, 1)
ecb_i2a_def (09, ptr, v, uint64_t, 56, 100000000, 1)

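/* Illustrative usage sketch (editor's addition, not part of ecb.h): the
 * fixed-width converters are handy for decimal fields that need zero
 * padding, e.g. the fractional part of a timestamp. No terminating NUL is
 * written; secs is assumed to be at most ECB_I2A_MAX_X10.
 */
static char *
example_i2a_timestamp (char *ptr, uint32_t secs, uint32_t nsecs)
{
  ptr = ecb_i2a_x10 (ptr, secs); /* seconds, leading zeroes suppressed */
  *ptr++ = '.';
  ptr = ecb_i2a_09 (ptr, nsecs); /* nanoseconds, always exactly 9 digits */

  return ptr;
}
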
#define ECB_I2A_I32_DIGITS 11
#define ECB_I2A_U32_DIGITS 10
#define ECB_I2A_I64_DIGITS 20
#define ECB_I2A_U64_DIGITS 21
#define ECB_I2A_MAX_DIGITS 21

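/* Illustrative sizing sketch (editor's addition, not part of ecb.h): the
 * *_DIGITS constants bound the output length, so a worst-case buffer plus
 * one extra byte for a caller-supplied NUL is always sufficient
 * (ecb_i2a_u32 is defined just below).
 */
static void
example_i2a_buffer (uint32_t u)
{
  char buf[ECB_I2A_U32_DIGITS + 1];
  char *end = ecb_i2a_u32 (buf, u);

  *end = 0; /* buf now holds the decimal representation of u */
}
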
ecb_inline char *
ecb_i2a_u32 (char *ptr, uint32_t u)
{
#if ECB_64BIT_NATIVE
  if (ecb_expect_true (u <= ECB_I2A_MAX_X10))
    ptr = ecb_i2a_x10 (ptr, u);
  else /* x10 almost, but not fully, covers 32 bit */
    {
      uint32_t u1 = u % 1000000000;
      uint32_t u2 = u / 1000000000;

      *ptr++ = u2 + '0';
…
{
  *ptr = '-'; ptr += v < 0;
  uint32_t u = v < 0 ? -(uint32_t)v : v;

#if ECB_64BIT_NATIVE
  ptr = ecb_i2a_x10 (ptr, u); /* x10 fully covers 31 bit */
#else
  ptr = ecb_i2a_u32 (ptr, u);
#endif

  return ptr;
…
      uint64_t u1 = u  % 1000000000;
      uint64_t ua = u  / 1000000000;
      uint64_t u2 = ua % 1000000000;
      uint64_t u3 = ua / 1000000000;

      /* 2**63 is 19 digits, so the top is exactly one digit */
      *ptr++ = u3 + '0';
      ptr = ecb_i2a_09 (ptr, u2);
      ptr = ecb_i2a_09 (ptr, u1);
    }
#else