… | |
… | |
54 | #else |
54 | #else |
55 | #define ECB_PTRSIZE 4 |
55 | #define ECB_PTRSIZE 4 |
56 | typedef uint32_t uintptr_t; |
56 | typedef uint32_t uintptr_t; |
57 | typedef int32_t intptr_t; |
57 | typedef int32_t intptr_t; |
58 | #endif |
58 | #endif |
59 | typedef intptr_t ptrdiff_t; |
|
|
60 | #else |
59 | #else |
61 | #include <inttypes.h> |
60 | #include <inttypes.h> |
62 | #if UINTMAX_MAX > 0xffffffffU |
61 | #if UINTMAX_MAX > 0xffffffffU |
63 | #define ECB_PTRSIZE 8 |
62 | #define ECB_PTRSIZE 8 |
64 | #else |
63 | #else |
… | |
… | |
83 | |
82 | |
84 | #define ECB_C (__STDC__+0) /* this assumes that __STDC__ is either empty or a number */ |
83 | #define ECB_C (__STDC__+0) /* this assumes that __STDC__ is either empty or a number */ |
85 | #define ECB_C99 (__STDC_VERSION__ >= 199901L) |
84 | #define ECB_C99 (__STDC_VERSION__ >= 199901L) |
86 | #define ECB_C11 (__STDC_VERSION__ >= 201112L) |
85 | #define ECB_C11 (__STDC_VERSION__ >= 201112L) |
87 | #define ECB_CPP (__cplusplus+0) |
86 | #define ECB_CPP (__cplusplus+0) |
88 | #define ECB_CPP98 (__cplusplus >= 199711L) |
|
|
89 | #define ECB_CPP11 (__cplusplus >= 201103L) |
87 | #define ECB_CPP11 (__cplusplus >= 201103L) |
|
|
88 | |
|
|
89 | #if ECB_CPP |
|
|
90 | #define ECB_EXTERN_C extern "C" |
|
|
91 | #define ECB_EXTERN_C_BEG ECB_EXTERN_C { |
|
|
92 | #define ECB_EXTERN_C_END } |
|
|
93 | #else |
|
|
94 | #define ECB_EXTERN_C extern |
|
|
95 | #define ECB_EXTERN_C_BEG |
|
|
96 | #define ECB_EXTERN_C_END |
|
|
97 | #endif |
90 | |
98 | |
91 | /*****************************************************************************/ |
99 | /*****************************************************************************/ |
92 | |
100 | |
93 | /* ECB_NO_THREADS - ecb is not used by multiple threads, ever */ |
101 | /* ECB_NO_THREADS - ecb is not used by multiple threads, ever */ |
94 | /* ECB_NO_SMP - ecb might be used in multiple threads, but only on a single cpu */ |
102 | /* ECB_NO_SMP - ecb might be used in multiple threads, but only on a single cpu */ |
95 | |
103 | |
96 | #if ECB_NO_THREADS |
104 | #if ECB_NO_THREADS |
97 | # define ECB_NO_SMP 1 |
105 | #define ECB_NO_SMP 1 |
98 | #endif |
106 | #endif |
99 | |
107 | |
100 | #if ECB_NO_THREADS || ECB_NO_SMP |
108 | #if ECB_NO_SMP |
101 | #define ECB_MEMORY_FENCE do { } while (0) |
109 | #define ECB_MEMORY_FENCE do { } while (0) |
102 | #endif |
|
|
103 | |
|
|
104 | #ifndef ECB_MEMORY_FENCE |
|
|
105 | #if ECB_C11 && !defined __STDC_NO_ATOMICS__ |
|
|
106 | /* we assume that these memory fences work on all variables/all memory accesses, */ |
|
|
107 | /* not just C11 atomics and atomic accesses */ |
|
|
108 | #include <stdatomic.h> |
|
|
109 | #define ECB_MEMORY_FENCE atomic_thread_fence (memory_order_acq_rel) |
|
|
110 | #define ECB_MEMORY_FENCE_ACQUIRE atomic_thread_fence (memory_order_acquire) |
|
|
111 | #define ECB_MEMORY_FENCE_RELEASE atomic_thread_fence (memory_order_release) |
|
|
112 | #endif |
|
|
113 | #endif |
110 | #endif |
114 | |
111 | |
115 | #ifndef ECB_MEMORY_FENCE |
112 | #ifndef ECB_MEMORY_FENCE |
116 | #if ECB_GCC_VERSION(2,5) || defined __INTEL_COMPILER || (__llvm__ && __GNUC__) || __SUNPRO_C >= 0x5110 || __SUNPRO_CC >= 0x5110 |
113 | #if ECB_GCC_VERSION(2,5) || defined __INTEL_COMPILER || (__llvm__ && __GNUC__) || __SUNPRO_C >= 0x5110 || __SUNPRO_CC >= 0x5110 |
117 | #if __i386 || __i386__ |
114 | #if __i386 || __i386__ |
118 | #define ECB_MEMORY_FENCE __asm__ __volatile__ ("lock; orb $0, -1(%%esp)" : : : "memory") |
115 | #define ECB_MEMORY_FENCE __asm__ __volatile__ ("lock; orb $0, -1(%%esp)" : : : "memory") |
119 | #define ECB_MEMORY_FENCE_ACQUIRE ECB_MEMORY_FENCE /* non-lock xchg might be enough */ |
116 | #define ECB_MEMORY_FENCE_ACQUIRE __asm__ __volatile__ ("" : : : "memory") |
120 | #define ECB_MEMORY_FENCE_RELEASE do { } while (0) /* unlikely to change in future cpus */ |
117 | #define ECB_MEMORY_FENCE_RELEASE __asm__ __volatile__ ("") |
121 | #elif __amd64 || __amd64__ || __x86_64 || __x86_64__ |
118 | #elif __amd64 || __amd64__ || __x86_64 || __x86_64__ |
122 | #define ECB_MEMORY_FENCE __asm__ __volatile__ ("mfence" : : : "memory") |
119 | #define ECB_MEMORY_FENCE __asm__ __volatile__ ("mfence" : : : "memory") |
123 | #define ECB_MEMORY_FENCE_ACQUIRE __asm__ __volatile__ ("lfence" : : : "memory") |
120 | #define ECB_MEMORY_FENCE_ACQUIRE __asm__ __volatile__ ("" : : : "memory") |
124 | #define ECB_MEMORY_FENCE_RELEASE __asm__ __volatile__ ("sfence") /* play safe - not needed in any current cpu */ |
121 | #define ECB_MEMORY_FENCE_RELEASE __asm__ __volatile__ ("") |
125 | #elif __powerpc__ || __ppc__ || __powerpc64__ || __ppc64__ |
122 | #elif __powerpc__ || __ppc__ || __powerpc64__ || __ppc64__ |
126 | #define ECB_MEMORY_FENCE __asm__ __volatile__ ("sync" : : : "memory") |
123 | #define ECB_MEMORY_FENCE __asm__ __volatile__ ("sync" : : : "memory") |
127 | #elif defined __ARM_ARCH_6__ || defined __ARM_ARCH_6J__ \ |
124 | #elif defined __ARM_ARCH_6__ || defined __ARM_ARCH_6J__ \ |
128 | || defined __ARM_ARCH_6K__ || defined __ARM_ARCH_6ZK__ |
125 | || defined __ARM_ARCH_6K__ || defined __ARM_ARCH_6ZK__ |
129 | #define ECB_MEMORY_FENCE __asm__ __volatile__ ("mcr p15,0,%0,c7,c10,5" : : "r" (0) : "memory") |
126 | #define ECB_MEMORY_FENCE __asm__ __volatile__ ("mcr p15,0,%0,c7,c10,5" : : "r" (0) : "memory") |
130 | #elif defined __ARM_ARCH_7__ || defined __ARM_ARCH_7A__ \ |
127 | #elif defined __ARM_ARCH_7__ || defined __ARM_ARCH_7A__ \ |
131 | || defined __ARM_ARCH_7M__ || defined __ARM_ARCH_7R__ |
128 | || defined __ARM_ARCH_7M__ || defined __ARM_ARCH_7R__ |
132 | #define ECB_MEMORY_FENCE __asm__ __volatile__ ("dmb" : : : "memory") |
129 | #define ECB_MEMORY_FENCE __asm__ __volatile__ ("dmb" : : : "memory") |
133 | #elif __sparc || __sparc__ |
130 | #elif __sparc || __sparc__ |
134 | #define ECB_MEMORY_FENCE __asm__ __volatile__ ("membar #LoadStore | #LoadLoad | #StoreStore | #StoreLoad | " : : : "memory") |
131 | #define ECB_MEMORY_FENCE __asm__ __volatile__ ("membar #LoadStore | #LoadLoad | #StoreStore | #StoreLoad" : : : "memory") |
135 | #define ECB_MEMORY_FENCE_ACQUIRE __asm__ __volatile__ ("membar #LoadStore | #LoadLoad" : : : "memory") |
132 | #define ECB_MEMORY_FENCE_ACQUIRE __asm__ __volatile__ ("membar #LoadStore | #LoadLoad" : : : "memory") |
136 | #define ECB_MEMORY_FENCE_RELEASE __asm__ __volatile__ ("membar #LoadStore | #StoreStore") |
133 | #define ECB_MEMORY_FENCE_RELEASE __asm__ __volatile__ ("membar #LoadStore | #StoreStore") |
137 | #elif defined __s390__ || defined __s390x__ |
134 | #elif defined __s390__ || defined __s390x__ |
138 | #define ECB_MEMORY_FENCE __asm__ __volatile__ ("bcr 15,0" : : : "memory") |
135 | #define ECB_MEMORY_FENCE __asm__ __volatile__ ("bcr 15,0" : : : "memory") |
139 | #elif defined __mips__ |
136 | #elif defined __mips__ |
140 | #define ECB_MEMORY_FENCE __asm__ __volatile__ ("sync" : : : "memory") |
137 | #define ECB_MEMORY_FENCE __asm__ __volatile__ ("sync" : : : "memory") |
141 | #elif defined __alpha__ |
138 | #elif defined __alpha__ |
142 | #define ECB_MEMORY_FENCE __asm__ __volatile__ ("mb" : : : "memory") |
139 | #define ECB_MEMORY_FENCE __asm__ __volatile__ ("mb" : : : "memory") |
|
|
140 | #elif defined __hppa__ |
|
|
141 | #define ECB_MEMORY_FENCE __asm__ __volatile__ ("" : : : "memory") |
|
|
142 | #define ECB_MEMORY_FENCE_RELEASE __asm__ __volatile__ ("") |
|
|
143 | #elif defined __ia64__ |
|
|
144 | #define ECB_MEMORY_FENCE __asm__ __volatile__ ("mf" : : : "memory") |
143 | #endif |
145 | #endif |
144 | #endif |
146 | #endif |
145 | #endif |
147 | #endif |
146 | |
148 | |
147 | #ifndef ECB_MEMORY_FENCE |
149 | #ifndef ECB_MEMORY_FENCE |
|
|
150 | #if ECB_GCC_VERSION(4,7) |
|
|
151 | /* see comment below (stdatomic.h) about the C11 memory model. */ |
|
|
152 | #define ECB_MEMORY_FENCE __atomic_thread_fence (__ATOMIC_SEQ_CST) |
|
|
153 | /*#elif defined __clang && __has_feature (cxx_atomic)*/ |
|
|
154 | /* see comment below (stdatomic.h) about the C11 memory model. */ |
|
|
155 | /*#define ECB_MEMORY_FENCE __c11_atomic_thread_fence (__ATOMIC_SEQ_CST)*/ |
148 | #if ECB_GCC_VERSION(4,4) || defined __INTEL_COMPILER || defined __clang__ |
156 | #elif ECB_GCC_VERSION(4,4) || defined __INTEL_COMPILER || defined __clang__ |
149 | #define ECB_MEMORY_FENCE __sync_synchronize () |
157 | #define ECB_MEMORY_FENCE __sync_synchronize () |
150 | /*#define ECB_MEMORY_FENCE_ACQUIRE ({ char dummy = 0; __sync_lock_test_and_set (&dummy, 1); }) */ |
|
|
151 | /*#define ECB_MEMORY_FENCE_RELEASE ({ char dummy = 1; __sync_lock_release (&dummy ); }) */ |
|
|
152 | #elif _MSC_VER >= 1400 /* VC++ 2005 */ |
158 | #elif _MSC_VER >= 1400 /* VC++ 2005 */ |
153 | #pragma intrinsic(_ReadBarrier,_WriteBarrier,_ReadWriteBarrier) |
159 | #pragma intrinsic(_ReadBarrier,_WriteBarrier,_ReadWriteBarrier) |
154 | #define ECB_MEMORY_FENCE _ReadWriteBarrier () |
160 | #define ECB_MEMORY_FENCE _ReadWriteBarrier () |
155 | #define ECB_MEMORY_FENCE_ACQUIRE _ReadWriteBarrier () /* according to msdn, _ReadBarrier is not a load fence */ |
161 | #define ECB_MEMORY_FENCE_ACQUIRE _ReadWriteBarrier () /* according to msdn, _ReadBarrier is not a load fence */ |
156 | #define ECB_MEMORY_FENCE_RELEASE _WriteBarrier () |
162 | #define ECB_MEMORY_FENCE_RELEASE _WriteBarrier () |
… | |
… | |
166 | #define ECB_MEMORY_FENCE __sync () |
172 | #define ECB_MEMORY_FENCE __sync () |
167 | #endif |
173 | #endif |
168 | #endif |
174 | #endif |
169 | |
175 | |
170 | #ifndef ECB_MEMORY_FENCE |
176 | #ifndef ECB_MEMORY_FENCE |
|
|
177 | #if ECB_C11 && !defined __STDC_NO_ATOMICS__ |
|
|
178 | /* we assume that these memory fences work on all variables/all memory accesses, */ |
|
|
179 | /* not just C11 atomics and atomic accesses */ |
|
|
180 | #include <stdatomic.h> |
|
|
181 | /* Unfortunately, neither gcc 4.7 nor clang 3.1 generate any instructions for */ |
|
|
182 | /* any fence other than seq_cst, which isn't very efficient for us. */ |
|
|
183 | /* Why that is, we don't know - either the C11 memory model is quite useless */ |
|
|
184 | /* for most usages, or gcc and clang have a bug */ |
|
|
185 | /* I *currently* lean towards the latter, and inefficiently implement */ |
|
|
186 | /* all three of ecb's fences as a seq_cst fence */ |
|
|
187 | #define ECB_MEMORY_FENCE atomic_thread_fence (memory_order_seq_cst) |
|
|
188 | #endif |
|
|
189 | #endif |
|
|
190 | |
|
|
191 | #ifndef ECB_MEMORY_FENCE |
171 | #if !ECB_AVOID_PTHREADS |
192 | #if !ECB_AVOID_PTHREADS |
172 | /* |
193 | /* |
173 | * if you get undefined symbol references to pthread_mutex_lock, |
194 | * if you get undefined symbol references to pthread_mutex_lock, |
174 | * or failure to find pthread.h, then you should implement |
195 | * or failure to find pthread.h, then you should implement |
175 | * the ECB_MEMORY_FENCE operations for your cpu/compiler |
196 | * the ECB_MEMORY_FENCE operations for your cpu/compiler |
… | |
… | |
447 | ecb_inline void ecb_unreachable (void) ecb_noreturn; |
468 | ecb_inline void ecb_unreachable (void) ecb_noreturn; |
448 | ecb_inline void ecb_unreachable (void) { } |
469 | ecb_inline void ecb_unreachable (void) { } |
449 | #endif |
470 | #endif |
450 | |
471 | |
451 | /* try to tell the compiler that some condition is definitely true */ |
472 | /* try to tell the compiler that some condition is definitely true */ |
452 | #define ecb_assume(cond) do { if (!(cond)) ecb_unreachable (); } while (0) |
/* multi-statement macro: must use do { } while (0) so that a caller's
 * "if (a) ecb_assume (b); else ..." expands to valid code; the bare
 * "if ... else 0" form produces a syntax error in that context */
#define ecb_assume(cond) do { if (!(cond)) ecb_unreachable (); } while (0)
453 | |
474 | |
454 | ecb_inline unsigned char ecb_byteorder_helper (void) ecb_const; |
475 | ecb_inline unsigned char ecb_byteorder_helper (void) ecb_const; |
455 | ecb_inline unsigned char |
476 | ecb_inline unsigned char |
456 | ecb_byteorder_helper (void) |
477 | ecb_byteorder_helper (void) |
457 | { |
478 | { |
458 | const uint32_t u = 0x11223344; |
479 | /* the union code still generates code under pressure in gcc, */ |
459 | return *(unsigned char *)&u; |
480 | /* but less than using pointers, and always seem to */ |
|
|
481 | /* successfully return a constant. */ |
|
|
482 | /* the reason why we have this horrible preprocessor mess */ |
|
|
483 | /* is to avoid it in all cases, at least on common architectures */ |
|
|
484 | /* and yes, gcc defines __BYTE_ORDER__, g++ does not */ |
|
|
485 | #if __i386 || __i386__ || _M_X86 || __amd64 || __amd64__ || _M_X64 |
|
|
486 | return 0x44; |
|
|
487 | #elif __BYTE_ORDER__ && __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__ |
|
|
488 | return 0x44; |
|
|
489 | #elif __BYTE_ORDER__ && __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__ |
|
|
490 | retrurn 0x11; |
|
|
491 | #else |
|
|
492 | union |
|
|
493 | { |
|
|
494 | uint32_t i; |
|
|
495 | uint8_t c; |
|
|
496 | } u = { 0x11223344 }; |
|
|
497 | return u.c; |
|
|
498 | #endif |
460 | } |
499 | } |
461 | |
500 | |
462 | ecb_inline ecb_bool ecb_big_endian (void) ecb_const; |
501 | ecb_inline ecb_bool ecb_big_endian (void) ecb_const; |
463 | ecb_inline ecb_bool ecb_big_endian (void) { return ecb_byteorder_helper () == 0x11; } |
502 | ecb_inline ecb_bool ecb_big_endian (void) { return ecb_byteorder_helper () == 0x11; } |
464 | ecb_inline ecb_bool ecb_little_endian (void) ecb_const; |
503 | ecb_inline ecb_bool ecb_little_endian (void) ecb_const; |
… | |
… | |
495 | } |
534 | } |
496 | #else |
535 | #else |
497 | #define ecb_array_length(name) (sizeof (name) / sizeof (name [0])) |
536 | #define ecb_array_length(name) (sizeof (name) / sizeof (name [0])) |
498 | #endif |
537 | #endif |
499 | |
538 | |
|
|
539 | #if __STDC_IEC_559__ |
|
|
540 | // we assume this is defined for most C and many C++ compilers |
|
|
541 | ecb_inline ecb_bool ecb_float_ieee (void) ecb_const; |
|
|
542 | ecb_inline ecb_bool ecb_float_ieee (void) { return 1; } |
|
|
543 | ecb_inline ecb_bool ecb_double_ieee (void) ecb_const; |
|
|
544 | ecb_inline ecb_bool ecb_double_ieee (void) { return 1; } |
|
|
545 | #elif ECB_CPP |
|
|
546 | #include <limits> |
|
|
547 | ecb_inline ecb_bool ecb_float_ieee (void) ecb_const; |
|
|
548 | ecb_inline ecb_bool ecb_float_ieee (void) { return std::numeric_limits<float >::is_iec559; } |
|
|
549 | ecb_inline ecb_bool ecb_double_ieee (void) ecb_const; |
|
|
550 | ecb_inline ecb_bool ecb_double_ieee (void) { return std::numeric_limits<double>::is_iec559; } |
|
|
551 | #else |
|
|
552 | ecb_inline ecb_bool ecb_float_ieee (void) ecb_const; |
|
|
553 | ecb_inline ecb_bool ecb_float_ieee (void) { return 0; } |
|
|
554 | ecb_inline ecb_bool ecb_double_ieee (void) ecb_const; |
|
|
555 | ecb_inline ecb_bool ecb_double_ieee (void) { return 0; } |
|
|
556 | #endif |
|
|
557 | |
|
|
558 | /*******************************************************************************/ |
|
|
559 | /* floating point stuff, can be disabled by defining ECB_NO_FP */ |
|
|
560 | |
|
|
561 | #ifndef ECB_NO_FP |
|
|
562 | |
|
|
563 | /* basically, everything uses "ieee pure-endian" floating point numbers */ |
|
|
564 | /* the only noteworthy exception is ancient armle, which uses order 43218765 */ |
|
|
565 | #if 0 \ |
|
|
566 | || __i386 || __i386__ \ |
|
|
567 | || __amd64 || __amd64__ || __x86_64 || __x86_64__ \ |
|
|
568 | || __powerpc__ || __ppc__ || __powerpc64__ || __ppc64__ \ |
|
|
569 | || defined __arm__ && defined __ARM_EABI__ \ |
|
|
570 | || defined __s390__ || defined __s390x__ \ |
|
|
571 | || defined __mips__ \ |
|
|
572 | || defined __alpha__ \ |
|
|
573 | || defined __hppa__ \ |
|
|
574 | || defined __ia64__ \ |
|
|
575 | || defined _M_IX86 || defined _M_AMD64 || defined _M_IA64 |
|
|
576 | #define ECB_STDFP 1 |
|
|
577 | #else |
|
|
578 | #define ECB_STDFP 0 |
500 | #endif |
579 | #endif |
501 | |
580 | |
|
|
581 | // convert a float to ieee single/binary32 |
|
|
582 | ecb_function_ uint32_t ecb_float_to_binary32 (float x) ecb_const; |
|
|
583 | ecb_function_ uint32_t |
|
|
584 | ecb_float_to_binary32 (float x) |
|
|
585 | { |
|
|
586 | uint32_t r; |
|
|
587 | |
|
|
588 | #if ECB_STDFP |
|
|
589 | ((char *)&r) [0] = ((char *)&x)[0]; |
|
|
590 | ((char *)&r) [1] = ((char *)&x)[1]; |
|
|
591 | ((char *)&r) [2] = ((char *)&x)[2]; |
|
|
592 | ((char *)&r) [3] = ((char *)&x)[3]; |
|
|
593 | #else |
|
|
594 | /* slow emulation, works for anything but nan's and -0 */ |
|
|
595 | ECB_EXTERN_C float frexpf (float v, int *e); |
|
|
596 | uint32_t m; |
|
|
597 | int e; |
|
|
598 | |
|
|
599 | if (x == 0e0f ) return 0; |
|
|
600 | if (x > +3.40282346638528860e+38f) return 0x7f800000U; |
|
|
601 | if (x < -3.40282346638528860e+38f) return 0xff800000U; |
|
|
602 | |
|
|
603 | m = frexpf (x, &e) * 0x1000000U; |
|
|
604 | |
|
|
605 | r = m & 0x80000000U; |
|
|
606 | |
|
|
607 | if (r) |
|
|
608 | m = -m; |
|
|
609 | |
|
|
610 | if (e < -125) |
|
|
611 | { |
|
|
612 | m &= 0xffffffU; |
|
|
613 | m >>= (-125 - e); |
|
|
614 | e = -126; |
|
|
615 | } |
|
|
616 | |
|
|
617 | r |= (e + 126) << 23; |
|
|
618 | r |= m & 0x7fffffU; |
|
|
619 | #endif |
|
|
620 | |
|
|
621 | return r; |
|
|
622 | } |
|
|
623 | |
|
|
624 | // converts a ieee single/binary32 to a float |
|
|
625 | ecb_function_ float ecb_binary32_to_float (uint32_t x) ecb_const; |
|
|
626 | ecb_function_ float |
|
|
627 | ecb_binary32_to_float (uint32_t x) |
|
|
628 | { |
|
|
629 | float r; |
|
|
630 | |
|
|
631 | #if ECB_STDFP |
|
|
632 | ((char *)&r) [0] = ((char *)&x)[0]; |
|
|
633 | ((char *)&r) [1] = ((char *)&x)[1]; |
|
|
634 | ((char *)&r) [2] = ((char *)&x)[2]; |
|
|
635 | ((char *)&r) [3] = ((char *)&x)[3]; |
|
|
636 | #else |
|
|
637 | /* emulation, only works for normals and subnormals and +0 */ |
|
|
638 | ECB_EXTERN_C float ldexpf (float x, int e); |
|
|
639 | |
|
|
640 | int neg = x >> 31; |
|
|
641 | int e = (x >> 23) & 0xffU; |
|
|
642 | |
|
|
643 | x &= 0x7fffffU; |
|
|
644 | |
|
|
645 | if (e) |
|
|
646 | x |= 0x800000U; |
|
|
647 | |
|
|
648 | /* we distrust ldexpf a bit and do the 2**-24 scaling by an extra multiply */ |
|
|
649 | r = ldexpf (x * (1.f / 0x1000000U), e - 126); |
|
|
650 | |
|
|
651 | r = neg ? -r : r; |
|
|
652 | #endif |
|
|
653 | |
|
|
654 | return r; |
|
|
655 | } |
|
|
656 | |
|
|
657 | ecb_function_ uint64_t ecb_double_to_binary64 (double x) ecb_const; |
|
|
658 | ecb_function_ uint64_t |
|
|
659 | ecb_double_to_binary64 (double x) |
|
|
660 | { |
|
|
661 | } |
|
|
662 | |
|
|
663 | #endif |
|
|
664 | |
|
|
665 | #endif |
|
|
666 | |