/cvs/libecb/ecb.h

Comparing libecb/ecb.h (file contents):
Revision 1.92 by root, Tue May 29 17:17:56 2012 UTC vs.
Revision 1.123 by root, Wed Nov 6 18:06:24 2013 UTC

@@ -1,9 +1,9 @@
 /*
  * libecb - http://software.schmorp.de/pkg/libecb
  *
- * Copyright (©) 2009-2012 Marc Alexander Lehmann <libecb@schmorp.de>
+ * Copyright (©) 2009-2013 Marc Alexander Lehmann <libecb@schmorp.de>
  * Copyright (©) 2011 Emanuele Giaquinta
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without modifica-
  * tion, are permitted provided that the following conditions are met:
@@ -29,11 +29,11 @@
 
 #ifndef ECB_H
 #define ECB_H
 
 /* 16 bits major, 16 bits minor */
-#define ECB_VERSION 0x00010001
+#define ECB_VERSION 0x00010003
 
 #ifdef _WIN32
   typedef signed char int8_t;
   typedef unsigned char uint8_t;
   typedef signed short int16_t;
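ECB_VERSION packs the major version into the upper 16 bits and the minor version into the lower 16 bits, so the new value 0x00010003 reads as version 1.3. A minimal sketch of how client code might decode and gate on it; the MY_ECB_MAJOR/MY_ECB_MINOR helpers are illustrative, not part of libecb:

    #include <stdio.h>
    #include "ecb.h"

    /* hypothetical helpers: split the packed 16.16 version number */
    #define MY_ECB_MAJOR(v) (((v) >> 16) & 0xffff)
    #define MY_ECB_MINOR(v) ((v) & 0xffff)

    int
    main (void)
    {
      /* 0x00010003 decodes as major 1, minor 3 */
      printf ("libecb %u.%u\n",
              (unsigned)MY_ECB_MAJOR (ECB_VERSION),
              (unsigned)MY_ECB_MINOR (ECB_VERSION));

    #if ECB_VERSION >= 0x00010003
      /* new enough for the features added in this revision */
    #endif

      return 0;
    }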
@@ -54,17 +54,25 @@
   #else
     #define ECB_PTRSIZE 4
     typedef uint32_t uintptr_t;
     typedef int32_t intptr_t;
   #endif
-  typedef intptr_t ptrdiff_t;
 #else
   #include <inttypes.h>
   #if UINTMAX_MAX > 0xffffffffU
     #define ECB_PTRSIZE 8
   #else
     #define ECB_PTRSIZE 4
   #endif
 #endif
 
+/* work around x32 idiocy by defining proper macros */
+#if __x86_64 || _M_AMD64
+  #if _ILP32
+    #define ECB_AMD64_X32 1
+  #else
+    #define ECB_AMD64 1
+  #endif
+#endif
+
 /* many compilers define _GNUC_ to some versions but then only implement
  * what their idiot authors think are the "more important" extensions,
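ECB_PTRSIZE, together with the new ECB_AMD64/ECB_AMD64_X32 macros, lets client code branch on pointer width instead of guessing it from the architecture name (on x32 the CPU is x86-64 but pointers are only 4 bytes). A small sketch under that assumption; the typedef and the compile-time size check below are illustrative, not part of libecb:

    #include "ecb.h"

    /* pick a pointer-sized unsigned integer type based on ECB_PTRSIZE */
    #if ECB_PTRSIZE == 8
      typedef uint64_t my_uptr;
    #else
      typedef uint32_t my_uptr;
    #endif

    /* hypothetical compile-time check that the choice matches reality */
    typedef char my_uptr_size_check[sizeof (my_uptr) == sizeof (void *) ? 1 : -1];

    #if ECB_AMD64_X32
      /* x86-64 instruction set but 32 bit pointers - do not assume 8-byte pointers here */
    #endif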
@@ -83,8 +91,17 @@
 
 #define ECB_C (__STDC__+0) /* this assumes that __STDC__ is either empty or a number */
 #define ECB_C99 (__STDC_VERSION__ >= 199901L)
 #define ECB_C11 (__STDC_VERSION__ >= 201112L)
 #define ECB_CPP (__cplusplus+0)
-#define ECB_CPP98 (__cplusplus >= 199711L)
 #define ECB_CPP11 (__cplusplus >= 201103L)
+
+#if ECB_CPP
+  #define ECB_EXTERN_C extern "C"
+  #define ECB_EXTERN_C_BEG ECB_EXTERN_C {
+  #define ECB_EXTERN_C_END }
+#else
+  #define ECB_EXTERN_C extern
+  #define ECB_EXTERN_C_BEG
+  #define ECB_EXTERN_C_END
+#endif
 
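The new ECB_EXTERN_C_BEG/ECB_EXTERN_C_END pair expands to extern "C" { ... } when compiled as C++ and to nothing under plain C, so a single header can declare unmangled symbols for both languages. A short sketch of the intended use; the declared function is made up for illustration:

    #include "ecb.h"

    ECB_EXTERN_C_BEG

    /* hypothetical API - gets C linkage whether compiled as C or C++ */
    int my_library_init (void);

    ECB_EXTERN_C_END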
@@ -91,74 +108,79 @@
 /*****************************************************************************/
 
 /* ECB_NO_THREADS - ecb is not used by multiple threads, ever */
 /* ECB_NO_SMP     - ecb might be used in multiple threads, but only on a single cpu */
 
 #if ECB_NO_THREADS
-# define ECB_NO_SMP 1
+  #define ECB_NO_SMP 1
 #endif
 
-#if ECB_NO_THREADS || ECB_NO_SMP
+#if ECB_NO_SMP
   #define ECB_MEMORY_FENCE do { } while (0)
-#endif
-
-#ifndef ECB_MEMORY_FENCE
-  #if ECB_C11 && !defined __STDC_NO_ATOMICS__
-    /* we assume that these memory fences work on all variables/all memory accesses, */
-    /* not just C11 atomics and atomic accesses */
-    #include <stdatomic.h>
-    #define ECB_MEMORY_FENCE         atomic_thread_fence (memory_order_acq_rel)
-    #define ECB_MEMORY_FENCE_ACQUIRE atomic_thread_fence (memory_order_acquire)
-    #define ECB_MEMORY_FENCE_RELEASE atomic_thread_fence (memory_order_release)
-  #endif
-#endif
-
-#ifndef ECB_MEMORY_FENCE_RELEASE
-  #if ECB_GCC_VERSION(4,7)
-    #define ECB_MEMORY_FENCE         __atomic_thread_fence (__ATOMIC_ACQ_REL)
-    #define ECB_MEMORY_FENCE_ACQUIRE __atomic_thread_fence (__ATOMIC_ACQUIRE)
-    #define ECB_MEMORY_FENCE_RELEASE __atomic_thread_fence (__ATOMIC_RELEASE)
-  #endif
 #endif
 
 #ifndef ECB_MEMORY_FENCE
   #if ECB_GCC_VERSION(2,5) || defined __INTEL_COMPILER || (__llvm__ && __GNUC__) || __SUNPRO_C >= 0x5110 || __SUNPRO_CC >= 0x5110
     #if __i386 || __i386__
       #define ECB_MEMORY_FENCE __asm__ __volatile__ ("lock; orb $0, -1(%%esp)" : : : "memory")
-      #define ECB_MEMORY_FENCE_ACQUIRE ECB_MEMORY_FENCE /* non-lock xchg might be enough */
-      #define ECB_MEMORY_FENCE_RELEASE do { } while (0) /* unlikely to change in future cpus */
+      #define ECB_MEMORY_FENCE_ACQUIRE __asm__ __volatile__ ("" : : : "memory")
+      #define ECB_MEMORY_FENCE_RELEASE __asm__ __volatile__ ("")
     #elif __amd64 || __amd64__ || __x86_64 || __x86_64__
       #define ECB_MEMORY_FENCE __asm__ __volatile__ ("mfence" : : : "memory")
-      #define ECB_MEMORY_FENCE_ACQUIRE __asm__ __volatile__ ("lfence" : : : "memory")
-      #define ECB_MEMORY_FENCE_RELEASE __asm__ __volatile__ ("sfence") /* play safe - not needed in any current cpu */
+      #define ECB_MEMORY_FENCE_ACQUIRE __asm__ __volatile__ ("" : : : "memory")
+      #define ECB_MEMORY_FENCE_RELEASE __asm__ __volatile__ ("")
     #elif __powerpc__ || __ppc__ || __powerpc64__ || __ppc64__
       #define ECB_MEMORY_FENCE __asm__ __volatile__ ("sync" : : : "memory")
     #elif defined __ARM_ARCH_6__ || defined __ARM_ARCH_6J__ \
        || defined __ARM_ARCH_6K__ || defined __ARM_ARCH_6ZK__
       #define ECB_MEMORY_FENCE __asm__ __volatile__ ("mcr p15,0,%0,c7,c10,5" : : "r" (0) : "memory")
     #elif defined __ARM_ARCH_7__ || defined __ARM_ARCH_7A__ \
        || defined __ARM_ARCH_7M__ || defined __ARM_ARCH_7R__
       #define ECB_MEMORY_FENCE __asm__ __volatile__ ("dmb" : : : "memory")
     #elif __sparc || __sparc__
-      #define ECB_MEMORY_FENCE __asm__ __volatile__ ("membar #LoadStore | #LoadLoad | #StoreStore | #StoreLoad | " : : : "memory")
+      #define ECB_MEMORY_FENCE __asm__ __volatile__ ("membar #LoadStore | #LoadLoad | #StoreStore | #StoreLoad" : : : "memory")
       #define ECB_MEMORY_FENCE_ACQUIRE __asm__ __volatile__ ("membar #LoadStore | #LoadLoad" : : : "memory")
       #define ECB_MEMORY_FENCE_RELEASE __asm__ __volatile__ ("membar #LoadStore | #StoreStore")
     #elif defined __s390__ || defined __s390x__
       #define ECB_MEMORY_FENCE __asm__ __volatile__ ("bcr 15,0" : : : "memory")
     #elif defined __mips__
-      #define ECB_MEMORY_FENCE __asm__ __volatile__ ("sync" : : : "memory")
+      /* GNU/Linux emulates sync on mips1 architectures, so we force its use */
+      /* anybody else who still uses mips1 is supposed to send in their version, with detection code. */
+      #define ECB_MEMORY_FENCE __asm__ __volatile__ (".set mips2; sync; .set mips0" : : : "memory")
     #elif defined __alpha__
       #define ECB_MEMORY_FENCE __asm__ __volatile__ ("mb" : : : "memory")
+    #elif defined __hppa__
+      #define ECB_MEMORY_FENCE __asm__ __volatile__ ("" : : : "memory")
+      #define ECB_MEMORY_FENCE_RELEASE __asm__ __volatile__ ("")
+    #elif defined __ia64__
+      #define ECB_MEMORY_FENCE __asm__ __volatile__ ("mf" : : : "memory")
+    #elif defined __m68k__
+      #define ECB_MEMORY_FENCE __asm__ __volatile__ ("" : : : "memory")
+    #elif defined __m88k__
+      #define ECB_MEMORY_FENCE __asm__ __volatile__ ("tb1 0,%%r0,128" : : : "memory")
+    #elif defined __sh__
+      #define ECB_MEMORY_FENCE __asm__ __volatile__ ("" : : : "memory")
     #endif
   #endif
 #endif
 
 #ifndef ECB_MEMORY_FENCE
-  #if ECB_GCC_VERSION(4,4) || defined __INTEL_COMPILER || defined __clang__
+  #if ECB_GCC_VERSION(4,7)
+    /* see comment below (stdatomic.h) about the C11 memory model. */
+    #define ECB_MEMORY_FENCE __atomic_thread_fence (__ATOMIC_SEQ_CST)
+
+    /* The __has_feature syntax from clang is so misdesigned that we cannot use it
+     * without risking compile time errors with other compilers. We *could*
+     * define our own ecb_clang_has_feature, but I just can't be bothered to work
+     * around this shit time and again.
+     * #elif defined __clang && __has_feature (cxx_atomic)
+     *   // see comment below (stdatomic.h) about the C11 memory model.
+     *   #define ECB_MEMORY_FENCE __c11_atomic_thread_fence (__ATOMIC_SEQ_CST)
+     */
+
+  #elif ECB_GCC_VERSION(4,4) || defined __INTEL_COMPILER || defined __clang__
     #define ECB_MEMORY_FENCE __sync_synchronize ()
-    /*#define ECB_MEMORY_FENCE_ACQUIRE ({ char dummy = 0; __sync_lock_test_and_set (&dummy, 1); }) */
-    /*#define ECB_MEMORY_FENCE_RELEASE ({ char dummy = 1; __sync_lock_release (&dummy ); }) */
   #elif _MSC_VER >= 1400 /* VC++ 2005 */
     #pragma intrinsic(_ReadBarrier,_WriteBarrier,_ReadWriteBarrier)
     #define ECB_MEMORY_FENCE _ReadWriteBarrier ()
     #define ECB_MEMORY_FENCE_ACQUIRE _ReadWriteBarrier () /* according to msdn, _ReadBarrier is not a load fence */
     #define ECB_MEMORY_FENCE_RELEASE _WriteBarrier ()
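ECB_MEMORY_FENCE is a full barrier, while the _ACQUIRE/_RELEASE variants order only one direction; on x86 the revised definitions rely on the hardware's strong ordering and reduce the acquire/release forms to pure compiler barriers. A minimal sketch of the message-passing pattern such fences are meant for, assuming the acquire/release variants end up defined (libecb falls back to the full fence otherwise); the flag and payload variables are illustrative:

    #include "ecb.h"

    static int payload;
    static volatile int ready; /* volatile keeps the spin loop from being optimised away */

    /* producer: publish the payload, then set the flag */
    static void
    produce (int value)
    {
      payload = value;
      ECB_MEMORY_FENCE_RELEASE; /* payload must be visible before ready */
      ready = 1;
    }

    /* consumer: wait for the flag, then read the payload */
    static int
    consume (void)
    {
      while (!ready)
        ;

      ECB_MEMORY_FENCE_ACQUIRE; /* payload must be read after ready */
      return payload;
    }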
@@ -174,10 +196,25 @@
     #define ECB_MEMORY_FENCE __sync ()
   #endif
 #endif
 
 #ifndef ECB_MEMORY_FENCE
+  #if ECB_C11 && !defined __STDC_NO_ATOMICS__
+    /* we assume that these memory fences work on all variables/all memory accesses, */
+    /* not just C11 atomics and atomic accesses */
+    #include <stdatomic.h>
+    /* Unfortunately, neither gcc 4.7 nor clang 3.1 generate any instructions for */
+    /* any fence other than seq_cst, which isn't very efficient for us. */
+    /* Why that is, we don't know - either the C11 memory model is quite useless */
+    /* for most usages, or gcc and clang have a bug */
+    /* I *currently* lean towards the latter, and inefficiently implement */
+    /* all three of ecb's fences as a seq_cst fence */
+    #define ECB_MEMORY_FENCE atomic_thread_fence (memory_order_seq_cst)
+  #endif
+#endif
+
+#ifndef ECB_MEMORY_FENCE
   #if !ECB_AVOID_PTHREADS
     /*
      * if you get undefined symbol references to pthread_mutex_lock,
     * or failure to find pthread.h, then you should implement
     * the ECB_MEMORY_FENCE operations for your cpu/compiler
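Every fallback stage is wrapped in #ifndef ECB_MEMORY_FENCE, so a project that would otherwise land in the pthread-based fallback can supply its own fence before including the header, as the comment suggests. A hedged sketch of that override; the __sync_synchronize choice is only an example of what one might plug in for their particular compiler:

    /* project-provided fence for a compiler/cpu libecb does not know about */
    #define ECB_AVOID_PTHREADS 1
    #define ECB_MEMORY_FENCE __sync_synchronize () /* or a suitable inline asm barrier */

    #include "ecb.h"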
@@ -455,18 +492,36 @@
   ecb_inline void ecb_unreachable (void) ecb_noreturn;
   ecb_inline void ecb_unreachable (void) { }
 #endif
 
 /* try to tell the compiler that some condition is definitely true */
-#define ecb_assume(cond) do { if (!(cond)) ecb_unreachable (); } while (0)
+#define ecb_assume(cond) if (!(cond)) ecb_unreachable (); else 0
 
 ecb_inline unsigned char ecb_byteorder_helper (void) ecb_const;
 ecb_inline unsigned char
 ecb_byteorder_helper (void)
 {
-  const uint32_t u = 0x11223344;
-  return *(unsigned char *)&u;
+  /* the union code still generates code under pressure in gcc, */
+  /* but less than using pointers, and always seems to */
+  /* successfully return a constant. */
+  /* the reason why we have this horrible preprocessor mess */
+  /* is to avoid it in all cases, at least on common architectures */
+  /* or when using a recent enough gcc version (>= 4.6) */
+#if __i386 || __i386__ || _M_X86 || __amd64 || __amd64__ || _M_X64
+  return 0x44;
+#elif __BYTE_ORDER__ && __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
+  return 0x44;
+#elif __BYTE_ORDER__ && __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
+  return 0x11;
+#else
+  union
+  {
+    uint32_t i;
+    uint8_t c;
+  } u = { 0x11223344 };
+  return u.c;
+#endif
 }
 
 ecb_inline ecb_bool ecb_big_endian (void) ecb_const;
 ecb_inline ecb_bool ecb_big_endian (void) { return ecb_byteorder_helper () == 0x11; }
 ecb_inline ecb_bool ecb_little_endian (void) ecb_const;
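ecb_byteorder_helper now tries to answer the question at preprocessing time (0x44 on known little-endian targets, 0x11 on known big-endian ones), so ecb_big_endian () and ecb_little_endian () usually fold to compile-time constants. A small sketch of how calling code might use them; the byte-swapping helper is hypothetical, not part of libecb:

    #include <stdint.h>
    #include "ecb.h"

    /* hypothetical helper: convert a 32 bit value from host to big-endian (network) order */
    static uint32_t
    my_hton32 (uint32_t x)
    {
      if (ecb_big_endian ())
        return x; /* already in network order */

      return (x >> 24)
           | ((x >>  8) & 0x0000ff00u)
           | ((x <<  8) & 0x00ff0000u)
           | (x << 24);
    }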
@@ -503,7 +558,208 @@
   }
 #else
   #define ecb_array_length(name) (sizeof (name) / sizeof (name [0]))
 #endif
 
+/*******************************************************************************/
+/* floating point stuff, can be disabled by defining ECB_NO_LIBM */
+
+/* basically, everything uses "ieee pure-endian" floating point numbers */
+/* the only noteworthy exception is ancient armle, which uses order 43218765 */
+#if 0 \
+    || __i386 || __i386__ \
+    || __amd64 || __amd64__ || __x86_64 || __x86_64__ \
+    || __powerpc__ || __ppc__ || __powerpc64__ || __ppc64__ \
+    || defined __arm__ && defined __ARM_EABI__ \
+    || defined __s390__ || defined __s390x__ \
+    || defined __mips__ \
+    || defined __alpha__ \
+    || defined __hppa__ \
+    || defined __ia64__ \
+    || defined __m68k__ \
+    || defined __m88k__ \
+    || defined __sh__ \
+    || defined _M_IX86 || defined _M_AMD64 || defined _M_IA64
+  #define ECB_STDFP 1
+  #include <string.h> /* for memcpy */
+#else
+  #define ECB_STDFP 0
+#endif
+
+#ifndef ECB_NO_LIBM
+
+  #include <math.h> /* for frexp*, ldexp*, INFINITY, NAN */
+
+  /* only the oldest of old doesn't have this one. solaris. */
+  #ifdef INFINITY
+    #define ECB_INFINITY INFINITY
+  #else
+    #define ECB_INFINITY HUGE_VAL
-#endif
+  #endif
 
+  #ifdef NAN
+    #define ECB_NAN NAN
+  #else
+    #define ECB_NAN ECB_INFINITY
+  #endif
+
+  /* converts an ieee half/binary16 to a float */
+  ecb_function_ float ecb_binary16_to_float (uint16_t x) ecb_const;
+  ecb_function_ float
+  ecb_binary16_to_float (uint16_t x)
+  {
+    int e = (x >> 10) & 0x1f;
+    int m = x & 0x3ff;
+    float r;
+
+    if      (!e     ) r = ldexpf (m        , -24);
+    else if (e != 31) r = ldexpf (m + 0x400, e - 25);
+    else if (m      ) r = ECB_NAN;
+    else              r = ECB_INFINITY;
+
+    return x & 0x8000 ? -r : r;
+  }
+
+  /* convert a float to ieee single/binary32 */
+  ecb_function_ uint32_t ecb_float_to_binary32 (float x) ecb_const;
+  ecb_function_ uint32_t
+  ecb_float_to_binary32 (float x)
+  {
+    uint32_t r;
+
+    #if ECB_STDFP
+      memcpy (&r, &x, 4);
+    #else
+      /* slow emulation, works for anything but -0 */
+      uint32_t m;
+      int e;
+
+      if (x == 0e0f                    ) return 0x00000000U;
+      if (x > +3.40282346638528860e+38f) return 0x7f800000U;
+      if (x < -3.40282346638528860e+38f) return 0xff800000U;
+      if (x != x                       ) return 0x7fbfffffU;
+
+      m = frexpf (x, &e) * 0x1000000U;
+
+      r = m & 0x80000000U;
+
+      if (r)
+        m = -m;
+
+      if (e <= -126)
+        {
+          m &= 0xffffffU;
+          m >>= (-125 - e);
+          e = -126;
+        }
+
+      r |= (e + 126) << 23;
+      r |= m & 0x7fffffU;
+    #endif
+
+    return r;
+  }
+
+  /* converts an ieee single/binary32 to a float */
+  ecb_function_ float ecb_binary32_to_float (uint32_t x) ecb_const;
+  ecb_function_ float
+  ecb_binary32_to_float (uint32_t x)
+  {
+    float r;
+
+    #if ECB_STDFP
+      memcpy (&r, &x, 4);
+    #else
+      /* emulation, only works for normals and subnormals and +0 */
+      int neg = x >> 31;
+      int e = (x >> 23) & 0xffU;
+
+      x &= 0x7fffffU;
+
+      if (e)
+        x |= 0x800000U;
+      else
+        e = 1;
+
+      /* we distrust ldexpf a bit and do the 2**-24 scaling by an extra multiply */
+      r = ldexpf (x * (0.5f / 0x800000U), e - 126);
+
+      r = neg ? -r : r;
+    #endif
+
+    return r;
+  }
+
+  /* convert a double to ieee double/binary64 */
+  ecb_function_ uint64_t ecb_double_to_binary64 (double x) ecb_const;
+  ecb_function_ uint64_t
+  ecb_double_to_binary64 (double x)
+  {
+    uint64_t r;
+
+    #if ECB_STDFP
+      memcpy (&r, &x, 8);
+    #else
+      /* slow emulation, works for anything but -0 */
+      uint64_t m;
+      int e;
+
+      if (x == 0e0                     ) return 0x0000000000000000U;
+      if (x > +1.79769313486231470e+308) return 0x7ff0000000000000U;
+      if (x < -1.79769313486231470e+308) return 0xfff0000000000000U;
+      if (x != x                       ) return 0X7ff7ffffffffffffU;
+
+      m = frexp (x, &e) * 0x20000000000000U;
+
+      r = m & 0x8000000000000000;;
+
+      if (r)
+        m = -m;
+
+      if (e <= -1022)
+        {
+          m &= 0x1fffffffffffffU;
+          m >>= (-1021 - e);
+          e = -1022;
+        }
+
+      r |= ((uint64_t)(e + 1022)) << 52;
+      r |= m & 0xfffffffffffffU;
+    #endif
+
+    return r;
+  }
+
+  /* converts an ieee double/binary64 to a double */
+  ecb_function_ double ecb_binary64_to_double (uint64_t x) ecb_const;
+  ecb_function_ double
+  ecb_binary64_to_double (uint64_t x)
+  {
+    double r;
+
+    #if ECB_STDFP
+      memcpy (&r, &x, 8);
+    #else
+      /* emulation, only works for normals and subnormals and +0 */
+      int neg = x >> 63;
+      int e = (x >> 52) & 0x7ffU;
+
+      x &= 0xfffffffffffffU;
+
+      if (e)
+        x |= 0x10000000000000U;
+      else
+        e = 1;
+
+      /* we distrust ldexp a bit and do the 2**-53 scaling by an extra multiply */
+      r = ldexp (x * (0.5 / 0x10000000000000U), e - 1022);
+
+      r = neg ? -r : r;
+    #endif
+
+    return r;
+  }
+
+#endif
+
+#endif
+
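The new helpers convert between native float/double values and their IEEE 754 bit patterns, via memcpy on ECB_STDFP targets and a frexp/ldexp based emulation elsewhere. A short usage sketch with worked values (binary16 0x3c00 encodes +1.0 and 0xc000 encodes -2.0; 1.0f corresponds to the binary32 pattern 0x3f800000):

    #include <stdio.h>
    #include "ecb.h"

    int
    main (void)
    {
      /* binary16 0x3c00 is +1.0, 0xc000 is -2.0 */
      printf ("%g %g\n",
              ecb_binary16_to_float (0x3c00),
              ecb_binary16_to_float (0xc000));

      /* round-trip a float through its binary32 representation */
      uint32_t bits = ecb_float_to_binary32 (1.0f); /* 0x3f800000 */
      float back    = ecb_binary32_to_float (bits); /* 1.0f again */
      printf ("%08x %g\n", (unsigned)bits, back);

      return 0;
    }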

Diff Legend

- Removed lines
+ Added lines