/cvs/libecb/ecb.h

Comparing libecb/ecb.h (file contents):
Revision 1.88 by root, Mon May 28 08:40:25 2012 UTC vs.
Revision 1.132 by root, Tue Mar 25 19:26:31 2014 UTC

1/* 1/*
2 * libecb - http://software.schmorp.de/pkg/libecb 2 * libecb - http://software.schmorp.de/pkg/libecb
3 * 3 *
4 * Copyright (©) 2009-2012 Marc Alexander Lehmann <libecb@schmorp.de> 4 * Copyright (©) 2009-2014 Marc Alexander Lehmann <libecb@schmorp.de>
5 * Copyright (©) 2011 Emanuele Giaquinta 5 * Copyright (©) 2011 Emanuele Giaquinta
6 * All rights reserved. 6 * All rights reserved.
7 * 7 *
8 * Redistribution and use in source and binary forms, with or without modifica- 8 * Redistribution and use in source and binary forms, with or without modifica-
9 * tion, are permitted provided that the following conditions are met: 9 * tion, are permitted provided that the following conditions are met:
29 29
30#ifndef ECB_H 30#ifndef ECB_H
31#define ECB_H 31#define ECB_H
32 32
33/* 16 bits major, 16 bits minor */ 33/* 16 bits major, 16 bits minor */
34#define ECB_VERSION 0x00010001 34#define ECB_VERSION 0x00010003
35 35
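A minimal sketch of how the packed version word can be consumed, assuming ecb.h (the newer revision) is on the include path; the enum names are illustrative, not part of libecb:

#include "ecb.h"

/* compile-time feature test against the 16.16 encoding */
#if ECB_VERSION >= 0x00010003
  /* header is at least version 1.3 */
#endif

enum
{
  example_ecb_major = ECB_VERSION >> 16,    /* high 16 bits: major */
  example_ecb_minor = ECB_VERSION & 0xffff  /* low 16 bits: minor */
};
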
36#ifdef _WIN32 36#ifdef _WIN32
37 typedef signed char int8_t; 37 typedef signed char int8_t;
38 typedef unsigned char uint8_t; 38 typedef unsigned char uint8_t;
39 typedef signed short int16_t; 39 typedef signed short int16_t;
63 #else 63 #else
64 #define ECB_PTRSIZE 4 64 #define ECB_PTRSIZE 4
65 #endif 65 #endif
66#endif 66#endif
67 67
68/* work around x32 idiocy by defining proper macros */
69#if __amd64 || __x86_64 || _M_AMD64 || _M_X64
70 #if _ILP32
71 #define ECB_AMD64_X32 1
72 #else
73 #define ECB_AMD64 1
74 #endif
75#endif
76
68/* many compilers define __GNUC__ to some versions but then only implement 77/* many compilers define __GNUC__ to some versions but then only implement
69 * what their idiot authors think are the "more important" extensions, 78 * what their idiot authors think are the "more important" extensions,
70 * causing enormous grief in return for some better fake benchmark numbers. 79 * causing enormous grief in return for some better fake benchmark numbers.
71 * or so. 80 * or so.
72 * we try to detect these and simply assume they are not gcc - if they have 81 * we try to detect these and simply assume they are not gcc - if they have
78 #else 87 #else
79 #define ECB_GCC_VERSION(major,minor) (__GNUC__ > (major) || (__GNUC__ == (major) && __GNUC_MINOR__ >= (minor))) 88 #define ECB_GCC_VERSION(major,minor) (__GNUC__ > (major) || (__GNUC__ == (major) && __GNUC_MINOR__ >= (minor)))
80 #endif 89 #endif
81#endif 90#endif
82 91
92#define ECB_CPP (__cplusplus+0)
93#define ECB_CPP11 (__cplusplus >= 201103L)
94
95#if ECB_CPP
96 #define ECB_C 0
97 #define ECB_STDC_VERSION 0
98#else
99 #define ECB_C 1
100 #define ECB_STDC_VERSION __STDC_VERSION__
101#endif
102
103#define ECB_C99 (ECB_STDC_VERSION >= 199901L)
104#define ECB_C11 (ECB_STDC_VERSION >= 201112L)
105
106#if ECB_CPP
107 #define ECB_EXTERN_C extern "C"
108 #define ECB_EXTERN_C_BEG ECB_EXTERN_C {
109 #define ECB_EXTERN_C_END }
110#else
111 #define ECB_EXTERN_C extern
112 #define ECB_EXTERN_C_BEG
113 #define ECB_EXTERN_C_END
114#endif
115
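A short sketch of the ECB_EXTERN_C_BEG/ECB_EXTERN_C_END pair added here, giving declarations a C ABI from both C and C++ translation units; the declared functions are hypothetical, for illustration only:

#include "ecb.h"

ECB_EXTERN_C_BEG /* expands to extern "C" { under C++, to nothing under C */

void example_api_init (void);    /* hypothetical */
int  example_api_version (void); /* hypothetical */

ECB_EXTERN_C_END
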
83/*****************************************************************************/ 116/*****************************************************************************/
84 117
85/* ECB_NO_THREADS - ecb is not used by multiple threads, ever */ 118/* ECB_NO_THREADS - ecb is not used by multiple threads, ever */
86/* ECB_NO_SMP - ecb might be used in multiple threads, but only on a single cpu */ 119/* ECB_NO_SMP - ecb might be used in multiple threads, but only on a single cpu */
87 120
88#if ECB_NO_THREADS 121#if ECB_NO_THREADS
89# define ECB_NO_SMP 1 122 #define ECB_NO_SMP 1
90#endif 123#endif
91 124
92#if ECB_NO_THREADS || ECB_NO_SMP 125#if ECB_NO_SMP
93 #define ECB_MEMORY_FENCE do { } while (0) 126 #define ECB_MEMORY_FENCE do { } while (0)
94#endif 127#endif
95 128
96#ifndef ECB_MEMORY_FENCE 129#ifndef ECB_MEMORY_FENCE
97 #if ECB_GCC_VERSION(2,5) || defined __INTEL_COMPILER || (__llvm__ && __GNUC__) || __SUNPRO_C >= 0x5110 || __SUNPRO_CC >= 0x5110 130 #if ECB_GCC_VERSION(2,5) || defined __INTEL_COMPILER || (__llvm__ && __GNUC__) || __SUNPRO_C >= 0x5110 || __SUNPRO_CC >= 0x5110
98 #if __i386 || __i386__ 131 #if __i386 || __i386__
99 #define ECB_MEMORY_FENCE __asm__ __volatile__ ("lock; orb $0, -1(%%esp)" : : : "memory") 132 #define ECB_MEMORY_FENCE __asm__ __volatile__ ("lock; orb $0, -1(%%esp)" : : : "memory")
100 #define ECB_MEMORY_FENCE_ACQUIRE ECB_MEMORY_FENCE /* non-lock xchg might be enough */ 133 #define ECB_MEMORY_FENCE_ACQUIRE __asm__ __volatile__ ("" : : : "memory")
101 #define ECB_MEMORY_FENCE_RELEASE do { } while (0) /* unlikely to change in future cpus */ 134 #define ECB_MEMORY_FENCE_RELEASE __asm__ __volatile__ ("")
102 #elif __amd64 || __amd64__ || __x86_64 || __x86_64__ 135 #elif __amd64 || __amd64__ || __x86_64 || __x86_64__
103 #define ECB_MEMORY_FENCE __asm__ __volatile__ ("mfence" : : : "memory") 136 #define ECB_MEMORY_FENCE __asm__ __volatile__ ("mfence" : : : "memory")
104 #define ECB_MEMORY_FENCE_ACQUIRE __asm__ __volatile__ ("lfence" : : : "memory") 137 #define ECB_MEMORY_FENCE_ACQUIRE __asm__ __volatile__ ("" : : : "memory")
105 #define ECB_MEMORY_FENCE_RELEASE __asm__ __volatile__ ("sfence") /* play safe - not needed in any current cpu */ 138 #define ECB_MEMORY_FENCE_RELEASE __asm__ __volatile__ ("")
106 #elif __powerpc__ || __ppc__ || __powerpc64__ || __ppc64__ 139 #elif __powerpc__ || __ppc__ || __powerpc64__ || __ppc64__
107 #define ECB_MEMORY_FENCE __asm__ __volatile__ ("sync" : : : "memory") 140 #define ECB_MEMORY_FENCE __asm__ __volatile__ ("sync" : : : "memory")
108 #elif defined __ARM_ARCH_6__ || defined __ARM_ARCH_6J__ \ 141 #elif defined __ARM_ARCH_6__ || defined __ARM_ARCH_6J__ \
109 || defined __ARM_ARCH_6K__ || defined __ARM_ARCH_6ZK__ 142 || defined __ARM_ARCH_6K__ || defined __ARM_ARCH_6ZK__
110 #define ECB_MEMORY_FENCE __asm__ __volatile__ ("mcr p15,0,%0,c7,c10,5" : : "r" (0) : "memory") 143 #define ECB_MEMORY_FENCE __asm__ __volatile__ ("mcr p15,0,%0,c7,c10,5" : : "r" (0) : "memory")
111 #elif defined __ARM_ARCH_7__ || defined __ARM_ARCH_7A__ \ 144 #elif defined __ARM_ARCH_7__ || defined __ARM_ARCH_7A__ \
112 || defined __ARM_ARCH_7M__ || defined __ARM_ARCH_7R__ 145 || defined __ARM_ARCH_7M__ || defined __ARM_ARCH_7R__
113 #define ECB_MEMORY_FENCE __asm__ __volatile__ ("dmb" : : : "memory") 146 #define ECB_MEMORY_FENCE __asm__ __volatile__ ("dmb" : : : "memory")
147 #elif __aarch64__
148 #define ECB_MEMORY_FENCE __asm__ __volatile__ ("dmb ish" : : : "memory")
114 #elif __sparc || __sparc__ 149 #elif (__sparc || __sparc__) && !__sparcv8
115 #define ECB_MEMORY_FENCE __asm__ __volatile__ ("membar #LoadStore | #LoadLoad | #StoreStore | #StoreLoad | " : : : "memory") 150 #define ECB_MEMORY_FENCE __asm__ __volatile__ ("membar #LoadStore | #LoadLoad | #StoreStore | #StoreLoad" : : : "memory")
116 #define ECB_MEMORY_FENCE_ACQUIRE __asm__ __volatile__ ("membar #LoadStore | #LoadLoad" : : : "memory") 151 #define ECB_MEMORY_FENCE_ACQUIRE __asm__ __volatile__ ("membar #LoadStore | #LoadLoad" : : : "memory")
117 #define ECB_MEMORY_FENCE_RELEASE __asm__ __volatile__ ("membar #LoadStore | #StoreStore") 152 #define ECB_MEMORY_FENCE_RELEASE __asm__ __volatile__ ("membar #LoadStore | #StoreStore")
118 #elif defined __s390__ || defined __s390x__ 153 #elif defined __s390__ || defined __s390x__
119 #define ECB_MEMORY_FENCE __asm__ __volatile__ ("bcr 15,0" : : : "memory") 154 #define ECB_MEMORY_FENCE __asm__ __volatile__ ("bcr 15,0" : : : "memory")
120 #elif defined __mips__ 155 #elif defined __mips__
156 /* GNU/Linux emulates sync on mips1 architectures, so we force its use */
157 /* anybody else who still uses mips1 is supposed to send in their version, with detection code. */
121 #define ECB_MEMORY_FENCE __asm__ __volatile__ ("sync" : : : "memory") 158 #define ECB_MEMORY_FENCE __asm__ __volatile__ (".set mips2; sync; .set mips0" : : : "memory")
122 #elif defined __alpha__ 159 #elif defined __alpha__
123 #define ECB_MEMORY_FENCE __asm__ __volatile__ ("mb" : : : "memory") 160 #define ECB_MEMORY_FENCE __asm__ __volatile__ ("mb" : : : "memory")
161 #elif defined __hppa__
162 #define ECB_MEMORY_FENCE __asm__ __volatile__ ("" : : : "memory")
163 #define ECB_MEMORY_FENCE_RELEASE __asm__ __volatile__ ("")
164 #elif defined __ia64__
165 #define ECB_MEMORY_FENCE __asm__ __volatile__ ("mf" : : : "memory")
166 #elif defined __m68k__
167 #define ECB_MEMORY_FENCE __asm__ __volatile__ ("" : : : "memory")
168 #elif defined __m88k__
169 #define ECB_MEMORY_FENCE __asm__ __volatile__ ("tb1 0,%%r0,128" : : : "memory")
170 #elif defined __sh__
171 #define ECB_MEMORY_FENCE __asm__ __volatile__ ("" : : : "memory")
124 #endif 172 #endif
125 #endif 173 #endif
126#endif 174#endif
127 175
128#ifndef ECB_MEMORY_FENCE 176#ifndef ECB_MEMORY_FENCE
177 #if ECB_GCC_VERSION(4,7)
178 /* see comment below (stdatomic.h) about the C11 memory model. */
179 #define ECB_MEMORY_FENCE __atomic_thread_fence (__ATOMIC_SEQ_CST)
180 #define ECB_MEMORY_FENCE_ACQUIRE __atomic_thread_fence (__ATOMIC_ACQUIRE)
181 #define ECB_MEMORY_FENCE_RELEASE __atomic_thread_fence (__ATOMIC_RELEASE)
182
183 /* The __has_feature syntax from clang is so misdesigned that we cannot use it
184 * without risking compile time errors with other compilers. We *could*
185 * define our own ecb_clang_has_feature, but I just can't be bothered to work
186 * around this shit time and again.
187 * #elif defined __clang && __has_feature (cxx_atomic)
188 * // see comment below (stdatomic.h) about the C11 memory model.
189 * #define ECB_MEMORY_FENCE __c11_atomic_thread_fence (__ATOMIC_SEQ_CST)
190 * #define ECB_MEMORY_FENCE_ACQUIRE __c11_atomic_thread_fence (__ATOMIC_ACQUIRE)
191 * #define ECB_MEMORY_FENCE_RELEASE __c11_atomic_thread_fence (__ATOMIC_RELEASE)
192 */
193
129 #if ECB_GCC_VERSION(4,4) || defined __INTEL_COMPILER || defined __clang__ 194 #elif ECB_GCC_VERSION(4,4) || defined __INTEL_COMPILER || defined __clang__
130 #define ECB_MEMORY_FENCE __sync_synchronize () 195 #define ECB_MEMORY_FENCE __sync_synchronize ()
131 /*#define ECB_MEMORY_FENCE_ACQUIRE ({ char dummy = 0; __sync_lock_test_and_set (&dummy, 1); }) */ 196 #elif _MSC_VER >= 1500 /* VC++ 2008 */
132 /*#define ECB_MEMORY_FENCE_RELEASE ({ char dummy = 1; __sync_lock_release (&dummy ); }) */ 197 /* apparently, microsoft broke all the memory barrier stuff in Visual Studio 2008... */
198 #pragma intrinsic(_ReadBarrier,_WriteBarrier,_ReadWriteBarrier)
199 #define ECB_MEMORY_FENCE _ReadWriteBarrier (); MemoryBarrier()
200 #define ECB_MEMORY_FENCE_ACQUIRE _ReadWriteBarrier (); MemoryBarrier() /* according to msdn, _ReadBarrier is not a load fence */
201 #define ECB_MEMORY_FENCE_RELEASE _WriteBarrier (); MemoryBarrier()
133 #elif _MSC_VER >= 1400 /* VC++ 2005 */ 202 #elif _MSC_VER >= 1400 /* VC++ 2005 */
134 #pragma intrinsic(_ReadBarrier,_WriteBarrier,_ReadWriteBarrier) 203 #pragma intrinsic(_ReadBarrier,_WriteBarrier,_ReadWriteBarrier)
135 #define ECB_MEMORY_FENCE _ReadWriteBarrier () 204 #define ECB_MEMORY_FENCE _ReadWriteBarrier ()
136 #define ECB_MEMORY_FENCE_ACQUIRE _ReadWriteBarrier () /* according to msdn, _ReadBarrier is not a load fence */ 205 #define ECB_MEMORY_FENCE_ACQUIRE _ReadWriteBarrier () /* according to msdn, _ReadBarrier is not a load fence */
137 #define ECB_MEMORY_FENCE_RELEASE _WriteBarrier () 206 #define ECB_MEMORY_FENCE_RELEASE _WriteBarrier ()
147 #define ECB_MEMORY_FENCE __sync () 216 #define ECB_MEMORY_FENCE __sync ()
148 #endif 217 #endif
149#endif 218#endif
150 219
151#ifndef ECB_MEMORY_FENCE 220#ifndef ECB_MEMORY_FENCE
221 #if ECB_C11 && !defined __STDC_NO_ATOMICS__
222 /* we assume that these memory fences work on all variables/all memory accesses, */
223 /* not just C11 atomics and atomic accesses */
224 #include <stdatomic.h>
225 /* Unfortunately, neither gcc 4.7 nor clang 3.1 generate any instructions for */
226 /* any fence other than seq_cst, which isn't very efficient for us. */
227 /* Why that is, we don't know - either the C11 memory model is quite useless */
228 /* for most usages, or gcc and clang have a bug */
229 /* I *currently* lean towards the latter, and inefficiently implement */
230 /* all three of ecb's fences as a seq_cst fence */
231 /* Update, gcc-4.8 generates mfence for all c++ fences, but nothing */
232 /* for all __atomic_thread_fence's except seq_cst */
233 #define ECB_MEMORY_FENCE atomic_thread_fence (memory_order_seq_cst)
234 #endif
235#endif
236
237#ifndef ECB_MEMORY_FENCE
152 #if !ECB_AVOID_PTHREADS 238 #if !ECB_AVOID_PTHREADS
153 /* 239 /*
154 * if you get undefined symbol references to pthread_mutex_lock, 240 * if you get undefined symbol references to pthread_mutex_lock,
155 * or failure to find pthread.h, then you should implement 241 * or failure to find pthread.h, then you should implement
156 * the ECB_MEMORY_FENCE operations for your cpu/compiler 242 * the ECB_MEMORY_FENCE operations for your cpu/compiler
174 #define ECB_MEMORY_FENCE_RELEASE ECB_MEMORY_FENCE 260 #define ECB_MEMORY_FENCE_RELEASE ECB_MEMORY_FENCE
175#endif 261#endif
176 262
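Per the comment above, a build that cannot or does not want to link against pthreads can supply its own fences before including the header, since ecb.h only provides ECB_MEMORY_FENCE and friends when they are not already defined. A sketch, assuming a C11 toolchain with <stdatomic.h>:

#include <stdatomic.h>

/* map all three of ecb's fences onto C11 thread fences */
#define ECB_MEMORY_FENCE         atomic_thread_fence (memory_order_seq_cst)
#define ECB_MEMORY_FENCE_ACQUIRE atomic_thread_fence (memory_order_acquire)
#define ECB_MEMORY_FENCE_RELEASE atomic_thread_fence (memory_order_release)

#include "ecb.h"
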
177/*****************************************************************************/ 263/*****************************************************************************/
178 264
179#define ECB_C99 (__STDC_VERSION__ >= 199901L)
180
181#if __cplusplus 265#if __cplusplus
182 #define ecb_inline static inline 266 #define ecb_inline static inline
183#elif ECB_GCC_VERSION(2,5) 267#elif ECB_GCC_VERSION(2,5)
184 #define ecb_inline static __inline__ 268 #define ecb_inline static __inline__
185#elif ECB_C99 269#elif ECB_C99
210 #define ecb_is_constant(expr) __builtin_constant_p (expr) 294 #define ecb_is_constant(expr) __builtin_constant_p (expr)
211 #define ecb_expect(expr,value) __builtin_expect ((expr),(value)) 295 #define ecb_expect(expr,value) __builtin_expect ((expr),(value))
212 #define ecb_prefetch(addr,rw,locality) __builtin_prefetch (addr, rw, locality) 296 #define ecb_prefetch(addr,rw,locality) __builtin_prefetch (addr, rw, locality)
213#else 297#else
214 #define ecb_attribute(attrlist) 298 #define ecb_attribute(attrlist)
299
300 /* possible C11 impl for integral types
301 typedef struct ecb_is_constant_struct ecb_is_constant_struct;
302 #define ecb_is_constant(expr) _Generic ((1 ? (struct ecb_is_constant_struct *)0 : (void *)((expr) - (expr)), ecb_is_constant_struct *: 0, default: 1)) */
303
215 #define ecb_is_constant(expr) 0 304 #define ecb_is_constant(expr) 0
216 #define ecb_expect(expr,value) (expr) 305 #define ecb_expect(expr,value) (expr)
217 #define ecb_prefetch(addr,rw,locality) 306 #define ecb_prefetch(addr,rw,locality)
218#endif 307#endif
219 308
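A usage sketch for the hint macros defined above; the function and the prefetch distance are illustrative only, and on compilers without the builtins the macros degrade to plain expressions as the fallback branch shows:

#include <stddef.h>
#include "ecb.h"

static long
example_sum_nonzero (const int *data, size_t n)
{
  long sum = 0;
  size_t i;

  for (i = 0; i < n; ++i)
    {
      /* read-prefetch a later element with low temporal locality */
      ecb_prefetch (data + i + 16, 0, 1);

      /* hint that the nonzero case is the unlikely one */
      if (ecb_expect (data [i] != 0, 0))
        sum += data [i];
    }

  return sum;
}
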
223#elif ECB_GCC_VERSION(3,0) 312#elif ECB_GCC_VERSION(3,0)
224 #define ecb_decltype(x) __typeof(x) 313 #define ecb_decltype(x) __typeof(x)
225#endif 314#endif
226 315
227#define ecb_noinline ecb_attribute ((__noinline__)) 316#define ecb_noinline ecb_attribute ((__noinline__))
228#define ecb_noreturn ecb_attribute ((__noreturn__))
229#define ecb_unused ecb_attribute ((__unused__)) 317#define ecb_unused ecb_attribute ((__unused__))
230#define ecb_const ecb_attribute ((__const__)) 318#define ecb_const ecb_attribute ((__const__))
231#define ecb_pure ecb_attribute ((__pure__)) 319#define ecb_pure ecb_attribute ((__pure__))
320
321#if ECB_C11
322 #define ecb_noreturn _Noreturn
323#else
324 #define ecb_noreturn ecb_attribute ((__noreturn__))
325#endif
232 326
233#if ECB_GCC_VERSION(4,3) 327#if ECB_GCC_VERSION(4,3)
234 #define ecb_artificial ecb_attribute ((__artificial__)) 328 #define ecb_artificial ecb_attribute ((__artificial__))
235 #define ecb_hot ecb_attribute ((__hot__)) 329 #define ecb_hot ecb_attribute ((__hot__))
236 #define ecb_cold ecb_attribute ((__cold__)) 330 #define ecb_cold ecb_attribute ((__cold__))
425 ecb_inline void ecb_unreachable (void) ecb_noreturn; 519 ecb_inline void ecb_unreachable (void) ecb_noreturn;
426 ecb_inline void ecb_unreachable (void) { } 520 ecb_inline void ecb_unreachable (void) { }
427#endif 521#endif
428 522
429/* try to tell the compiler that some condition is definitely true */ 523/* try to tell the compiler that some condition is definitely true */
430#define ecb_assume(cond) do { if (!(cond)) ecb_unreachable (); } while (0) 524#define ecb_assume(cond) if (!(cond)) ecb_unreachable (); else 0
431 525
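A small sketch of ecb_assume in use; the function is hypothetical and merely shows handing a caller-guaranteed invariant to the optimiser:

#include "ecb.h"

/* callers guarantee x is a nonzero power of two */
static int
example_log2 (unsigned int x)
{
  int r = 0;

  ecb_assume (x != 0); /* lets the compiler drop the x == 0 paths */

  while (x >>= 1)
    ++r;

  return r;
}
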
432ecb_inline unsigned char ecb_byteorder_helper (void) ecb_const; 526ecb_inline unsigned char ecb_byteorder_helper (void) ecb_const;
433ecb_inline unsigned char 527ecb_inline unsigned char
434ecb_byteorder_helper (void) 528ecb_byteorder_helper (void)
435{ 529{
436 const uint32_t u = 0x11223344; 530 /* the union code still generates code under pressure in gcc, */
437 return *(unsigned char *)&u; 531 /* but less than using pointers, and always seems to */
532 /* successfully return a constant. */
533 /* the reason why we have this horrible preprocessor mess */
534 /* is to avoid it in all cases, at least on common architectures */
535 /* or when using a recent enough gcc version (>= 4.6) */
536#if __i386 || __i386__ || _M_X86 || __amd64 || __amd64__ || _M_X64
537 return 0x44;
538#elif __BYTE_ORDER__ && __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
539 return 0x44;
540#elif __BYTE_ORDER__ && __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
541 return 0x11;
542#else
543 union
544 {
545 uint32_t i;
546 uint8_t c;
547 } u = { 0x11223344 };
548 return u.c;
549#endif
438} 550}
439 551
440ecb_inline ecb_bool ecb_big_endian (void) ecb_const; 552ecb_inline ecb_bool ecb_big_endian (void) ecb_const;
441ecb_inline ecb_bool ecb_big_endian (void) { return ecb_byteorder_helper () == 0x11; } 553ecb_inline ecb_bool ecb_big_endian (void) { return ecb_byteorder_helper () == 0x11; }
442ecb_inline ecb_bool ecb_little_endian (void) ecb_const; 554ecb_inline ecb_bool ecb_little_endian (void) ecb_const;
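
The helpers above usually fold to compile-time constants; a trivial sketch that queries the detected byte order (main is illustrative only):

#include <stdio.h>
#include "ecb.h"

int
main (void)
{
  if (ecb_big_endian ())
    puts ("big endian");
  else if (ecb_little_endian ())
    puts ("little endian");
  else
    puts ("neither pure big nor pure little endian");

  return 0;
}
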
473 } 585 }
474#else 586#else
475 #define ecb_array_length(name) (sizeof (name) / sizeof (name [0])) 587 #define ecb_array_length(name) (sizeof (name) / sizeof (name [0]))
476#endif 588#endif
477 589
590/*******************************************************************************/
591/* floating point stuff, can be disabled by defining ECB_NO_LIBM */
592
593/* basically, everything uses "ieee pure-endian" floating point numbers */
594/* the only noteworthy exception is ancient armle, which uses order 43218765 */
595#if 0 \
596 || __i386 || __i386__ \
597 || __amd64 || __amd64__ || __x86_64 || __x86_64__ \
598 || __powerpc__ || __ppc__ || __powerpc64__ || __ppc64__ \
599 || defined __s390__ || defined __s390x__ \
600 || defined __mips__ \
601 || defined __alpha__ \
602 || defined __hppa__ \
603 || defined __ia64__ \
604 || defined __m68k__ \
605 || defined __m88k__ \
606 || defined __sh__ \
607 || defined _M_IX86 || defined _M_AMD64 || defined _M_IA64 \
608 || (defined __arm__ && (defined __ARM_EABI__ || defined __EABI__ || defined __VFP_FP__ || defined _WIN32_WCE || defined __ANDROID__)) \
609 || defined __aarch64__
610 #define ECB_STDFP 1
611 #include <string.h> /* for memcpy */
612#else
613 #define ECB_STDFP 0
614#endif
615
616#ifndef ECB_NO_LIBM
617
618 #include <math.h> /* for frexp*, ldexp*, INFINITY, NAN */
619
620 /* only the oldest of old doesn't have this one. solaris. */
621 #ifdef INFINITY
622 #define ECB_INFINITY INFINITY
623 #else
624 #define ECB_INFINITY HUGE_VAL
478#endif 625 #endif
479 626
627 #ifdef NAN
628 #define ECB_NAN NAN
629 #else
630 #define ECB_NAN ECB_INFINITY
631 #endif
632
633 /* converts an ieee half/binary16 to a float */
634 ecb_function_ float ecb_binary16_to_float (uint16_t x) ecb_const;
635 ecb_function_ float
636 ecb_binary16_to_float (uint16_t x)
637 {
638 int e = (x >> 10) & 0x1f;
639 int m = x & 0x3ff;
640 float r;
641
642 if (!e ) r = ldexpf (m , -24);
643 else if (e != 31) r = ldexpf (m + 0x400, e - 25);
644 else if (m ) r = ECB_NAN;
645 else r = ECB_INFINITY;
646
647 return x & 0x8000 ? -r : r;
648 }
649
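A few worked bit patterns for ecb_binary16_to_float; the expected values follow from the IEEE binary16 format itself and are not taken from libecb:

#include <stdio.h>
#include "ecb.h"

int
main (void)
{
  printf ("%g\n", ecb_binary16_to_float (0x3c00)); /* 1.0: e = 15, m = 0 */
  printf ("%g\n", ecb_binary16_to_float (0xc000)); /* -2.0: sign set, e = 16, m = 0 */
  printf ("%g\n", ecb_binary16_to_float (0x7bff)); /* 65504, largest finite half */

  return 0;
}
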
650 /* convert a float to ieee single/binary32 */
651 ecb_function_ uint32_t ecb_float_to_binary32 (float x) ecb_const;
652 ecb_function_ uint32_t
653 ecb_float_to_binary32 (float x)
654 {
655 uint32_t r;
656
657 #if ECB_STDFP
658 memcpy (&r, &x, 4);
659 #else
660 /* slow emulation, works for anything but -0 */
661 uint32_t m;
662 int e;
663
664 if (x == 0e0f ) return 0x00000000U;
665 if (x > +3.40282346638528860e+38f) return 0x7f800000U;
666 if (x < -3.40282346638528860e+38f) return 0xff800000U;
667 if (x != x ) return 0x7fbfffffU;
668
669 m = frexpf (x, &e) * 0x1000000U;
670
671 r = m & 0x80000000U;
672
673 if (r)
674 m = -m;
675
676 if (e <= -126)
677 {
678 m &= 0xffffffU;
679 m >>= (-125 - e);
680 e = -126;
681 }
682
683 r |= (e + 126) << 23;
684 r |= m & 0x7fffffU;
685 #endif
686
687 return r;
688 }
689
690 /* converts an ieee single/binary32 to a float */
691 ecb_function_ float ecb_binary32_to_float (uint32_t x) ecb_const;
692 ecb_function_ float
693 ecb_binary32_to_float (uint32_t x)
694 {
695 float r;
696
697 #if ECB_STDFP
698 memcpy (&r, &x, 4);
699 #else
700 /* emulation, only works for normals and subnormals and +0 */
701 int neg = x >> 31;
702 int e = (x >> 23) & 0xffU;
703
704 x &= 0x7fffffU;
705
706 if (e)
707 x |= 0x800000U;
708 else
709 e = 1;
710
711 /* we distrust ldexpf a bit and do the 2**-24 scaling by an extra multiply */
712 r = ldexpf (x * (0.5f / 0x800000U), e - 126);
713
714 r = neg ? -r : r;
715 #endif
716
717 return r;
718 }
719
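A round-trip sketch through the two binary32 helpers; 1.5f encodes as 0x3fc00000 (biased exponent 127, mantissa 0x400000), which holds on both the ECB_STDFP and the emulation path:

#include <stdio.h>
#include <stdint.h>
#include "ecb.h"

int
main (void)
{
  uint32_t bits = ecb_float_to_binary32 (1.5f);
  float    back = ecb_binary32_to_float (bits);

  printf ("0x%08x %g\n", (unsigned)bits, back); /* 0x3fc00000 1.5 */

  return 0;
}
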
720 /* convert a double to ieee double/binary64 */
721 ecb_function_ uint64_t ecb_double_to_binary64 (double x) ecb_const;
722 ecb_function_ uint64_t
723 ecb_double_to_binary64 (double x)
724 {
725 uint64_t r;
726
727 #if ECB_STDFP
728 memcpy (&r, &x, 8);
729 #else
730 /* slow emulation, works for anything but -0 */
731 uint64_t m;
732 int e;
733
734 if (x == 0e0 ) return 0x0000000000000000U;
735 if (x > +1.79769313486231470e+308) return 0x7ff0000000000000U;
736 if (x < -1.79769313486231470e+308) return 0xfff0000000000000U;
737 if (x != x ) return 0X7ff7ffffffffffffU;
738
739 m = frexp (x, &e) * 0x20000000000000U;
740
 741        r = m & 0x8000000000000000;
742
743 if (r)
744 m = -m;
745
746 if (e <= -1022)
747 {
748 m &= 0x1fffffffffffffU;
749 m >>= (-1021 - e);
750 e = -1022;
751 }
752
753 r |= ((uint64_t)(e + 1022)) << 52;
754 r |= m & 0xfffffffffffffU;
755 #endif
756
757 return r;
758 }
759
760 /* converts an ieee double/binary64 to a double */
761 ecb_function_ double ecb_binary64_to_double (uint64_t x) ecb_const;
762 ecb_function_ double
763 ecb_binary64_to_double (uint64_t x)
764 {
765 double r;
766
767 #if ECB_STDFP
768 memcpy (&r, &x, 8);
769 #else
770 /* emulation, only works for normals and subnormals and +0 */
771 int neg = x >> 63;
772 int e = (x >> 52) & 0x7ffU;
773
774 x &= 0xfffffffffffffU;
775
776 if (e)
777 x |= 0x10000000000000U;
778 else
779 e = 1;
780
781 /* we distrust ldexp a bit and do the 2**-53 scaling by an extra multiply */
782 r = ldexp (x * (0.5 / 0x10000000000000U), e - 1022);
783
784 r = neg ? -r : r;
785 #endif
786
787 return r;
788 }
789
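The same round trip for the binary64 helpers; 0.125 encodes as 0x3fc0000000000000 (biased exponent 1020, mantissa 0):

#include <stdio.h>
#include <stdint.h>
#include <inttypes.h>
#include "ecb.h"

int
main (void)
{
  uint64_t bits = ecb_double_to_binary64 (0.125);
  double   back = ecb_binary64_to_double (bits);

  printf ("0x%016" PRIx64 " %g\n", bits, back); /* 0x3fc0000000000000 0.125 */

  return 0;
}
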
790#endif
791
792#endif
793
