/cvs/libecb/ecb.h
Revision: 1.138
Committed: Tue Oct 14 14:38:13 2014 UTC by sf-exg
Content type: text/plain
Branch: MAIN
Changes since 1.137: +28 -7 lines
Log Message:
Add support for clang __has_builtin.

/*
 * libecb - http://software.schmorp.de/pkg/libecb
 *
 * Copyright (c) 2009-2014 Marc Alexander Lehmann <libecb@schmorp.de>
 * Copyright (c) 2011 Emanuele Giaquinta
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without modifica-
 * tion, are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright notice,
 *    this list of conditions and the following disclaimer.
 *
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
 * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MER-
 * CHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
 * EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPE-
 * CIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
 * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTH-
 * ERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
 * OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Alternatively, the contents of this file may be used under the terms of
 * the GNU General Public License ("GPL") version 2 or any later version,
 * in which case the provisions of the GPL are applicable instead of
 * the above. If you wish to allow the use of your version of this file
 * only under the terms of the GPL and not to allow others to use your
 * version of this file under the BSD license, indicate your decision
 * by deleting the provisions above and replace them with the notice
 * and other provisions required by the GPL. If you do not delete the
 * provisions above, a recipient may use your version of this file under
 * either the BSD or the GPL.
 */

#ifndef ECB_H
#define ECB_H

/* 16 bits major, 16 bits minor */
#define ECB_VERSION 0x00010003

#ifdef _WIN32
typedef signed char int8_t;
typedef unsigned char uint8_t;
typedef signed short int16_t;
typedef unsigned short uint16_t;
typedef signed int int32_t;
typedef unsigned int uint32_t;
#if __GNUC__
typedef signed long long int64_t;
typedef unsigned long long uint64_t;
#else /* _MSC_VER || __BORLANDC__ */
typedef signed __int64 int64_t;
typedef unsigned __int64 uint64_t;
#endif
#ifdef _WIN64
#define ECB_PTRSIZE 8
typedef uint64_t uintptr_t;
typedef int64_t intptr_t;
#else
#define ECB_PTRSIZE 4
typedef uint32_t uintptr_t;
typedef int32_t intptr_t;
#endif
#else
#include <inttypes.h>
#if UINTMAX_MAX > 0xffffffffU
#define ECB_PTRSIZE 8
#else
#define ECB_PTRSIZE 4
#endif
#endif

/* work around x32 idiocy by defining proper macros */
#if __amd64 || __x86_64 || _M_AMD64 || _M_X64
#if _ILP32
#define ECB_AMD64_X32 1
#else
#define ECB_AMD64 1
#endif
#endif

/* many compilers define __GNUC__ to some version but then only implement
 * what their idiot authors think are the "more important" extensions,
 * causing enormous grief in return for some better fake benchmark numbers.
 * or so.
 * we try to detect these and simply assume they are not gcc - if they have
 * an issue with that they should have done it right in the first place.
 */
#if !defined __GNUC_MINOR__ || defined __INTEL_COMPILER || defined __SUNPRO_C || defined __SUNPRO_CC || defined __llvm__ || defined __clang__
#define ECB_GCC_VERSION(major,minor) 0
#else
#define ECB_GCC_VERSION(major,minor) (__GNUC__ > (major) || (__GNUC__ == (major) && __GNUC_MINOR__ >= (minor)))
#endif

#define ECB_CLANG_VERSION(major,minor) (__clang_major__ > (major) || (__clang_major__ == (major) && __clang_minor__ >= (minor)))

#if __clang__ && defined(__has_builtin)
#define ECB_CLANG_BUILTIN(x) __has_builtin(x)
#else
#define ECB_CLANG_BUILTIN(x) 0
#endif
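
/*
 * usage sketch: these are meant for preprocessor feature tests, mirroring
 * how this header itself uses them further down ("my_unreachable" is a
 * placeholder name):
 *
 *   #if ECB_GCC_VERSION(4,5) || ECB_CLANG_BUILTIN(__builtin_unreachable)
 *   #define my_unreachable() __builtin_unreachable ()
 *   #endif
 *
 * on compilers that are neither gcc nor clang all three macros simply
 * evaluate to 0 in preprocessor context.
 */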

#define ECB_CPP (__cplusplus+0)
#define ECB_CPP11 (__cplusplus >= 201103L)

#if ECB_CPP
#define ECB_C 0
#define ECB_STDC_VERSION 0
#else
#define ECB_C 1
#define ECB_STDC_VERSION __STDC_VERSION__
#endif

#define ECB_C99 (ECB_STDC_VERSION >= 199901L)
#define ECB_C11 (ECB_STDC_VERSION >= 201112L)

#if ECB_CPP
#define ECB_EXTERN_C extern "C"
#define ECB_EXTERN_C_BEG ECB_EXTERN_C {
#define ECB_EXTERN_C_END }
#else
#define ECB_EXTERN_C extern
#define ECB_EXTERN_C_BEG
#define ECB_EXTERN_C_END
#endif

/*****************************************************************************/

/* ECB_NO_THREADS - ecb is not used by multiple threads, ever */
/* ECB_NO_SMP - ecb might be used in multiple threads, but only on a single cpu */

#if ECB_NO_THREADS
#define ECB_NO_SMP 1
#endif

#if ECB_NO_SMP
#define ECB_MEMORY_FENCE do { } while (0)
#endif

#ifndef ECB_MEMORY_FENCE
#if ECB_GCC_VERSION(2,5) || defined __INTEL_COMPILER || (__llvm__ && __GNUC__) || __SUNPRO_C >= 0x5110 || __SUNPRO_CC >= 0x5110
#if __i386 || __i386__
#define ECB_MEMORY_FENCE __asm__ __volatile__ ("lock; orb $0, -1(%%esp)" : : : "memory")
#define ECB_MEMORY_FENCE_ACQUIRE __asm__ __volatile__ ("" : : : "memory")
#define ECB_MEMORY_FENCE_RELEASE __asm__ __volatile__ ("")
#elif __amd64 || __amd64__ || __x86_64 || __x86_64__
#define ECB_MEMORY_FENCE __asm__ __volatile__ ("mfence" : : : "memory")
#define ECB_MEMORY_FENCE_ACQUIRE __asm__ __volatile__ ("" : : : "memory")
#define ECB_MEMORY_FENCE_RELEASE __asm__ __volatile__ ("")
#elif __powerpc__ || __ppc__ || __powerpc64__ || __ppc64__
#define ECB_MEMORY_FENCE __asm__ __volatile__ ("sync" : : : "memory")
#elif defined __ARM_ARCH_6__ || defined __ARM_ARCH_6J__ \
   || defined __ARM_ARCH_6K__ || defined __ARM_ARCH_6ZK__
#define ECB_MEMORY_FENCE __asm__ __volatile__ ("mcr p15,0,%0,c7,c10,5" : : "r" (0) : "memory")
#elif defined __ARM_ARCH_7__ || defined __ARM_ARCH_7A__ \
   || defined __ARM_ARCH_7M__ || defined __ARM_ARCH_7R__
#define ECB_MEMORY_FENCE __asm__ __volatile__ ("dmb" : : : "memory")
#elif __aarch64__
#define ECB_MEMORY_FENCE __asm__ __volatile__ ("dmb ish" : : : "memory")
#elif (__sparc || __sparc__) && !__sparcv8
#define ECB_MEMORY_FENCE __asm__ __volatile__ ("membar #LoadStore | #LoadLoad | #StoreStore | #StoreLoad" : : : "memory")
#define ECB_MEMORY_FENCE_ACQUIRE __asm__ __volatile__ ("membar #LoadStore | #LoadLoad" : : : "memory")
#define ECB_MEMORY_FENCE_RELEASE __asm__ __volatile__ ("membar #LoadStore | #StoreStore")
#elif defined __s390__ || defined __s390x__
#define ECB_MEMORY_FENCE __asm__ __volatile__ ("bcr 15,0" : : : "memory")
#elif defined __mips__
/* GNU/Linux emulates sync on mips1 architectures, so we force its use */
/* anybody else who still uses mips1 is supposed to send in their version, with detection code. */
#define ECB_MEMORY_FENCE __asm__ __volatile__ (".set mips2; sync; .set mips0" : : : "memory")
#elif defined __alpha__
#define ECB_MEMORY_FENCE __asm__ __volatile__ ("mb" : : : "memory")
#elif defined __hppa__
#define ECB_MEMORY_FENCE __asm__ __volatile__ ("" : : : "memory")
#define ECB_MEMORY_FENCE_RELEASE __asm__ __volatile__ ("")
#elif defined __ia64__
#define ECB_MEMORY_FENCE __asm__ __volatile__ ("mf" : : : "memory")
#elif defined __m68k__
#define ECB_MEMORY_FENCE __asm__ __volatile__ ("" : : : "memory")
#elif defined __m88k__
#define ECB_MEMORY_FENCE __asm__ __volatile__ ("tb1 0,%%r0,128" : : : "memory")
#elif defined __sh__
#define ECB_MEMORY_FENCE __asm__ __volatile__ ("" : : : "memory")
#endif
#endif
#endif

#ifndef ECB_MEMORY_FENCE
#if ECB_GCC_VERSION(4,7)
/* see comment below (stdatomic.h) about the C11 memory model. */
#define ECB_MEMORY_FENCE __atomic_thread_fence (__ATOMIC_SEQ_CST)
#define ECB_MEMORY_FENCE_ACQUIRE __atomic_thread_fence (__ATOMIC_ACQUIRE)
#define ECB_MEMORY_FENCE_RELEASE __atomic_thread_fence (__ATOMIC_RELEASE)

/* The __has_feature syntax from clang is so misdesigned that we cannot use it
 * without risking compile time errors with other compilers. We *could*
 * define our own ecb_clang_has_feature, but I just can't be bothered to work
 * around this shit time and again.
 * #elif defined __clang && __has_feature (cxx_atomic)
 *   // see comment below (stdatomic.h) about the C11 memory model.
 *   #define ECB_MEMORY_FENCE __c11_atomic_thread_fence (__ATOMIC_SEQ_CST)
 *   #define ECB_MEMORY_FENCE_ACQUIRE __c11_atomic_thread_fence (__ATOMIC_ACQUIRE)
 *   #define ECB_MEMORY_FENCE_RELEASE __c11_atomic_thread_fence (__ATOMIC_RELEASE)
 */

#elif ECB_GCC_VERSION(4,4) || defined __INTEL_COMPILER || defined __clang__
#define ECB_MEMORY_FENCE __sync_synchronize ()
#elif _MSC_VER >= 1500 /* VC++ 2008 */
/* apparently, microsoft broke all the memory barrier stuff in Visual Studio 2008... */
#pragma intrinsic(_ReadBarrier,_WriteBarrier,_ReadWriteBarrier)
#define ECB_MEMORY_FENCE _ReadWriteBarrier (); MemoryBarrier()
#define ECB_MEMORY_FENCE_ACQUIRE _ReadWriteBarrier (); MemoryBarrier() /* according to msdn, _ReadBarrier is not a load fence */
#define ECB_MEMORY_FENCE_RELEASE _WriteBarrier (); MemoryBarrier()
#elif _MSC_VER >= 1400 /* VC++ 2005 */
#pragma intrinsic(_ReadBarrier,_WriteBarrier,_ReadWriteBarrier)
#define ECB_MEMORY_FENCE _ReadWriteBarrier ()
#define ECB_MEMORY_FENCE_ACQUIRE _ReadWriteBarrier () /* according to msdn, _ReadBarrier is not a load fence */
#define ECB_MEMORY_FENCE_RELEASE _WriteBarrier ()
#elif defined _WIN32
#include <WinNT.h>
#define ECB_MEMORY_FENCE MemoryBarrier () /* actually just xchg on x86... scary */
#elif __SUNPRO_C >= 0x5110 || __SUNPRO_CC >= 0x5110
#include <mbarrier.h>
#define ECB_MEMORY_FENCE __machine_rw_barrier ()
#define ECB_MEMORY_FENCE_ACQUIRE __machine_r_barrier ()
#define ECB_MEMORY_FENCE_RELEASE __machine_w_barrier ()
#elif __xlC__
#define ECB_MEMORY_FENCE __sync ()
#endif
#endif

#ifndef ECB_MEMORY_FENCE
#if ECB_C11 && !defined __STDC_NO_ATOMICS__
/* we assume that these memory fences work on all variables/all memory accesses, */
/* not just C11 atomics and atomic accesses */
#include <stdatomic.h>
/* Unfortunately, neither gcc 4.7 nor clang 3.1 generates any instructions for */
/* any fence other than seq_cst, which isn't very efficient for us. */
/* Why that is, we don't know - either the C11 memory model is quite useless */
/* for most usages, or gcc and clang have a bug */
/* I *currently* lean towards the latter, and inefficiently implement */
/* all three of ecb's fences as a seq_cst fence */
/* Update: gcc-4.8 generates mfence for all c++ fences, but nothing */
/* for any __atomic_thread_fence other than seq_cst */
#define ECB_MEMORY_FENCE atomic_thread_fence (memory_order_seq_cst)
#endif
#endif

#ifndef ECB_MEMORY_FENCE
#if !ECB_AVOID_PTHREADS
/*
 * if you get undefined symbol references to pthread_mutex_lock,
 * or failure to find pthread.h, then you should implement
 * the ECB_MEMORY_FENCE operations for your cpu/compiler
 * OR provide pthread.h and link against the posix thread library
 * of your system.
 */
#include <pthread.h>
#define ECB_NEEDS_PTHREADS 1
#define ECB_MEMORY_FENCE_NEEDS_PTHREADS 1

static pthread_mutex_t ecb_mf_lock = PTHREAD_MUTEX_INITIALIZER;
#define ECB_MEMORY_FENCE do { pthread_mutex_lock (&ecb_mf_lock); pthread_mutex_unlock (&ecb_mf_lock); } while (0)
#endif
#endif

#if !defined ECB_MEMORY_FENCE_ACQUIRE && defined ECB_MEMORY_FENCE
#define ECB_MEMORY_FENCE_ACQUIRE ECB_MEMORY_FENCE
#endif

#if !defined ECB_MEMORY_FENCE_RELEASE && defined ECB_MEMORY_FENCE
#define ECB_MEMORY_FENCE_RELEASE ECB_MEMORY_FENCE
#endif

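/*
 * a minimal sketch of how these fences are meant to pair up (illustrative
 * only - "data", "ready" and "consume" are placeholder names, and both
 * variables need to be visible to the two threads involved):
 *
 *   writer:  data = 42;
 *            ECB_MEMORY_FENCE_RELEASE;
 *            ready = 1;
 *
 *   reader:  if (ready)
 *              {
 *                ECB_MEMORY_FENCE_ACQUIRE;
 *                consume (data);
 *              }
 */
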
/*****************************************************************************/

#if __cplusplus
#define ecb_inline static inline
#elif ECB_GCC_VERSION(2,5)
#define ecb_inline static __inline__
#elif ECB_C99
#define ecb_inline static inline
#else
#define ecb_inline static
#endif

#if ECB_GCC_VERSION(3,3)
#define ecb_restrict __restrict__
#elif ECB_C99
#define ecb_restrict restrict
#else
#define ecb_restrict
#endif

typedef int ecb_bool;

#define ECB_CONCAT_(a, b) a ## b
#define ECB_CONCAT(a, b) ECB_CONCAT_(a, b)
#define ECB_STRINGIFY_(a) # a
#define ECB_STRINGIFY(a) ECB_STRINGIFY_(a)
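
/*
 * the extra indirection is what makes these useful: arguments get macro-expanded
 * before being pasted or stringified. e.g., given a hypothetical "#define FOO 42",
 * ECB_STRINGIFY (FOO) yields "42" (ECB_STRINGIFY_ (FOO) would yield "FOO"), and
 * ECB_CONCAT (prefix_, FOO) forms the token prefix_42.
 */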

#define ecb_function_ ecb_inline

#if ECB_GCC_VERSION(3,1) || ECB_CLANG_VERSION(2,8)
#define ecb_attribute(attrlist) __attribute__(attrlist)
#else
#define ecb_attribute(attrlist)
#endif

#if ECB_GCC_VERSION(3,1) || ECB_CLANG_BUILTIN(__builtin_constant_p)
#define ecb_is_constant(expr) __builtin_constant_p (expr)
#else
/* possible C11 impl for integral types
typedef struct ecb_is_constant_struct ecb_is_constant_struct;
#define ecb_is_constant(expr) _Generic ((1 ? (struct ecb_is_constant_struct *)0 : (void *)((expr) - (expr))), ecb_is_constant_struct *: 0, default: 1) */

#define ecb_is_constant(expr) 0
#endif

#if ECB_GCC_VERSION(3,1) || ECB_CLANG_BUILTIN(__builtin_expect)
#define ecb_expect(expr,value) __builtin_expect ((expr),(value))
#else
#define ecb_expect(expr,value) (expr)
#endif

#if ECB_GCC_VERSION(3,1) || ECB_CLANG_BUILTIN(__builtin_prefetch)
#define ecb_prefetch(addr,rw,locality) __builtin_prefetch (addr, rw, locality)
#else
#define ecb_prefetch(addr,rw,locality)
#endif

/* no emulation for ecb_decltype */
#if ECB_GCC_VERSION(4,5)
#define ecb_decltype(x) __decltype(x)
#elif ECB_GCC_VERSION(3,0)
#define ecb_decltype(x) __typeof(x)
#endif

#if _MSC_VER >= 1300
#define ecb_deprecated __declspec(deprecated)
#else
#define ecb_deprecated ecb_attribute ((__deprecated__))
#endif

#define ecb_noinline ecb_attribute ((__noinline__))
#define ecb_unused ecb_attribute ((__unused__))
#define ecb_const ecb_attribute ((__const__))
#define ecb_pure ecb_attribute ((__pure__))

/* http://msdn.microsoft.com/en-us/library/k6ktzx3s.aspx __declspec(noreturn) */
#if ECB_C11
#define ecb_noreturn _Noreturn
#else
#define ecb_noreturn ecb_attribute ((__noreturn__))
#endif

#if ECB_GCC_VERSION(4,3)
#define ecb_artificial ecb_attribute ((__artificial__))
#define ecb_hot ecb_attribute ((__hot__))
#define ecb_cold ecb_attribute ((__cold__))
#else
#define ecb_artificial
#define ecb_hot
#define ecb_cold
#endif
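
/*
 * attribute usage sketch (illustrative only - "my_div" and "my_abort" are
 * placeholder names; trailing attributes mirror how this header declares
 * its own helpers):
 *
 *   ecb_inline int my_div (int a, int b) ecb_const;
 *   ecb_noreturn ecb_cold void my_abort (const char *msg);
 */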

/* put around conditional expressions if you are very sure that the */
/* expression is mostly true or mostly false. note that these return */
/* booleans, not the expression. */
#define ecb_expect_false(expr) ecb_expect (!!(expr), 0)
#define ecb_expect_true(expr) ecb_expect (!!(expr), 1)
/* for compatibility to the rest of the world */
#define ecb_likely(expr) ecb_expect_true (expr)
#define ecb_unlikely(expr) ecb_expect_false (expr)
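
/*
 * usage sketch (illustrative only - "len" and "fatal_error" are placeholders):
 *
 *   void *p = malloc (len);
 *
 *   if (ecb_expect_false (!p))
 *     fatal_error ("out of memory");
 *
 * the branch is still taken when it has to be; the hint merely tells the
 * compiler to optimise for the common (non-null) case.
 */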

/* count trailing zero bits and count # of one bits */
#if ECB_GCC_VERSION(3,4) || (ECB_CLANG_BUILTIN(__builtin_clz) && ECB_CLANG_BUILTIN(__builtin_clzll) \
   && ECB_CLANG_BUILTIN(__builtin_ctz) && ECB_CLANG_BUILTIN(__builtin_ctzll) \
   && ECB_CLANG_BUILTIN(__builtin_popcount))
/* we assume int == 32 bit, long == 32 or 64 bit and long long == 64 bit */
#define ecb_ld32(x) (__builtin_clz (x) ^ 31)
#define ecb_ld64(x) (__builtin_clzll (x) ^ 63)
#define ecb_ctz32(x) __builtin_ctz (x)
#define ecb_ctz64(x) __builtin_ctzll (x)
#define ecb_popcount32(x) __builtin_popcount (x)
/* no popcountll */
#else
ecb_function_ int ecb_ctz32 (uint32_t x) ecb_const;
ecb_function_ int
ecb_ctz32 (uint32_t x)
{
  int r = 0;

  x &= ~x + 1; /* this isolates the lowest bit */

#if ECB_branchless_on_i386
  r += !!(x & 0xaaaaaaaa) << 0;
  r += !!(x & 0xcccccccc) << 1;
  r += !!(x & 0xf0f0f0f0) << 2;
  r += !!(x & 0xff00ff00) << 3;
  r += !!(x & 0xffff0000) << 4;
#else
  if (x & 0xaaaaaaaa) r += 1;
  if (x & 0xcccccccc) r += 2;
  if (x & 0xf0f0f0f0) r += 4;
  if (x & 0xff00ff00) r += 8;
  if (x & 0xffff0000) r += 16;
#endif

  return r;
}

ecb_function_ int ecb_ctz64 (uint64_t x) ecb_const;
ecb_function_ int
ecb_ctz64 (uint64_t x)
{
  int shift = x & 0xffffffffU ? 0 : 32;
  return ecb_ctz32 (x >> shift) + shift;
}

ecb_function_ int ecb_popcount32 (uint32_t x) ecb_const;
ecb_function_ int
ecb_popcount32 (uint32_t x)
{
  x -= (x >> 1) & 0x55555555;
  x = ((x >> 2) & 0x33333333) + (x & 0x33333333);
  x = ((x >> 4) + x) & 0x0f0f0f0f;
  x *= 0x01010101;

  return x >> 24;
}

ecb_function_ int ecb_ld32 (uint32_t x) ecb_const;
ecb_function_ int ecb_ld32 (uint32_t x)
{
  int r = 0;

  if (x >> 16) { x >>= 16; r += 16; }
  if (x >>  8) { x >>=  8; r +=  8; }
  if (x >>  4) { x >>=  4; r +=  4; }
  if (x >>  2) { x >>=  2; r +=  2; }
  if (x >>  1) {           r +=  1; }

  return r;
}

ecb_function_ int ecb_ld64 (uint64_t x) ecb_const;
ecb_function_ int ecb_ld64 (uint64_t x)
{
  int r = 0;

  if (x >> 32) { x >>= 32; r += 32; }

  return r + ecb_ld32 (x);
}
#endif
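
/*
 * a few worked examples (the values hold for the builtin and the fallback
 * versions alike):
 *
 *   ecb_ctz32 (0x00000010) == 4          (lowest set bit is bit 4)
 *   ecb_ld32 (0x00000100) == 8           (highest set bit, i.e. floor (log2 (x)))
 *   ecb_popcount32 (0x000000ff) == 8
 *
 * ecb_ctz32/64 and ecb_ld32/64 must not be called with 0 - the underlying
 * gcc builtins are undefined for that value.
 */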

ecb_function_ ecb_bool ecb_is_pot32 (uint32_t x) ecb_const;
ecb_function_ ecb_bool ecb_is_pot32 (uint32_t x) { return !(x & (x - 1)); }
ecb_function_ ecb_bool ecb_is_pot64 (uint64_t x) ecb_const;
ecb_function_ ecb_bool ecb_is_pot64 (uint64_t x) { return !(x & (x - 1)); }

ecb_function_ uint8_t ecb_bitrev8 (uint8_t x) ecb_const;
ecb_function_ uint8_t ecb_bitrev8 (uint8_t x)
{
  return (  (x * 0x0802U & 0x22110U)
          | (x * 0x8020U & 0x88440U)) * 0x10101U >> 16;
}

ecb_function_ uint16_t ecb_bitrev16 (uint16_t x) ecb_const;
ecb_function_ uint16_t ecb_bitrev16 (uint16_t x)
{
  x = ((x >> 1) & 0x5555) | ((x & 0x5555) << 1);
  x = ((x >> 2) & 0x3333) | ((x & 0x3333) << 2);
  x = ((x >> 4) & 0x0f0f) | ((x & 0x0f0f) << 4);
  x = ( x >> 8          ) | ( x           << 8);

  return x;
}

ecb_function_ uint32_t ecb_bitrev32 (uint32_t x) ecb_const;
ecb_function_ uint32_t ecb_bitrev32 (uint32_t x)
{
  x = ((x >>  1) & 0x55555555) | ((x & 0x55555555) <<  1);
  x = ((x >>  2) & 0x33333333) | ((x & 0x33333333) <<  2);
  x = ((x >>  4) & 0x0f0f0f0f) | ((x & 0x0f0f0f0f) <<  4);
  x = ((x >>  8) & 0x00ff00ff) | ((x & 0x00ff00ff) <<  8);
  x = ( x >> 16              ) | ( x               << 16);

  return x;
}
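
/*
 * examples: ecb_bitrev8 (0x01) == 0x80, ecb_bitrev16 (0x00ff) == 0xff00 and
 * ecb_bitrev32 (0x00000001) == 0x80000000 - i.e. bit 0 swaps with the top
 * bit, bit 1 with the one below it, and so on.
 */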

/* popcount64 is only available on 64 bit cpus as gcc builtin */
/* so for this version we are lazy */
ecb_function_ int ecb_popcount64 (uint64_t x) ecb_const;
ecb_function_ int
ecb_popcount64 (uint64_t x)
{
  return ecb_popcount32 (x) + ecb_popcount32 (x >> 32);
}

ecb_inline uint8_t ecb_rotl8 (uint8_t x, unsigned int count) ecb_const;
ecb_inline uint8_t ecb_rotr8 (uint8_t x, unsigned int count) ecb_const;
ecb_inline uint16_t ecb_rotl16 (uint16_t x, unsigned int count) ecb_const;
ecb_inline uint16_t ecb_rotr16 (uint16_t x, unsigned int count) ecb_const;
ecb_inline uint32_t ecb_rotl32 (uint32_t x, unsigned int count) ecb_const;
ecb_inline uint32_t ecb_rotr32 (uint32_t x, unsigned int count) ecb_const;
ecb_inline uint64_t ecb_rotl64 (uint64_t x, unsigned int count) ecb_const;
ecb_inline uint64_t ecb_rotr64 (uint64_t x, unsigned int count) ecb_const;

ecb_inline uint8_t ecb_rotl8 (uint8_t x, unsigned int count) { return (x >> ( 8 - count)) | (x << count); }
ecb_inline uint8_t ecb_rotr8 (uint8_t x, unsigned int count) { return (x << ( 8 - count)) | (x >> count); }
ecb_inline uint16_t ecb_rotl16 (uint16_t x, unsigned int count) { return (x >> (16 - count)) | (x << count); }
ecb_inline uint16_t ecb_rotr16 (uint16_t x, unsigned int count) { return (x << (16 - count)) | (x >> count); }
ecb_inline uint32_t ecb_rotl32 (uint32_t x, unsigned int count) { return (x >> (32 - count)) | (x << count); }
ecb_inline uint32_t ecb_rotr32 (uint32_t x, unsigned int count) { return (x << (32 - count)) | (x >> count); }
ecb_inline uint64_t ecb_rotl64 (uint64_t x, unsigned int count) { return (x >> (64 - count)) | (x << count); }
ecb_inline uint64_t ecb_rotr64 (uint64_t x, unsigned int count) { return (x << (64 - count)) | (x >> count); }
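
/*
 * example: ecb_rotl8 (0x81, 1) == 0x03 and ecb_rotr32 (0x00000001, 1) == 0x80000000.
 * count is expected to be in the range 1 .. width - 1: a count of 0 would shift
 * the 32/64 bit variants by the full type width, which C leaves undefined.
 */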

#if ECB_GCC_VERSION(4,3) || (ECB_CLANG_BUILTIN(__builtin_bswap32) && ECB_CLANG_BUILTIN(__builtin_bswap64))
#define ecb_bswap16(x) (__builtin_bswap32 (x) >> 16)
#define ecb_bswap32(x) __builtin_bswap32 (x)
#define ecb_bswap64(x) __builtin_bswap64 (x)
#else
ecb_function_ uint16_t ecb_bswap16 (uint16_t x) ecb_const;
ecb_function_ uint16_t
ecb_bswap16 (uint16_t x)
{
  return ecb_rotl16 (x, 8);
}

ecb_function_ uint32_t ecb_bswap32 (uint32_t x) ecb_const;
ecb_function_ uint32_t
ecb_bswap32 (uint32_t x)
{
  return (((uint32_t)ecb_bswap16 (x)) << 16) | ecb_bswap16 (x >> 16);
}

ecb_function_ uint64_t ecb_bswap64 (uint64_t x) ecb_const;
ecb_function_ uint64_t
ecb_bswap64 (uint64_t x)
{
  return (((uint64_t)ecb_bswap32 (x)) << 32) | ecb_bswap32 (x >> 32);
}
#endif
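
/* example: ecb_bswap16 (0x1234) == 0x3412 and ecb_bswap32 (0x11223344) == 0x44332211 */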

#if ECB_GCC_VERSION(4,5) || ECB_CLANG_BUILTIN(__builtin_unreachable)
#define ecb_unreachable() __builtin_unreachable ()
#else
/* this seems to work fine, but gcc always emits a warning for it :/ */
ecb_inline void ecb_unreachable (void) ecb_noreturn;
ecb_inline void ecb_unreachable (void) { }
#endif

/* try to tell the compiler that some condition is definitely true */
#define ecb_assume(cond) if (!(cond)) ecb_unreachable (); else 0
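
/*
 * usage sketch (illustrative): after "ecb_assume (len > 0);" the compiler is
 * free to drop any later "len == 0" handling. the macro expands to an
 * if/else statement, so it is meant to be used as a statement of its own,
 * not inside a larger expression.
 */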

ecb_inline unsigned char ecb_byteorder_helper (void) ecb_const;
ecb_inline unsigned char
ecb_byteorder_helper (void)
{
  /* the union code still generates code under pressure in gcc, */
  /* but less than using pointers, and always seems to */
  /* successfully return a constant. */
  /* the reason why we have this horrible preprocessor mess */
  /* is to avoid it in all cases, at least on common architectures */
  /* or when using a recent enough gcc version (>= 4.6) */
#if __i386 || __i386__ || _M_IX86 || __amd64 || __amd64__ || _M_X64
  return 0x44;
#elif __BYTE_ORDER__ && __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
  return 0x44;
#elif __BYTE_ORDER__ && __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
  return 0x11;
#else
  union
  {
    uint32_t i;
    uint8_t c;
  } u = { 0x11223344 };
  return u.c;
#endif
}

ecb_inline ecb_bool ecb_big_endian (void) ecb_const;
ecb_inline ecb_bool ecb_big_endian (void) { return ecb_byteorder_helper () == 0x11; }
ecb_inline ecb_bool ecb_little_endian (void) ecb_const;
ecb_inline ecb_bool ecb_little_endian (void) { return ecb_byteorder_helper () == 0x44; }

#if ECB_GCC_VERSION(3,0) || ECB_C99
#define ecb_mod(m,n) ((m) % (n) + ((m) % (n) < 0 ? (n) : 0))
#else
#define ecb_mod(m,n) ((m) < 0 ? ((n) - 1 - ((-1 - (m)) % (n))) : ((m) % (n)))
#endif
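
/*
 * unlike the C % operator, ecb_mod always yields a non-negative result for
 * positive n (mathematical modulus), e.g. ecb_mod (-7, 3) == 2 and
 * ecb_mod (7, 3) == 1, whereas -7 % 3 is -1 in C99. n must be positive.
 */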

#if __cplusplus
template<typename T>
static inline T ecb_div_rd (T val, T div)
{
  return val < 0 ? - ((-val + div - 1) / div) : (val          ) / div;
}
template<typename T>
static inline T ecb_div_ru (T val, T div)
{
  return val < 0 ? - ((-val          ) / div) : (val + div - 1) / div;
}
#else
#define ecb_div_rd(val,div) ((val) < 0 ? - ((-(val) + (div) - 1) / (div)) : ((val)              ) / (div))
#define ecb_div_ru(val,div) ((val) < 0 ? - ((-(val)              ) / (div)) : ((val) + (div) - 1) / (div))
#endif
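
/*
 * these round towards negative and positive infinity respectively, rather
 * than towards zero as C division does, e.g. ecb_div_rd (-7, 2) == -4 and
 * ecb_div_ru (7, 2) == 4, while -7 / 2 is -3 in C99. div must be positive.
 */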

#if ecb_cplusplus_does_not_suck
/* does not work for local types (http://www.open-std.org/jtc1/sc22/wg21/docs/papers/2008/n2657.htm) */
template<typename T, int N>
static inline int ecb_array_length (const T (&arr)[N])
{
  return N;
}
#else
#define ecb_array_length(name) (sizeof (name) / sizeof (name [0]))
#endif
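
/*
 * example: given "static int tbl [16];", ecb_array_length (tbl) == 16. the
 * macro fallback silently gives wrong answers for pointers, so only pass
 * real arrays.
 */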

/*******************************************************************************/
/* floating point stuff, can be disabled by defining ECB_NO_LIBM */

/* basically, everything uses "ieee pure-endian" floating point numbers */
/* the only noteworthy exception is ancient armle, which uses order 43218765 */
#if 0 \
    || __i386 || __i386__ \
    || __amd64 || __amd64__ || __x86_64 || __x86_64__ \
    || __powerpc__ || __ppc__ || __powerpc64__ || __ppc64__ \
    || defined __s390__ || defined __s390x__ \
    || defined __mips__ \
    || defined __alpha__ \
    || defined __hppa__ \
    || defined __ia64__ \
    || defined __m68k__ \
    || defined __m88k__ \
    || defined __sh__ \
    || defined _M_IX86 || defined _M_AMD64 || defined _M_IA64 \
    || (defined __arm__ && (defined __ARM_EABI__ || defined __EABI__ || defined __VFP_FP__ || defined _WIN32_WCE || defined __ANDROID__)) \
    || defined __aarch64__
#define ECB_STDFP 1
#include <string.h> /* for memcpy */
#else
#define ECB_STDFP 0
#endif

#ifndef ECB_NO_LIBM

#include <math.h> /* for frexp*, ldexp*, INFINITY, NAN */

/* only the oldest of old doesn't have this one. solaris. */
#ifdef INFINITY
#define ECB_INFINITY INFINITY
#else
#define ECB_INFINITY HUGE_VAL
#endif

#ifdef NAN
#define ECB_NAN NAN
#else
#define ECB_NAN ECB_INFINITY
#endif

/* converts an ieee half/binary16 to a float */
ecb_function_ float ecb_binary16_to_float (uint16_t x) ecb_const;
ecb_function_ float
ecb_binary16_to_float (uint16_t x)
{
  int e = (x >> 10) & 0x1f;
  int m = x & 0x3ff;
  float r;

  if      (!e     ) r = ldexpf (m        , -24);
  else if (e != 31) r = ldexpf (m + 0x400, e - 25);
  else if (m      ) r = ECB_NAN;
  else              r = ECB_INFINITY;

  return x & 0x8000 ? -r : r;
}
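
/*
 * examples: ecb_binary16_to_float (0x3c00) == 1.0f,
 * ecb_binary16_to_float (0xc000) == -2.0f, 0x7c00 yields +infinity and
 * 0x0001 the smallest positive (denormal) half value, 2**-24.
 */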

/* convert a float to ieee single/binary32 */
ecb_function_ uint32_t ecb_float_to_binary32 (float x) ecb_const;
ecb_function_ uint32_t
ecb_float_to_binary32 (float x)
{
  uint32_t r;

#if ECB_STDFP
  memcpy (&r, &x, 4);
#else
  /* slow emulation, works for anything but -0 */
  uint32_t m;
  int e;

  if (x == 0e0f                    ) return 0x00000000U;
  if (x > +3.40282346638528860e+38f) return 0x7f800000U;
  if (x < -3.40282346638528860e+38f) return 0xff800000U;
  if (x != x                       ) return 0x7fbfffffU;

  m = frexpf (x, &e) * 0x1000000U;

  r = m & 0x80000000U;

  if (r)
    m = -m;

  if (e <= -126)
    {
      m &= 0xffffffU;
      m >>= (-125 - e);
      e = -126;
    }

  r |= (e + 126) << 23;
  r |= m & 0x7fffffU;
#endif

  return r;
}
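
/*
 * example: ecb_float_to_binary32 (1.0f) == 0x3f800000; together with
 * ecb_binary32_to_float below the two form an exact round trip on
 * ECB_STDFP targets.
 */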

/* converts an ieee single/binary32 to a float */
ecb_function_ float ecb_binary32_to_float (uint32_t x) ecb_const;
ecb_function_ float
ecb_binary32_to_float (uint32_t x)
{
  float r;

#if ECB_STDFP
  memcpy (&r, &x, 4);
#else
  /* emulation, only works for normals and subnormals and +0 */
  int neg = x >> 31;
  int e = (x >> 23) & 0xffU;

  x &= 0x7fffffU;

  if (e)
    x |= 0x800000U;
  else
    e = 1;

  /* we distrust ldexpf a bit and do the 2**-24 scaling by an extra multiply */
  r = ldexpf (x * (0.5f / 0x800000U), e - 126);

  r = neg ? -r : r;
#endif

  return r;
}

/* convert a double to ieee double/binary64 */
ecb_function_ uint64_t ecb_double_to_binary64 (double x) ecb_const;
ecb_function_ uint64_t
ecb_double_to_binary64 (double x)
{
  uint64_t r;

#if ECB_STDFP
  memcpy (&r, &x, 8);
#else
  /* slow emulation, works for anything but -0 */
  uint64_t m;
  int e;

  if (x == 0e0                      ) return 0x0000000000000000U;
  if (x > +1.79769313486231470e+308) return 0x7ff0000000000000U;
  if (x < -1.79769313486231470e+308) return 0xfff0000000000000U;
  if (x != x                        ) return 0x7ff7ffffffffffffU;

  m = frexp (x, &e) * 0x20000000000000U;

  r = m & 0x8000000000000000U;

  if (r)
    m = -m;

  if (e <= -1022)
    {
      m &= 0x1fffffffffffffU;
      m >>= (-1021 - e);
      e = -1022;
    }

  r |= ((uint64_t)(e + 1022)) << 52;
  r |= m & 0xfffffffffffffU;
#endif

  return r;
}

/* converts an ieee double/binary64 to a double */
ecb_function_ double ecb_binary64_to_double (uint64_t x) ecb_const;
ecb_function_ double
ecb_binary64_to_double (uint64_t x)
{
  double r;

#if ECB_STDFP
  memcpy (&r, &x, 8);
#else
  /* emulation, only works for normals and subnormals and +0 */
  int neg = x >> 63;
  int e = (x >> 52) & 0x7ffU;

  x &= 0xfffffffffffffU;

  if (e)
    x |= 0x10000000000000U;
  else
    e = 1;

  /* we distrust ldexp a bit and do the 2**-53 scaling by an extra multiply */
  r = ldexp (x * (0.5 / 0x10000000000000U), e - 1022);

  r = neg ? -r : r;
#endif

  return r;
}

#endif

#endif