/cvs/libecb/ecb.h
Revision: 1.139
Committed: Tue Oct 14 14:39:06 2014 UTC (9 years, 7 months ago) by root
Content type: text/plain
Branch: MAIN
Changes since 1.138: +4 -3 lines

File Contents

1 /*
2 * libecb - http://software.schmorp.de/pkg/libecb
3 *
4 * Copyright (©) 2009-2014 Marc Alexander Lehmann <libecb@schmorp.de>
5 * Copyright (©) 2011 Emanuele Giaquinta
6 * All rights reserved.
7 *
8 * Redistribution and use in source and binary forms, with or without modifica-
9 * tion, are permitted provided that the following conditions are met:
10 *
11 * 1. Redistributions of source code must retain the above copyright notice,
12 * this list of conditions and the following disclaimer.
13 *
14 * 2. Redistributions in binary form must reproduce the above copyright
15 * notice, this list of conditions and the following disclaimer in the
16 * documentation and/or other materials provided with the distribution.
17 *
18 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
19 * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MER-
20 * CHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
21 * EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPE-
22 * CIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
23 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
24 * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
25 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTH-
26 * ERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
27 * OF THE POSSIBILITY OF SUCH DAMAGE.
28 *
29 * Alternatively, the contents of this file may be used under the terms of
30 * the GNU General Public License ("GPL") version 2 or any later version,
31 * in which case the provisions of the GPL are applicable instead of
32 * the above. If you wish to allow the use of your version of this file
33 * only under the terms of the GPL and not to allow others to use your
34 * version of this file under the BSD license, indicate your decision
35 * by deleting the provisions above and replace them with the notice
36 * and other provisions required by the GPL. If you do not delete the
37 * provisions above, a recipient may use your version of this file under
38 * either the BSD or the GPL.
39 */
40
41 #ifndef ECB_H
42 #define ECB_H
43
44 /* 16 bits major, 16 bits minor */
45 #define ECB_VERSION 0x00010003
46
47 #ifdef _WIN32
48 typedef signed char int8_t;
49 typedef unsigned char uint8_t;
50 typedef signed short int16_t;
51 typedef unsigned short uint16_t;
52 typedef signed int int32_t;
53 typedef unsigned int uint32_t;
54 #if __GNUC__
55 typedef signed long long int64_t;
56 typedef unsigned long long uint64_t;
57 #else /* _MSC_VER || __BORLANDC__ */
58 typedef signed __int64 int64_t;
59 typedef unsigned __int64 uint64_t;
60 #endif
61 #ifdef _WIN64
62 #define ECB_PTRSIZE 8
63 typedef uint64_t uintptr_t;
64 typedef int64_t intptr_t;
65 #else
66 #define ECB_PTRSIZE 4
67 typedef uint32_t uintptr_t;
68 typedef int32_t intptr_t;
69 #endif
70 #else
71 #include <inttypes.h>
72 #if UINTMAX_MAX > 0xffffffffU
73 #define ECB_PTRSIZE 8
74 #else
75 #define ECB_PTRSIZE 4
76 #endif
77 #endif
78
79 /* work around x32 idiocy by defining proper macros */
80 #if __amd64 || __x86_64 || _M_AMD64 || _M_X64
81 #if _ILP32
82 #define ECB_AMD64_X32 1
83 #else
84 #define ECB_AMD64 1
85 #endif
86 #endif
87
88 /* many compilers define __GNUC__ to some versions but then only implement
89 * what their idiot authors think are the "more important" extensions,
90 * causing enormous grief in return for some better fake benchmark numbers.
91 * or so.
92 * we try to detect these and simply assume they are not gcc - if they have
93 * an issue with that they should have done it right in the first place.
94 */
95 #if !defined __GNUC_MINOR__ || defined __INTEL_COMPILER || defined __SUNPRO_C || defined __SUNPRO_CC || defined __llvm__ || defined __clang__
96 #define ECB_GCC_VERSION(major,minor) 0
97 #else
98 #define ECB_GCC_VERSION(major,minor) (__GNUC__ > (major) || (__GNUC__ == (major) && __GNUC_MINOR__ >= (minor)))
99 #endif
100
101 #define ECB_CLANG_VERSION(major,minor) (__clang_major__ > (major) || (__clang_major__ == (major) && __clang_minor__ >= (minor)))
102
103 #if __clang__ && defined(__has_builtin)
104 #define ECB_CLANG_BUILTIN(x) __has_builtin(x)
105 #else
106 #define ECB_CLANG_BUILTIN(x) 0
107 #endif
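/* illustrative sketch, not part of upstream ecb.h: the version/builtin
 * gates above are meant to wrap feature tests, with a portable fallback;
 * my_unreachable below is a made-up name:
 *
 *   #if ECB_GCC_VERSION(4,5) || ECB_CLANG_BUILTIN(__builtin_unreachable)
 *     #define my_unreachable() __builtin_unreachable ()
 *   #else
 *     #define my_unreachable() abort ()
 *   #endif
 */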
108
109 #define ECB_CPP (__cplusplus+0)
110 #define ECB_CPP11 (__cplusplus >= 201103L)
111
112 #if ECB_CPP
113 #define ECB_C 0
114 #define ECB_STDC_VERSION 0
115 #else
116 #define ECB_C 1
117 #define ECB_STDC_VERSION __STDC_VERSION__
118 #endif
119
120 #define ECB_C99 (ECB_STDC_VERSION >= 199901L)
121 #define ECB_C11 (ECB_STDC_VERSION >= 201112L)
122
123 #if ECB_CPP
124 #define ECB_EXTERN_C extern "C"
125 #define ECB_EXTERN_C_BEG ECB_EXTERN_C {
126 #define ECB_EXTERN_C_END }
127 #else
128 #define ECB_EXTERN_C extern
129 #define ECB_EXTERN_C_BEG
130 #define ECB_EXTERN_C_END
131 #endif
132
133 /*****************************************************************************/
134
135 /* ECB_NO_THREADS - ecb is not used by multiple threads, ever */
136 /* ECB_NO_SMP - ecb might be used in multiple threads, but only on a single cpu */
137
138 #if ECB_NO_THREADS
139 #define ECB_NO_SMP 1
140 #endif
141
142 #if ECB_NO_SMP
143 #define ECB_MEMORY_FENCE do { } while (0)
144 #endif
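/* illustrative sketch, not part of upstream ecb.h: both knobs have to be
 * defined before this header is included, e.g. from the compiler command
 * line or the including source file:
 *
 *   #define ECB_NO_SMP 1    (single-cpu target: fences degrade to no-ops)
 *   #include "ecb.h"
 */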
145
146 #ifndef ECB_MEMORY_FENCE
147 #if ECB_GCC_VERSION(2,5) || defined __INTEL_COMPILER || (__llvm__ && __GNUC__) || __SUNPRO_C >= 0x5110 || __SUNPRO_CC >= 0x5110
148 #if __i386 || __i386__
149 #define ECB_MEMORY_FENCE __asm__ __volatile__ ("lock; orb $0, -1(%%esp)" : : : "memory")
150 #define ECB_MEMORY_FENCE_ACQUIRE __asm__ __volatile__ ("" : : : "memory")
151 #define ECB_MEMORY_FENCE_RELEASE __asm__ __volatile__ ("")
152 #elif __amd64 || __amd64__ || __x86_64 || __x86_64__
153 #define ECB_MEMORY_FENCE __asm__ __volatile__ ("mfence" : : : "memory")
154 #define ECB_MEMORY_FENCE_ACQUIRE __asm__ __volatile__ ("" : : : "memory")
155 #define ECB_MEMORY_FENCE_RELEASE __asm__ __volatile__ ("")
156 #elif __powerpc__ || __ppc__ || __powerpc64__ || __ppc64__
157 #define ECB_MEMORY_FENCE __asm__ __volatile__ ("sync" : : : "memory")
158 #elif defined __ARM_ARCH_6__ || defined __ARM_ARCH_6J__ \
159 || defined __ARM_ARCH_6K__ || defined __ARM_ARCH_6ZK__
160 #define ECB_MEMORY_FENCE __asm__ __volatile__ ("mcr p15,0,%0,c7,c10,5" : : "r" (0) : "memory")
161 #elif defined __ARM_ARCH_7__ || defined __ARM_ARCH_7A__ \
162 || defined __ARM_ARCH_7M__ || defined __ARM_ARCH_7R__
163 #define ECB_MEMORY_FENCE __asm__ __volatile__ ("dmb" : : : "memory")
164 #elif __aarch64__
165 #define ECB_MEMORY_FENCE __asm__ __volatile__ ("dmb ish" : : : "memory")
166 #elif (__sparc || __sparc__) && !__sparcv8
167 #define ECB_MEMORY_FENCE __asm__ __volatile__ ("membar #LoadStore | #LoadLoad | #StoreStore | #StoreLoad" : : : "memory")
168 #define ECB_MEMORY_FENCE_ACQUIRE __asm__ __volatile__ ("membar #LoadStore | #LoadLoad" : : : "memory")
169 #define ECB_MEMORY_FENCE_RELEASE __asm__ __volatile__ ("membar #LoadStore | #StoreStore")
170 #elif defined __s390__ || defined __s390x__
171 #define ECB_MEMORY_FENCE __asm__ __volatile__ ("bcr 15,0" : : : "memory")
172 #elif defined __mips__
173 /* GNU/Linux emulates sync on mips1 architectures, so we force its use */
174 /* anybody else who still uses mips1 is supposed to send in their version, with detection code. */
175 #define ECB_MEMORY_FENCE __asm__ __volatile__ (".set mips2; sync; .set mips0" : : : "memory")
176 #elif defined __alpha__
177 #define ECB_MEMORY_FENCE __asm__ __volatile__ ("mb" : : : "memory")
178 #elif defined __hppa__
179 #define ECB_MEMORY_FENCE __asm__ __volatile__ ("" : : : "memory")
180 #define ECB_MEMORY_FENCE_RELEASE __asm__ __volatile__ ("")
181 #elif defined __ia64__
182 #define ECB_MEMORY_FENCE __asm__ __volatile__ ("mf" : : : "memory")
183 #elif defined __m68k__
184 #define ECB_MEMORY_FENCE __asm__ __volatile__ ("" : : : "memory")
185 #elif defined __m88k__
186 #define ECB_MEMORY_FENCE __asm__ __volatile__ ("tb1 0,%%r0,128" : : : "memory")
187 #elif defined __sh__
188 #define ECB_MEMORY_FENCE __asm__ __volatile__ ("" : : : "memory")
189 #endif
190 #endif
191 #endif
192
193 #ifndef ECB_MEMORY_FENCE
194 #if ECB_GCC_VERSION(4,7)
195 /* see comment below (stdatomic.h) about the C11 memory model. */
196 #define ECB_MEMORY_FENCE __atomic_thread_fence (__ATOMIC_SEQ_CST)
197 #define ECB_MEMORY_FENCE_ACQUIRE __atomic_thread_fence (__ATOMIC_ACQUIRE)
198 #define ECB_MEMORY_FENCE_RELEASE __atomic_thread_fence (__ATOMIC_RELEASE)
199
200 /* The __has_feature syntax from clang is so misdesigned that we cannot use it
201 * without risking compile time errors with other compilers. We *could*
202 * define our own ecb_clang_has_feature, but I just can't be bothered to work
203 * around this shit time and again.
204 * #elif defined __clang && __has_feature (cxx_atomic)
205 * // see comment below (stdatomic.h) about the C11 memory model.
206 * #define ECB_MEMORY_FENCE __c11_atomic_thread_fence (__ATOMIC_SEQ_CST)
207 * #define ECB_MEMORY_FENCE_ACQUIRE __c11_atomic_thread_fence (__ATOMIC_ACQUIRE)
208 * #define ECB_MEMORY_FENCE_RELEASE __c11_atomic_thread_fence (__ATOMIC_RELEASE)
209 */
210
211 #elif ECB_GCC_VERSION(4,4) || defined __INTEL_COMPILER || defined __clang__
212 #define ECB_MEMORY_FENCE __sync_synchronize ()
213 #elif _MSC_VER >= 1500 /* VC++ 2008 */
214 /* apparently, microsoft broke all the memory barrier stuff in Visual Studio 2008... */
215 #pragma intrinsic(_ReadBarrier,_WriteBarrier,_ReadWriteBarrier)
216 #define ECB_MEMORY_FENCE _ReadWriteBarrier (); MemoryBarrier()
217 #define ECB_MEMORY_FENCE_ACQUIRE _ReadWriteBarrier (); MemoryBarrier() /* according to msdn, _ReadBarrier is not a load fence */
218 #define ECB_MEMORY_FENCE_RELEASE _WriteBarrier (); MemoryBarrier()
219 #elif _MSC_VER >= 1400 /* VC++ 2005 */
220 #pragma intrinsic(_ReadBarrier,_WriteBarrier,_ReadWriteBarrier)
221 #define ECB_MEMORY_FENCE _ReadWriteBarrier ()
222 #define ECB_MEMORY_FENCE_ACQUIRE _ReadWriteBarrier () /* according to msdn, _ReadBarrier is not a load fence */
223 #define ECB_MEMORY_FENCE_RELEASE _WriteBarrier ()
224 #elif defined _WIN32
225 #include <WinNT.h>
226 #define ECB_MEMORY_FENCE MemoryBarrier () /* actually just xchg on x86... scary */
227 #elif __SUNPRO_C >= 0x5110 || __SUNPRO_CC >= 0x5110
228 #include <mbarrier.h>
229 #define ECB_MEMORY_FENCE __machine_rw_barrier ()
230 #define ECB_MEMORY_FENCE_ACQUIRE __machine_r_barrier ()
231 #define ECB_MEMORY_FENCE_RELEASE __machine_w_barrier ()
232 #elif __xlC__
233 #define ECB_MEMORY_FENCE __sync ()
234 #endif
235 #endif
236
237 #ifndef ECB_MEMORY_FENCE
238 #if ECB_C11 && !defined __STDC_NO_ATOMICS__
239 /* we assume that these memory fences work on all variables/all memory accesses, */
240 /* not just C11 atomics and atomic accesses */
241 #include <stdatomic.h>
242 /* Unfortunately, neither gcc 4.7 nor clang 3.1 generate any instructions for */
243 /* any fence other than seq_cst, which isn't very efficient for us. */
244 /* Why that is, we don't know - either the C11 memory model is quite useless */
245 /* for most usages, or gcc and clang have a bug */
246 /* I *currently* lean towards the latter, and inefficiently implement */
247 /* all three of ecb's fences as a seq_cst fence */
248 /* Update, gcc-4.8 generates mfence for all c++ fences, but nothing */
249 /* for all __atomic_thread_fence's except seq_cst */
250 #define ECB_MEMORY_FENCE atomic_thread_fence (memory_order_seq_cst)
251 #endif
252 #endif
253
254 #ifndef ECB_MEMORY_FENCE
255 #if !ECB_AVOID_PTHREADS
256 /*
257 * if you get undefined symbol references to pthread_mutex_lock,
258 * or failure to find pthread.h, then you should implement
259 * the ECB_MEMORY_FENCE operations for your cpu/compiler
260 * OR provide pthread.h and link against the posix thread library
261 * of your system.
262 */
263 #include <pthread.h>
264 #define ECB_NEEDS_PTHREADS 1
265 #define ECB_MEMORY_FENCE_NEEDS_PTHREADS 1
266
267 static pthread_mutex_t ecb_mf_lock = PTHREAD_MUTEX_INITIALIZER;
268 #define ECB_MEMORY_FENCE do { pthread_mutex_lock (&ecb_mf_lock); pthread_mutex_unlock (&ecb_mf_lock); } while (0)
269 #endif
270 #endif
271
272 #if !defined ECB_MEMORY_FENCE_ACQUIRE && defined ECB_MEMORY_FENCE
273 #define ECB_MEMORY_FENCE_ACQUIRE ECB_MEMORY_FENCE
274 #endif
275
276 #if !defined ECB_MEMORY_FENCE_RELEASE && defined ECB_MEMORY_FENCE
277 #define ECB_MEMORY_FENCE_RELEASE ECB_MEMORY_FENCE
278 #endif
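/* illustrative sketch, not part of upstream ecb.h: a typical use of the
 * release/acquire pair defined above is flag-based publication between two
 * threads (all names below are made up):
 *
 *   static int payload;
 *   static volatile int ready;
 *
 *   void producer (void)
 *   {
 *     payload = 42;
 *     ECB_MEMORY_FENCE_RELEASE;   make the payload visible first
 *     ready = 1;
 *   }
 *
 *   void consumer (void)
 *   {
 *     while (!ready)
 *       ;
 *     ECB_MEMORY_FENCE_ACQUIRE;   only then read the payload
 *     consume (payload);
 *   }
 */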
279
280 /*****************************************************************************/
281
282 #if __cplusplus
283 #define ecb_inline static inline
284 #elif ECB_GCC_VERSION(2,5)
285 #define ecb_inline static __inline__
286 #elif ECB_C99
287 #define ecb_inline static inline
288 #else
289 #define ecb_inline static
290 #endif
291
292 #if ECB_GCC_VERSION(3,3)
293 #define ecb_restrict __restrict__
294 #elif ECB_C99
295 #define ecb_restrict restrict
296 #else
297 #define ecb_restrict
298 #endif
299
300 typedef int ecb_bool;
301
302 #define ECB_CONCAT_(a, b) a ## b
303 #define ECB_CONCAT(a, b) ECB_CONCAT_(a, b)
304 #define ECB_STRINGIFY_(a) # a
305 #define ECB_STRINGIFY(a) ECB_STRINGIFY_(a)
306
307 #define ecb_function_ ecb_inline
308
309 #if ECB_GCC_VERSION(3,1) || ECB_CLANG_VERSION(2,8)
310 #define ecb_attribute(attrlist) __attribute__(attrlist)
311 #else
312 #define ecb_attribute(attrlist)
313 #endif
314
315 #if ECB_GCC_VERSION(3,1) || ECB_CLANG_BUILTIN(__builtin_constant_p)
316 #define ecb_is_constant(expr) __builtin_constant_p (expr)
317 #else
318 /* possible C11 impl for integral types
319 typedef struct ecb_is_constant_struct ecb_is_constant_struct;
320 #define ecb_is_constant(expr) _Generic ((1 ? (struct ecb_is_constant_struct *)0 : (void *)((expr) - (expr))), ecb_is_constant_struct *: 0, default: 1) */
321
322 #define ecb_is_constant(expr) 0
323 #endif
324
325 #if ECB_GCC_VERSION(3,1) || ECB_CLANG_BUILTIN(__builtin_expect)
326 #define ecb_expect(expr,value) __builtin_expect ((expr),(value))
327 #else
328 #define ecb_expect(expr,value) (expr)
329 #endif
330
331 #if ECB_GCC_VERSION(3,1) || ECB_CLANG_BUILTIN(__builtin_prefetch)
332 #define ecb_prefetch(addr,rw,locality) __builtin_prefetch (addr, rw, locality)
333 #else
334 #define ecb_prefetch(addr,rw,locality)
335 #endif
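/* illustrative sketch, not part of upstream ecb.h: prefetch a few elements
 * ahead while streaming over an array; rw 0 means "for reading", locality 1
 * means "low temporal locality" (prefetching a bit past the end is harmless
 * on targets with real prefetch instructions):
 *
 *   for (i = 0; i < n; ++i)
 *     {
 *       ecb_prefetch (&a [i + 8], 0, 1);
 *       sum += a [i];
 *     }
 */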
336
337 /* no emulation for ecb_decltype */
338 #if ECB_GCC_VERSION(4,5)
339 #define ecb_decltype(x) __decltype(x)
340 #elif ECB_GCC_VERSION(3,0)
341 #define ecb_decltype(x) __typeof(x)
342 #endif
343
344 #if _MSC_VER >= 1300
345 #define ecb_deprecated __declspec(deprecated)
346 #else
347 #define ecb_deprecated ecb_attribute ((__deprecated__))
348 #endif
349
350 #define ecb_noinline ecb_attribute ((__noinline__))
351 #define ecb_unused ecb_attribute ((__unused__))
352 #define ecb_const ecb_attribute ((__const__))
353 #define ecb_pure ecb_attribute ((__pure__))
354
355 /* http://msdn.microsoft.com/en-us/library/k6ktzx3s.aspx __declspec(noreturn) */
356 #if ECB_C11
357 #define ecb_noreturn _Noreturn
358 #else
359 #define ecb_noreturn ecb_attribute ((__noreturn__))
360 #endif
361
362 #if ECB_GCC_VERSION(4,3)
363 #define ecb_artificial ecb_attribute ((__artificial__))
364 #define ecb_hot ecb_attribute ((__hot__))
365 #define ecb_cold ecb_attribute ((__cold__))
366 #else
367 #define ecb_artificial
368 #define ecb_hot
369 #define ecb_cold
370 #endif
371
372 /* put around conditional expressions if you are very sure that the */
373 /* expression is mostly true or mostly false. note that these return */
374 /* booleans, not the expression. */
375 #define ecb_expect_false(expr) ecb_expect (!!(expr), 0)
376 #define ecb_expect_true(expr) ecb_expect (!!(expr), 1)
377 /* for compatibility to the rest of the world */
378 #define ecb_likely(expr) ecb_expect_true (expr)
379 #define ecb_unlikely(expr) ecb_expect_false (expr)
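/* illustrative sketch, not part of upstream ecb.h: mark an error path as
 * almost never taken so the compiler lays out the fast path first:
 *
 *   if (ecb_expect_false (fd < 0))
 *     return -1;
 *
 *   if (ecb_expect_true (n > 0))
 *     process (buf, n);
 */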
380
381 /* count trailing zero bits and count # of one bits */
382 #if ECB_GCC_VERSION(3,4) \
383 || (ECB_CLANG_BUILTIN(__builtin_clz) && ECB_CLANG_BUILTIN(__builtin_clzll) \
384 && ECB_CLANG_BUILTIN(__builtin_ctz) && ECB_CLANG_BUILTIN(__builtin_ctzll) \
385 && ECB_CLANG_BUILTIN(__builtin_popcount))
386 /* we assume int == 32 bit, long == 32 or 64 bit and long long == 64 bit */
387 #define ecb_ld32(x) (__builtin_clz (x) ^ 31)
388 #define ecb_ld64(x) (__builtin_clzll (x) ^ 63)
389 #define ecb_ctz32(x) __builtin_ctz (x)
390 #define ecb_ctz64(x) __builtin_ctzll (x)
391 #define ecb_popcount32(x) __builtin_popcount (x)
392 /* no popcountll */
393 #else
394 ecb_function_ int ecb_ctz32 (uint32_t x) ecb_const;
395 ecb_function_ int
396 ecb_ctz32 (uint32_t x)
397 {
398 int r = 0;
399
400 x &= ~x + 1; /* this isolates the lowest bit */
401
402 #if ECB_branchless_on_i386
403 r += !!(x & 0xaaaaaaaa) << 0;
404 r += !!(x & 0xcccccccc) << 1;
405 r += !!(x & 0xf0f0f0f0) << 2;
406 r += !!(x & 0xff00ff00) << 3;
407 r += !!(x & 0xffff0000) << 4;
408 #else
409 if (x & 0xaaaaaaaa) r += 1;
410 if (x & 0xcccccccc) r += 2;
411 if (x & 0xf0f0f0f0) r += 4;
412 if (x & 0xff00ff00) r += 8;
413 if (x & 0xffff0000) r += 16;
414 #endif
415
416 return r;
417 }
418
419 ecb_function_ int ecb_ctz64 (uint64_t x) ecb_const;
420 ecb_function_ int
421 ecb_ctz64 (uint64_t x)
422 {
423 int shift = x & 0xffffffffU ? 0 : 32;
424 return ecb_ctz32 (x >> shift) + shift;
425 }
426
427 ecb_function_ int ecb_popcount32 (uint32_t x) ecb_const;
428 ecb_function_ int
429 ecb_popcount32 (uint32_t x)
430 {
431 x -= (x >> 1) & 0x55555555;
432 x = ((x >> 2) & 0x33333333) + (x & 0x33333333);
433 x = ((x >> 4) + x) & 0x0f0f0f0f;
434 x *= 0x01010101;
435
436 return x >> 24;
437 }
438
439 ecb_function_ int ecb_ld32 (uint32_t x) ecb_const;
440 ecb_function_ int ecb_ld32 (uint32_t x)
441 {
442 int r = 0;
443
444 if (x >> 16) { x >>= 16; r += 16; }
445 if (x >> 8) { x >>= 8; r += 8; }
446 if (x >> 4) { x >>= 4; r += 4; }
447 if (x >> 2) { x >>= 2; r += 2; }
448 if (x >> 1) { r += 1; }
449
450 return r;
451 }
452
453 ecb_function_ int ecb_ld64 (uint64_t x) ecb_const;
454 ecb_function_ int ecb_ld64 (uint64_t x)
455 {
456 int r = 0;
457
458 if (x >> 32) { x >>= 32; r += 32; }
459
460 return r + ecb_ld32 (x);
461 }
462 #endif
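/* illustrative examples, not part of upstream ecb.h (results for a zero
 * argument are not meaningful for ctz/ld):
 *
 *   ecb_ctz32 (0x00000008)         ==  3   trailing zero bits
 *   ecb_ld32 (0x00000008)          ==  3   floor of the base-2 logarithm
 *   ecb_ctz64 (0x0000000100000000) == 32
 *   ecb_popcount32 (0x000000ff)    ==  8   number of set bits
 */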
463
464 ecb_function_ ecb_bool ecb_is_pot32 (uint32_t x) ecb_const;
465 ecb_function_ ecb_bool ecb_is_pot32 (uint32_t x) { return !(x & (x - 1)); } /* note: also true for 0 */
466 ecb_function_ ecb_bool ecb_is_pot64 (uint64_t x) ecb_const;
467 ecb_function_ ecb_bool ecb_is_pot64 (uint64_t x) { return !(x & (x - 1)); } /* note: also true for 0 */
468
469 ecb_function_ uint8_t ecb_bitrev8 (uint8_t x) ecb_const;
470 ecb_function_ uint8_t ecb_bitrev8 (uint8_t x)
471 {
472 return ( (x * 0x0802U & 0x22110U)
473 | (x * 0x8020U & 0x88440U)) * 0x10101U >> 16;
474 }
475
476 ecb_function_ uint16_t ecb_bitrev16 (uint16_t x) ecb_const;
477 ecb_function_ uint16_t ecb_bitrev16 (uint16_t x)
478 {
479 x = ((x >> 1) & 0x5555) | ((x & 0x5555) << 1);
480 x = ((x >> 2) & 0x3333) | ((x & 0x3333) << 2);
481 x = ((x >> 4) & 0x0f0f) | ((x & 0x0f0f) << 4);
482 x = ( x >> 8 ) | ( x << 8);
483
484 return x;
485 }
486
487 ecb_function_ uint32_t ecb_bitrev32 (uint32_t x) ecb_const;
488 ecb_function_ uint32_t ecb_bitrev32 (uint32_t x)
489 {
490 x = ((x >> 1) & 0x55555555) | ((x & 0x55555555) << 1);
491 x = ((x >> 2) & 0x33333333) | ((x & 0x33333333) << 2);
492 x = ((x >> 4) & 0x0f0f0f0f) | ((x & 0x0f0f0f0f) << 4);
493 x = ((x >> 8) & 0x00ff00ff) | ((x & 0x00ff00ff) << 8);
494 x = ( x >> 16 ) | ( x << 16);
495
496 return x;
497 }
498
499 /* popcount64 is only available on 64 bit cpus as gcc builtin */
500 /* so for this version we are lazy */
501 ecb_function_ int ecb_popcount64 (uint64_t x) ecb_const;
502 ecb_function_ int
503 ecb_popcount64 (uint64_t x)
504 {
505 return ecb_popcount32 (x) + ecb_popcount32 (x >> 32);
506 }
507
508 ecb_inline uint8_t ecb_rotl8 (uint8_t x, unsigned int count) ecb_const;
509 ecb_inline uint8_t ecb_rotr8 (uint8_t x, unsigned int count) ecb_const;
510 ecb_inline uint16_t ecb_rotl16 (uint16_t x, unsigned int count) ecb_const;
511 ecb_inline uint16_t ecb_rotr16 (uint16_t x, unsigned int count) ecb_const;
512 ecb_inline uint32_t ecb_rotl32 (uint32_t x, unsigned int count) ecb_const;
513 ecb_inline uint32_t ecb_rotr32 (uint32_t x, unsigned int count) ecb_const;
514 ecb_inline uint64_t ecb_rotl64 (uint64_t x, unsigned int count) ecb_const;
515 ecb_inline uint64_t ecb_rotr64 (uint64_t x, unsigned int count) ecb_const;
516
517 ecb_inline uint8_t ecb_rotl8 (uint8_t x, unsigned int count) { return (x >> ( 8 - count)) | (x << count); }
518 ecb_inline uint8_t ecb_rotr8 (uint8_t x, unsigned int count) { return (x << ( 8 - count)) | (x >> count); }
519 ecb_inline uint16_t ecb_rotl16 (uint16_t x, unsigned int count) { return (x >> (16 - count)) | (x << count); }
520 ecb_inline uint16_t ecb_rotr16 (uint16_t x, unsigned int count) { return (x << (16 - count)) | (x >> count); }
521 ecb_inline uint32_t ecb_rotl32 (uint32_t x, unsigned int count) { return (x >> (32 - count)) | (x << count); }
522 ecb_inline uint32_t ecb_rotr32 (uint32_t x, unsigned int count) { return (x << (32 - count)) | (x >> count); }
523 ecb_inline uint64_t ecb_rotl64 (uint64_t x, unsigned int count) { return (x >> (64 - count)) | (x << count); }
524 ecb_inline uint64_t ecb_rotr64 (uint64_t x, unsigned int count) { return (x << (64 - count)) | (x >> count); }
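/* illustrative examples, not part of upstream ecb.h:
 *
 *   ecb_rotl32 (0x80000001, 1) == 0x00000003
 *   ecb_rotr32 (0x80000001, 1) == 0xc0000000
 */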
525
526 #if ECB_GCC_VERSION(4,3) || (ECB_CLANG_BUILTIN(__builtin_bswap32) && ECB_CLANG_BUILTIN(__builtin_bswap64))
527 #define ecb_bswap16(x) (__builtin_bswap32 (x) >> 16)
528 #define ecb_bswap32(x) __builtin_bswap32 (x)
529 #define ecb_bswap64(x) __builtin_bswap64 (x)
530 #else
531 ecb_function_ uint16_t ecb_bswap16 (uint16_t x) ecb_const;
532 ecb_function_ uint16_t
533 ecb_bswap16 (uint16_t x)
534 {
535 return ecb_rotl16 (x, 8);
536 }
537
538 ecb_function_ uint32_t ecb_bswap32 (uint32_t x) ecb_const;
539 ecb_function_ uint32_t
540 ecb_bswap32 (uint32_t x)
541 {
542 return (((uint32_t)ecb_bswap16 (x)) << 16) | ecb_bswap16 (x >> 16);
543 }
544
545 ecb_function_ uint64_t ecb_bswap64 (uint64_t x) ecb_const;
546 ecb_function_ uint64_t
547 ecb_bswap64 (uint64_t x)
548 {
549 return (((uint64_t)ecb_bswap32 (x)) << 32) | ecb_bswap32 (x >> 32);
550 }
551 #endif
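/* illustrative examples, not part of upstream ecb.h:
 *
 *   ecb_bswap16 (0x1234)             == 0x3412
 *   ecb_bswap32 (0x11223344)         == 0x44332211
 *   ecb_bswap64 (0x1122334455667788) == 0x8877665544332211
 */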
552
553 #if ECB_GCC_VERSION(4,5) || ECB_CLANG_BUILTIN(__builtin_unreachable)
554 #define ecb_unreachable() __builtin_unreachable ()
555 #else
556 /* this seems to work fine, but gcc always emits a warning for it :/ */
557 ecb_inline void ecb_unreachable (void) ecb_noreturn;
558 ecb_inline void ecb_unreachable (void) { }
559 #endif
560
561 /* try to tell the compiler that some condition is definitely true */
562 #define ecb_assume(cond) if (!(cond)) ecb_unreachable (); else 0
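/* illustrative sketch, not part of upstream ecb.h: tell the optimiser about
 * an invariant it cannot prove itself; if the condition is ever false at
 * runtime, behaviour is undefined:
 *
 *   ecb_assume (len > 0);
 *   for (i = 0; i < len; ++i)
 *     sum += buf [i];
 */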
563
564 ecb_inline unsigned char ecb_byteorder_helper (void) ecb_const;
565 ecb_inline unsigned char
566 ecb_byteorder_helper (void)
567 {
568 /* the union code still generates code under pressure in gcc, */
569 /* but less than using pointers, and always seems to */
570 /* successfully return a constant. */
571 /* the reason why we have this horrible preprocessor mess */
572 /* is to avoid it in all cases, at least on common architectures */
573 /* or when using a recent enough gcc version (>= 4.6) */
574 #if __i386 || __i386__ || _M_IX86 || __amd64 || __amd64__ || _M_X64
575 return 0x44;
576 #elif __BYTE_ORDER__ && __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
577 return 0x44;
578 #elif __BYTE_ORDER__ && __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
579 return 0x11;
580 #else
581 union
582 {
583 uint32_t i;
584 uint8_t c;
585 } u = { 0x11223344 };
586 return u.c;
587 #endif
588 }
589
590 ecb_inline ecb_bool ecb_big_endian (void) ecb_const;
591 ecb_inline ecb_bool ecb_big_endian (void) { return ecb_byteorder_helper () == 0x11; }
592 ecb_inline ecb_bool ecb_little_endian (void) ecb_const;
593 ecb_inline ecb_bool ecb_little_endian (void) { return ecb_byteorder_helper () == 0x44; }
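/* illustrative sketch, not part of upstream ecb.h: these usually fold to a
 * compile-time constant, so a runtime check costs nothing:
 *
 *   uint32_t wire = ...;   value read in big-endian (network) byte order
 *   uint32_t host = ecb_little_endian () ? ecb_bswap32 (wire) : wire;
 */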
594
595 #if ECB_GCC_VERSION(3,0) || ECB_C99
596 #define ecb_mod(m,n) ((m) % (n) + ((m) % (n) < 0 ? (n) : 0))
597 #else
598 #define ecb_mod(m,n) ((m) < 0 ? ((n) - 1 - ((-1 - (m)) % (n))) : ((m) % (n)))
599 #endif
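/* illustrative examples, not part of upstream ecb.h: ecb_mod yields the
 * mathematical modulus, i.e. a result in [0, n) for positive n, unlike the
 * C % operator:
 *
 *   ecb_mod (-1, 5) == 4     whereas -1 % 5 == -1 in C99
 *   ecb_mod ( 7, 5) == 2
 */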
600
601 #if __cplusplus
602 template<typename T>
603 static inline T ecb_div_rd (T val, T div)
604 {
605 return val < 0 ? - ((-val + div - 1) / div) : (val ) / div;
606 }
607 template<typename T>
608 static inline T ecb_div_ru (T val, T div)
609 {
610 return val < 0 ? - ((-val ) / div) : (val + div - 1) / div;
611 }
612 #else
613 #define ecb_div_rd(val,div) ((val) < 0 ? - ((-(val) + (div) - 1) / (div)) : ((val) ) / (div))
614 #define ecb_div_ru(val,div) ((val) < 0 ? - ((-(val) ) / (div)) : ((val) + (div) - 1) / (div))
615 #endif
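/* illustrative examples, not part of upstream ecb.h: division rounding
 * down (towards -infinity) and up (towards +infinity) for positive divisors:
 *
 *   ecb_div_rd (-7, 4) == -2     ecb_div_ru (-7, 4) == -1
 *   ecb_div_rd ( 7, 4) ==  1     ecb_div_ru ( 7, 4) ==  2
 */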
616
617 #if ecb_cplusplus_does_not_suck
618 /* does not work for local types (http://www.open-std.org/jtc1/sc22/wg21/docs/papers/2008/n2657.htm) */
619 template<typename T, int N>
620 static inline int ecb_array_length (const T (&arr)[N])
621 {
622 return N;
623 }
624 #else
625 #define ecb_array_length(name) (sizeof (name) / sizeof (name [0]))
626 #endif
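/* illustrative sketch, not part of upstream ecb.h: works on true arrays
 * only, not on pointers:
 *
 *   static const int primes [] = { 2, 3, 5, 7, 11 };
 *   size_t n = ecb_array_length (primes);    n == 5
 */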
627
628 /*******************************************************************************/
629 /* floating point stuff, can be disabled by defining ECB_NO_LIBM */
630
631 /* basically, everything uses "ieee pure-endian" floating point numbers */
632 /* the only noteworthy exception is ancient armle, which uses order 43218765 */
633 #if 0 \
634 || __i386 || __i386__ \
635 || __amd64 || __amd64__ || __x86_64 || __x86_64__ \
636 || __powerpc__ || __ppc__ || __powerpc64__ || __ppc64__ \
637 || defined __s390__ || defined __s390x__ \
638 || defined __mips__ \
639 || defined __alpha__ \
640 || defined __hppa__ \
641 || defined __ia64__ \
642 || defined __m68k__ \
643 || defined __m88k__ \
644 || defined __sh__ \
645 || defined _M_IX86 || defined _M_AMD64 || defined _M_IA64 \
646 || (defined __arm__ && (defined __ARM_EABI__ || defined __EABI__ || defined __VFP_FP__ || defined _WIN32_WCE || defined __ANDROID__)) \
647 || defined __aarch64__
648 #define ECB_STDFP 1
649 #include <string.h> /* for memcpy */
650 #else
651 #define ECB_STDFP 0
652 #endif
653
654 #ifndef ECB_NO_LIBM
655
656 #include <math.h> /* for frexp*, ldexp*, INFINITY, NAN */
657
658 /* only the oldest of old doesn't have this one. solaris. */
659 #ifdef INFINITY
660 #define ECB_INFINITY INFINITY
661 #else
662 #define ECB_INFINITY HUGE_VAL
663 #endif
664
665 #ifdef NAN
666 #define ECB_NAN NAN
667 #else
668 #define ECB_NAN ECB_INFINITY
669 #endif
670
671 /* converts an ieee half/binary16 to a float */
672 ecb_function_ float ecb_binary16_to_float (uint16_t x) ecb_const;
673 ecb_function_ float
674 ecb_binary16_to_float (uint16_t x)
675 {
676 int e = (x >> 10) & 0x1f;
677 int m = x & 0x3ff;
678 float r;
679
680 if (!e ) r = ldexpf (m , -24);
681 else if (e != 31) r = ldexpf (m + 0x400, e - 25);
682 else if (m ) r = ECB_NAN;
683 else r = ECB_INFINITY;
684
685 return x & 0x8000 ? -r : r;
686 }
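/* illustrative examples, not part of upstream ecb.h:
 *
 *   ecb_binary16_to_float (0x3c00) ==  1.0f
 *   ecb_binary16_to_float (0xc000) == -2.0f
 *   ecb_binary16_to_float (0x7c00) == +infinity
 */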
687
688 /* convert a float to ieee single/binary32 */
689 ecb_function_ uint32_t ecb_float_to_binary32 (float x) ecb_const;
690 ecb_function_ uint32_t
691 ecb_float_to_binary32 (float x)
692 {
693 uint32_t r;
694
695 #if ECB_STDFP
696 memcpy (&r, &x, 4);
697 #else
698 /* slow emulation, works for anything but -0 */
699 uint32_t m;
700 int e;
701
702 if (x == 0e0f ) return 0x00000000U;
703 if (x > +3.40282346638528860e+38f) return 0x7f800000U;
704 if (x < -3.40282346638528860e+38f) return 0xff800000U;
705 if (x != x ) return 0x7fbfffffU;
706
707 m = frexpf (x, &e) * 0x1000000U;
708
709 r = m & 0x80000000U;
710
711 if (r)
712 m = -m;
713
714 if (e <= -126)
715 {
716 m &= 0xffffffU;
717 m >>= (-125 - e);
718 e = -126;
719 }
720
721 r |= (e + 126) << 23;
722 r |= m & 0x7fffffU;
723 #endif
724
725 return r;
726 }
727
728 /* converts an ieee single/binary32 to a float */
729 ecb_function_ float ecb_binary32_to_float (uint32_t x) ecb_const;
730 ecb_function_ float
731 ecb_binary32_to_float (uint32_t x)
732 {
733 float r;
734
735 #if ECB_STDFP
736 memcpy (&r, &x, 4);
737 #else
738 /* emulation, only works for normals and subnormals and +0 */
739 int neg = x >> 31;
740 int e = (x >> 23) & 0xffU;
741
742 x &= 0x7fffffU;
743
744 if (e)
745 x |= 0x800000U;
746 else
747 e = 1;
748
749 /* we distrust ldexpf a bit and do the 2**-24 scaling by an extra multiply */
750 r = ldexpf (x * (0.5f / 0x800000U), e - 126);
751
752 r = neg ? -r : r;
753 #endif
754
755 return r;
756 }
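/* illustrative sketch, not part of upstream ecb.h: round-tripping a float
 * through its IEEE binary32 bit pattern, e.g. for portable serialisation:
 *
 *   uint32_t bits = ecb_float_to_binary32 (3.25f);    bits == 0x40500000
 *   float    f    = ecb_binary32_to_float (bits);     f    == 3.25f
 */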
757
758 /* convert a double to ieee double/binary64 */
759 ecb_function_ uint64_t ecb_double_to_binary64 (double x) ecb_const;
760 ecb_function_ uint64_t
761 ecb_double_to_binary64 (double x)
762 {
763 uint64_t r;
764
765 #if ECB_STDFP
766 memcpy (&r, &x, 8);
767 #else
768 /* slow emulation, works for anything but -0 */
769 uint64_t m;
770 int e;
771
772 if (x == 0e0 ) return 0x0000000000000000U;
773 if (x > +1.79769313486231470e+308) return 0x7ff0000000000000U;
774 if (x < -1.79769313486231470e+308) return 0xfff0000000000000U;
775 if (x != x ) return 0x7ff7ffffffffffffU;
776
777 m = frexp (x, &e) * 0x20000000000000U;
778
779 r = m & 0x8000000000000000U;
780
781 if (r)
782 m = -m;
783
784 if (e <= -1022)
785 {
786 m &= 0x1fffffffffffffU;
787 m >>= (-1021 - e);
788 e = -1022;
789 }
790
791 r |= ((uint64_t)(e + 1022)) << 52;
792 r |= m & 0xfffffffffffffU;
793 #endif
794
795 return r;
796 }
797
798 /* converts an ieee double/binary64 to a double */
799 ecb_function_ double ecb_binary64_to_double (uint64_t x) ecb_const;
800 ecb_function_ double
801 ecb_binary64_to_double (uint64_t x)
802 {
803 double r;
804
805 #if ECB_STDFP
806 memcpy (&r, &x, 8);
807 #else
808 /* emulation, only works for normals and subnormals and +0 */
809 int neg = x >> 63;
810 int e = (x >> 52) & 0x7ffU;
811
812 x &= 0xfffffffffffffU;
813
814 if (e)
815 x |= 0x10000000000000U;
816 else
817 e = 1;
818
819 /* we distrust ldexp a bit and do the 2**-53 scaling by an extra multiply */
820 r = ldexp (x * (0.5 / 0x10000000000000U), e - 1022);
821
822 r = neg ? -r : r;
823 #endif
824
825 return r;
826 }
827
828 #endif
829
830 #endif
831