ViewVC Help
View File | Revision Log | Show Annotations | Download File
/cvs/libecb/ecb.h
Revision: 1.135
Committed: Tue Jun 10 01:07:18 2014 UTC (10 years ago) by root
Content type: text/plain
Branch: MAIN
Changes since 1.134: +6 -1 lines
Log Message:
*** empty log message ***

File Contents

# Content
1 /*
2 * libecb - http://software.schmorp.de/pkg/libecb
3 *
4 * Copyright (c) 2009-2014 Marc Alexander Lehmann <libecb@schmorp.de>
5 * Copyright (c) 2011 Emanuele Giaquinta
6 * All rights reserved.
7 *
8 * Redistribution and use in source and binary forms, with or without modifica-
9 * tion, are permitted provided that the following conditions are met:
10 *
11 * 1. Redistributions of source code must retain the above copyright notice,
12 * this list of conditions and the following disclaimer.
13 *
14 * 2. Redistributions in binary form must reproduce the above copyright
15 * notice, this list of conditions and the following disclaimer in the
16 * documentation and/or other materials provided with the distribution.
17 *
18 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
19 * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MER-
20 * CHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
21 * EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPE-
22 * CIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
23 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
24 * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
25 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTH-
26 * ERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
27 * OF THE POSSIBILITY OF SUCH DAMAGE.
28 *
29 * Alternatively, the contents of this file may be used under the terms of
30 * the GNU General Public License ("GPL") version 2 or any later version,
31 * in which case the provisions of the GPL are applicable instead of
32 * the above. If you wish to allow the use of your version of this file
33 * only under the terms of the GPL and not to allow others to use your
34 * version of this file under the BSD license, indicate your decision
35 * by deleting the provisions above and replace them with the notice
36 * and other provisions required by the GPL. If you do not delete the
37 * provisions above, a recipient may use your version of this file under
38 * either the BSD or the GPL.
39 */
40
#ifndef ECB_H
#define ECB_H

/* 16 bits major, 16 bits minor */
#define ECB_VERSION 0x00010003

#ifdef _WIN32
  /* native windows toolchains historically lack <inttypes.h>, */
  /* so provide the fixed-width types ourselves */
  typedef   signed char   int8_t;
  typedef unsigned char  uint8_t;
  typedef   signed short  int16_t;
  typedef unsigned short uint16_t;
  typedef   signed int    int32_t;
  typedef unsigned int   uint32_t;
  #if __GNUC__
    typedef   signed long long int64_t;
    typedef unsigned long long uint64_t;
  #else /* _MSC_VER || __BORLANDC__ */
    typedef   signed __int64  int64_t;
    typedef unsigned __int64 uint64_t;
  #endif
  #ifdef _WIN64
    #define ECB_PTRSIZE 8
    typedef uint64_t uintptr_t;
    typedef  int64_t  intptr_t;
  #else
    #define ECB_PTRSIZE 4
    typedef uint32_t uintptr_t;
    typedef  int32_t  intptr_t;
  #endif
#else
  #include <inttypes.h>
  /* ECB_PTRSIZE: size of a data pointer in bytes, deduced from uintmax_t range */
  #if UINTMAX_MAX > 0xffffffffU
    #define ECB_PTRSIZE 8
  #else
    #define ECB_PTRSIZE 4
  #endif
#endif
78
/* work around x32 idiocy by defining proper macros */
/* (x32 is the amd64 ABI with 32 bit pointers: _ILP32 on an amd64 target) */
#if __amd64 || __x86_64 || _M_AMD64 || _M_X64
  #if _ILP32
    #define ECB_AMD64_X32 1
  #else
    #define ECB_AMD64 1
  #endif
#endif

/* many compilers define _GNUC_ to some versions but then only implement
 * what their idiot authors think are the "more important" extensions,
 * causing enormous grief in return for some better fake benchmark numbers.
 * or so.
 * we try to detect these and simply assume they are not gcc - if they have
 * an issue with that they should have done it right in the first place.
 */
#ifndef ECB_GCC_VERSION
  #if !defined __GNUC_MINOR__ || defined __INTEL_COMPILER || defined __SUNPRO_C || defined __SUNPRO_CC || defined __llvm__ || defined __clang__
    #define ECB_GCC_VERSION(major,minor) 0
  #else
    /* true iff the compiler is gcc >= major.minor */
    #define ECB_GCC_VERSION(major,minor) (__GNUC__ > (major) || (__GNUC__ == (major) && __GNUC_MINOR__ >= (minor)))
  #endif
#endif

/* ECB_CPP is the __cplusplus value (0 when compiling C) */
#define ECB_CPP   (__cplusplus+0)
#define ECB_CPP11 (__cplusplus >= 201103L)

#if ECB_CPP
  #define ECB_C            0
  #define ECB_STDC_VERSION 0
#else
  #define ECB_C            1
  #define ECB_STDC_VERSION __STDC_VERSION__
#endif

#define ECB_C99   (ECB_STDC_VERSION >= 199901L)
#define ECB_C11   (ECB_STDC_VERSION >= 201112L)

#if ECB_CPP
  #define ECB_EXTERN_C extern "C"
  #define ECB_EXTERN_C_BEG ECB_EXTERN_C {
  #define ECB_EXTERN_C_END }
#else
  #define ECB_EXTERN_C extern
  #define ECB_EXTERN_C_BEG
  #define ECB_EXTERN_C_END
#endif
126
/*****************************************************************************/

/* ECB_NO_THREADS - ecb is not used by multiple threads, ever */
/* ECB_NO_SMP - ecb might be used in multiple threads, but only on a single cpu */

#if ECB_NO_THREADS
  #define ECB_NO_SMP 1
#endif

#if ECB_NO_SMP
  /* a single cpu needs no hardware barrier - an empty statement suffices */
  #define ECB_MEMORY_FENCE do { } while (0)
#endif

/* per-architecture inline-asm full fences; acquire/release variants are */
/* only defined where something cheaper than the full fence exists, the */
/* fallback at the end of this section maps the missing ones to the full fence */
#ifndef ECB_MEMORY_FENCE
  #if ECB_GCC_VERSION(2,5) || defined __INTEL_COMPILER || (__llvm__ && __GNUC__) || __SUNPRO_C >= 0x5110 || __SUNPRO_CC >= 0x5110
    #if __i386 || __i386__
      /* locked read-modify-write of the stack redzone acts as a full fence on x86 */
      #define ECB_MEMORY_FENCE         __asm__ __volatile__ ("lock; orb $0, -1(%%esp)" : : : "memory")
      #define ECB_MEMORY_FENCE_ACQUIRE __asm__ __volatile__ ("" : : : "memory")
      #define ECB_MEMORY_FENCE_RELEASE __asm__ __volatile__ ("")
    #elif __amd64 || __amd64__ || __x86_64 || __x86_64__
      #define ECB_MEMORY_FENCE         __asm__ __volatile__ ("mfence" : : : "memory")
      #define ECB_MEMORY_FENCE_ACQUIRE __asm__ __volatile__ ("" : : : "memory")
      #define ECB_MEMORY_FENCE_RELEASE __asm__ __volatile__ ("")
    #elif __powerpc__ || __ppc__ || __powerpc64__ || __ppc64__
      #define ECB_MEMORY_FENCE         __asm__ __volatile__ ("sync" : : : "memory")
    #elif defined __ARM_ARCH_6__ || defined __ARM_ARCH_6J__ \
       || defined __ARM_ARCH_6K__ || defined __ARM_ARCH_6ZK__
      /* armv6 barrier via cp15 coprocessor write */
      #define ECB_MEMORY_FENCE         __asm__ __volatile__ ("mcr p15,0,%0,c7,c10,5" : : "r" (0) : "memory")
    #elif defined __ARM_ARCH_7__ || defined __ARM_ARCH_7A__ \
       || defined __ARM_ARCH_7M__ || defined __ARM_ARCH_7R__
      #define ECB_MEMORY_FENCE         __asm__ __volatile__ ("dmb" : : : "memory")
    #elif __aarch64__
      #define ECB_MEMORY_FENCE         __asm__ __volatile__ ("dmb ish" : : : "memory")
    #elif (__sparc || __sparc__) && !__sparcv8
      #define ECB_MEMORY_FENCE         __asm__ __volatile__ ("membar #LoadStore | #LoadLoad | #StoreStore | #StoreLoad" : : : "memory")
      #define ECB_MEMORY_FENCE_ACQUIRE __asm__ __volatile__ ("membar #LoadStore | #LoadLoad" : : : "memory")
      #define ECB_MEMORY_FENCE_RELEASE __asm__ __volatile__ ("membar #LoadStore | #StoreStore")
    #elif defined __s390__ || defined __s390x__
      #define ECB_MEMORY_FENCE         __asm__ __volatile__ ("bcr 15,0" : : : "memory")
    #elif defined __mips__
      /* GNU/Linux emulates sync on mips1 architectures, so we force its use */
      /* anybody else who still uses mips1 is supposed to send in their version, with detection code. */
      #define ECB_MEMORY_FENCE         __asm__ __volatile__ (".set mips2; sync; .set mips0" : : : "memory")
    #elif defined __alpha__
      #define ECB_MEMORY_FENCE         __asm__ __volatile__ ("mb" : : : "memory")
    #elif defined __hppa__
      #define ECB_MEMORY_FENCE         __asm__ __volatile__ ("" : : : "memory")
      #define ECB_MEMORY_FENCE_RELEASE __asm__ __volatile__ ("")
    #elif defined __ia64__
      #define ECB_MEMORY_FENCE         __asm__ __volatile__ ("mf" : : : "memory")
    #elif defined __m68k__
      #define ECB_MEMORY_FENCE         __asm__ __volatile__ ("" : : : "memory")
    #elif defined __m88k__
      #define ECB_MEMORY_FENCE         __asm__ __volatile__ ("tb1 0,%%r0,128" : : : "memory")
    #elif defined __sh__
      #define ECB_MEMORY_FENCE         __asm__ __volatile__ ("" : : : "memory")
    #endif
  #endif
#endif
186
/* compiler-intrinsic fences, tried when no arch-specific asm fence matched */
#ifndef ECB_MEMORY_FENCE
  #if ECB_GCC_VERSION(4,7)
    /* see comment below (stdatomic.h) about the C11 memory model. */
    #define ECB_MEMORY_FENCE         __atomic_thread_fence (__ATOMIC_SEQ_CST)
    #define ECB_MEMORY_FENCE_ACQUIRE __atomic_thread_fence (__ATOMIC_ACQUIRE)
    #define ECB_MEMORY_FENCE_RELEASE __atomic_thread_fence (__ATOMIC_RELEASE)

    /* The __has_feature syntax from clang is so misdesigned that we cannot use it
     * without risking compile time errors with other compilers. We *could*
     * define our own ecb_clang_has_feature, but I just can't be bothered to work
     * around this shit time and again.
     * #elif defined __clang && __has_feature (cxx_atomic)
     *   // see comment below (stdatomic.h) about the C11 memory model.
     *   #define ECB_MEMORY_FENCE         __c11_atomic_thread_fence (__ATOMIC_SEQ_CST)
     *   #define ECB_MEMORY_FENCE_ACQUIRE __c11_atomic_thread_fence (__ATOMIC_ACQUIRE)
     *   #define ECB_MEMORY_FENCE_RELEASE __c11_atomic_thread_fence (__ATOMIC_RELEASE)
     */

  #elif ECB_GCC_VERSION(4,4) || defined __INTEL_COMPILER || defined __clang__
    #define ECB_MEMORY_FENCE __sync_synchronize ()
  #elif _MSC_VER >= 1500 /* VC++ 2008 */
    /* apparently, microsoft broke all the memory barrier stuff in Visual Studio 2008... */
    #pragma intrinsic(_ReadBarrier,_WriteBarrier,_ReadWriteBarrier)
    #define ECB_MEMORY_FENCE         _ReadWriteBarrier (); MemoryBarrier()
    #define ECB_MEMORY_FENCE_ACQUIRE _ReadWriteBarrier (); MemoryBarrier() /* according to msdn, _ReadBarrier is not a load fence */
    #define ECB_MEMORY_FENCE_RELEASE _WriteBarrier (); MemoryBarrier()
  #elif _MSC_VER >= 1400 /* VC++ 2005 */
    #pragma intrinsic(_ReadBarrier,_WriteBarrier,_ReadWriteBarrier)
    #define ECB_MEMORY_FENCE         _ReadWriteBarrier ()
    #define ECB_MEMORY_FENCE_ACQUIRE _ReadWriteBarrier () /* according to msdn, _ReadBarrier is not a load fence */
    #define ECB_MEMORY_FENCE_RELEASE _WriteBarrier ()
  #elif defined _WIN32
    #include <WinNT.h>
    #define ECB_MEMORY_FENCE MemoryBarrier () /* actually just xchg on x86... scary */
  #elif __SUNPRO_C >= 0x5110 || __SUNPRO_CC >= 0x5110
    #include <mbarrier.h>
    #define ECB_MEMORY_FENCE         __machine_rw_barrier ()
    #define ECB_MEMORY_FENCE_ACQUIRE __machine_r_barrier  ()
    #define ECB_MEMORY_FENCE_RELEASE __machine_w_barrier  ()
  #elif __xlC__
    #define ECB_MEMORY_FENCE __sync ()
  #endif
#endif

#ifndef ECB_MEMORY_FENCE
  #if ECB_C11 && !defined __STDC_NO_ATOMICS__
    /* we assume that these memory fences work on all variables/all memory accesses, */
    /* not just C11 atomics and atomic accesses */
    #include <stdatomic.h>
    /* Unfortunately, neither gcc 4.7 nor clang 3.1 generate any instructions for */
    /* any fence other than seq_cst, which isn't very efficient for us. */
    /* Why that is, we don't know - either the C11 memory model is quite useless */
    /* for most usages, or gcc and clang have a bug */
    /* I *currently* lean towards the latter, and inefficiently implement */
    /* all three of ecb's fences as a seq_cst fence */
    /* Update, gcc-4.8 generates mfence for all c++ fences, but nothing */
    /* for all __atomic_thread_fence's except seq_cst */
    #define ECB_MEMORY_FENCE atomic_thread_fence (memory_order_seq_cst)
  #endif
#endif

/* last resort: a lock/unlock pair on a private pthread mutex acts as a fence */
#ifndef ECB_MEMORY_FENCE
  #if !ECB_AVOID_PTHREADS
    /*
     * if you get undefined symbol references to pthread_mutex_lock,
     * or failure to find pthread.h, then you should implement
     * the ECB_MEMORY_FENCE operations for your cpu/compiler
     * OR provide pthread.h and link against the posix thread library
     * of your system.
     */
    #include <pthread.h>
    #define ECB_NEEDS_PTHREADS 1
    #define ECB_MEMORY_FENCE_NEEDS_PTHREADS 1

    static pthread_mutex_t ecb_mf_lock = PTHREAD_MUTEX_INITIALIZER;
    #define ECB_MEMORY_FENCE do { pthread_mutex_lock (&ecb_mf_lock); pthread_mutex_unlock (&ecb_mf_lock); } while (0)
  #endif
#endif

/* where no cheaper acquire/release fences were defined above, fall back to the full fence */
#if !defined ECB_MEMORY_FENCE_ACQUIRE && defined ECB_MEMORY_FENCE
  #define ECB_MEMORY_FENCE_ACQUIRE ECB_MEMORY_FENCE
#endif

#if !defined ECB_MEMORY_FENCE_RELEASE && defined ECB_MEMORY_FENCE
  #define ECB_MEMORY_FENCE_RELEASE ECB_MEMORY_FENCE
#endif
273
/*****************************************************************************/

/* ecb_inline: "static inline" in the best dialect available */
#if __cplusplus
  #define ecb_inline static inline
#elif ECB_GCC_VERSION(2,5)
  #define ecb_inline static __inline__
#elif ECB_C99
  #define ecb_inline static inline
#else
  #define ecb_inline static
#endif

/* ecb_restrict: C99 restrict, or the gcc spelling, or nothing */
#if ECB_GCC_VERSION(3,3)
  #define ecb_restrict __restrict__
#elif ECB_C99
  #define ecb_restrict restrict
#else
  #define ecb_restrict
#endif

typedef int ecb_bool;

#define ECB_CONCAT_(a, b) a ## b
#define ECB_CONCAT(a, b) ECB_CONCAT_(a, b)
#define ECB_STRINGIFY_(a) # a
#define ECB_STRINGIFY(a) ECB_STRINGIFY_(a)

/* all ecb functions are small and defined in this header */
#define ecb_function_ ecb_inline

#if ECB_GCC_VERSION(3,1)
  #define ecb_attribute(attrlist)        __attribute__(attrlist)
  #define ecb_is_constant(expr)          __builtin_constant_p (expr)
  #define ecb_expect(expr,value)         __builtin_expect ((expr),(value))
  #define ecb_prefetch(addr,rw,locality) __builtin_prefetch (addr, rw, locality)
#else
  #define ecb_attribute(attrlist)

  /* possible C11 impl for integral types
  typedef struct ecb_is_constant_struct ecb_is_constant_struct;
  #define ecb_is_constant(expr)          _Generic ((1 ? (struct ecb_is_constant_struct *)0 : (void *)((expr) - (expr)), ecb_is_constant_struct *: 0, default: 1)) */

  #define ecb_is_constant(expr)          0
  #define ecb_expect(expr,value)         (expr)
  #define ecb_prefetch(addr,rw,locality)
#endif

/* no emulation for ecb_decltype */
#if ECB_GCC_VERSION(4,5)
  #define ecb_decltype(x) __decltype(x)
#elif ECB_GCC_VERSION(3,0)
  #define ecb_decltype(x) __typeof(x)
#endif

#if _MSC_VER >= 1300
  #define ecb_deprecated __declspec(deprecated)
#else
  #define ecb_deprecated ecb_attribute ((__deprecated__))
#endif

#define ecb_noinline   ecb_attribute ((__noinline__))
#define ecb_unused     ecb_attribute ((__unused__))
#define ecb_const      ecb_attribute ((__const__))
#define ecb_pure       ecb_attribute ((__pure__))

#if ECB_C11
  #define ecb_noreturn   _Noreturn
#else
  #define ecb_noreturn   ecb_attribute ((__noreturn__))
#endif

#if ECB_GCC_VERSION(4,3)
  #define ecb_artificial ecb_attribute ((__artificial__))
  #define ecb_hot        ecb_attribute ((__hot__))
  #define ecb_cold       ecb_attribute ((__cold__))
#else
  #define ecb_artificial
  #define ecb_hot
  #define ecb_cold
#endif

/* put around conditional expressions if you are very sure that the */
/* expression is mostly true or mostly false. note that these return */
/* booleans, not the expression. */
#define ecb_expect_false(expr) ecb_expect (!!(expr), 0)
#define ecb_expect_true(expr)  ecb_expect (!!(expr), 1)
/* for compatibility to the rest of the world */
#define ecb_likely(expr)   ecb_expect_true  (expr)
#define ecb_unlikely(expr) ecb_expect_false (expr)
362
/* count trailing zero bits and count # of one bits */
#if ECB_GCC_VERSION(3,4)
  /* we assume int == 32 bit, long == 32 or 64 bit and long long == 64 bit */
  /* ld == "log2 floor": __builtin_clz (x) ^ 31 == 31 - clz == index of highest set bit */
  #define ecb_ld32(x)      (__builtin_clz   (x) ^ 31)
  #define ecb_ld64(x)      (__builtin_clzll (x) ^ 63)
  #define ecb_ctz32(x)      __builtin_ctz      (x)
  #define ecb_ctz64(x)      __builtin_ctzll    (x)
  #define ecb_popcount32(x) __builtin_popcount (x)
  /* no popcountll */
#else
373 ecb_function_ int ecb_ctz32 (uint32_t x) ecb_const;
374 ecb_function_ int
375 ecb_ctz32 (uint32_t x)
376 {
377 int r = 0;
378
379 x &= ~x + 1; /* this isolates the lowest bit */
380
381 #if ECB_branchless_on_i386
382 r += !!(x & 0xaaaaaaaa) << 0;
383 r += !!(x & 0xcccccccc) << 1;
384 r += !!(x & 0xf0f0f0f0) << 2;
385 r += !!(x & 0xff00ff00) << 3;
386 r += !!(x & 0xffff0000) << 4;
387 #else
388 if (x & 0xaaaaaaaa) r += 1;
389 if (x & 0xcccccccc) r += 2;
390 if (x & 0xf0f0f0f0) r += 4;
391 if (x & 0xff00ff00) r += 8;
392 if (x & 0xffff0000) r += 16;
393 #endif
394
395 return r;
396 }
397
398 ecb_function_ int ecb_ctz64 (uint64_t x) ecb_const;
399 ecb_function_ int
400 ecb_ctz64 (uint64_t x)
401 {
402 int shift = x & 0xffffffffU ? 0 : 32;
403 return ecb_ctz32 (x >> shift) + shift;
404 }
405
406 ecb_function_ int ecb_popcount32 (uint32_t x) ecb_const;
407 ecb_function_ int
408 ecb_popcount32 (uint32_t x)
409 {
410 x -= (x >> 1) & 0x55555555;
411 x = ((x >> 2) & 0x33333333) + (x & 0x33333333);
412 x = ((x >> 4) + x) & 0x0f0f0f0f;
413 x *= 0x01010101;
414
415 return x >> 24;
416 }
417
418 ecb_function_ int ecb_ld32 (uint32_t x) ecb_const;
419 ecb_function_ int ecb_ld32 (uint32_t x)
420 {
421 int r = 0;
422
423 if (x >> 16) { x >>= 16; r += 16; }
424 if (x >> 8) { x >>= 8; r += 8; }
425 if (x >> 4) { x >>= 4; r += 4; }
426 if (x >> 2) { x >>= 2; r += 2; }
427 if (x >> 1) { r += 1; }
428
429 return r;
430 }
431
432 ecb_function_ int ecb_ld64 (uint64_t x) ecb_const;
433 ecb_function_ int ecb_ld64 (uint64_t x)
434 {
435 int r = 0;
436
437 if (x >> 32) { x >>= 32; r += 32; }
438
439 return r + ecb_ld32 (x);
440 }
441 #endif
442
443 ecb_function_ ecb_bool ecb_is_pot32 (uint32_t x) ecb_const;
444 ecb_function_ ecb_bool ecb_is_pot32 (uint32_t x) { return !(x & (x - 1)); }
445 ecb_function_ ecb_bool ecb_is_pot64 (uint64_t x) ecb_const;
446 ecb_function_ ecb_bool ecb_is_pot64 (uint64_t x) { return !(x & (x - 1)); }
447
448 ecb_function_ uint8_t ecb_bitrev8 (uint8_t x) ecb_const;
449 ecb_function_ uint8_t ecb_bitrev8 (uint8_t x)
450 {
451 return ( (x * 0x0802U & 0x22110U)
452 | (x * 0x8020U & 0x88440U)) * 0x10101U >> 16;
453 }
454
455 ecb_function_ uint16_t ecb_bitrev16 (uint16_t x) ecb_const;
456 ecb_function_ uint16_t ecb_bitrev16 (uint16_t x)
457 {
458 x = ((x >> 1) & 0x5555) | ((x & 0x5555) << 1);
459 x = ((x >> 2) & 0x3333) | ((x & 0x3333) << 2);
460 x = ((x >> 4) & 0x0f0f) | ((x & 0x0f0f) << 4);
461 x = ( x >> 8 ) | ( x << 8);
462
463 return x;
464 }
465
466 ecb_function_ uint32_t ecb_bitrev32 (uint32_t x) ecb_const;
467 ecb_function_ uint32_t ecb_bitrev32 (uint32_t x)
468 {
469 x = ((x >> 1) & 0x55555555) | ((x & 0x55555555) << 1);
470 x = ((x >> 2) & 0x33333333) | ((x & 0x33333333) << 2);
471 x = ((x >> 4) & 0x0f0f0f0f) | ((x & 0x0f0f0f0f) << 4);
472 x = ((x >> 8) & 0x00ff00ff) | ((x & 0x00ff00ff) << 8);
473 x = ( x >> 16 ) | ( x << 16);
474
475 return x;
476 }
477
478 /* popcount64 is only available on 64 bit cpus as gcc builtin */
479 /* so for this version we are lazy */
480 ecb_function_ int ecb_popcount64 (uint64_t x) ecb_const;
481 ecb_function_ int
482 ecb_popcount64 (uint64_t x)
483 {
484 return ecb_popcount32 (x) + ecb_popcount32 (x >> 32);
485 }
486
487 ecb_inline uint8_t ecb_rotl8 (uint8_t x, unsigned int count) ecb_const;
488 ecb_inline uint8_t ecb_rotr8 (uint8_t x, unsigned int count) ecb_const;
489 ecb_inline uint16_t ecb_rotl16 (uint16_t x, unsigned int count) ecb_const;
490 ecb_inline uint16_t ecb_rotr16 (uint16_t x, unsigned int count) ecb_const;
491 ecb_inline uint32_t ecb_rotl32 (uint32_t x, unsigned int count) ecb_const;
492 ecb_inline uint32_t ecb_rotr32 (uint32_t x, unsigned int count) ecb_const;
493 ecb_inline uint64_t ecb_rotl64 (uint64_t x, unsigned int count) ecb_const;
494 ecb_inline uint64_t ecb_rotr64 (uint64_t x, unsigned int count) ecb_const;
495
496 ecb_inline uint8_t ecb_rotl8 (uint8_t x, unsigned int count) { return (x >> ( 8 - count)) | (x << count); }
497 ecb_inline uint8_t ecb_rotr8 (uint8_t x, unsigned int count) { return (x << ( 8 - count)) | (x >> count); }
498 ecb_inline uint16_t ecb_rotl16 (uint16_t x, unsigned int count) { return (x >> (16 - count)) | (x << count); }
499 ecb_inline uint16_t ecb_rotr16 (uint16_t x, unsigned int count) { return (x << (16 - count)) | (x >> count); }
500 ecb_inline uint32_t ecb_rotl32 (uint32_t x, unsigned int count) { return (x >> (32 - count)) | (x << count); }
501 ecb_inline uint32_t ecb_rotr32 (uint32_t x, unsigned int count) { return (x << (32 - count)) | (x >> count); }
502 ecb_inline uint64_t ecb_rotl64 (uint64_t x, unsigned int count) { return (x >> (64 - count)) | (x << count); }
503 ecb_inline uint64_t ecb_rotr64 (uint64_t x, unsigned int count) { return (x << (64 - count)) | (x >> count); }
504
#if ECB_GCC_VERSION(4,3)
  /* gcc has bswap builtins for 32/64 bit; synthesise 16 bit from the 32 bit one */
  #define ecb_bswap16(x) (__builtin_bswap32 (x) >> 16)
  #define ecb_bswap32(x)  __builtin_bswap32 (x)
  #define ecb_bswap64(x)  __builtin_bswap64 (x)
#else
ecb_function_ uint16_t ecb_bswap16 (uint16_t x) ecb_const;
ecb_function_ uint16_t
ecb_bswap16 (uint16_t x)
{
  /* swapping the two bytes of a 16 bit value is a rotate by 8 */
  return ecb_rotl16 (x, 8);
}

ecb_function_ uint32_t ecb_bswap32 (uint32_t x) ecb_const;
ecb_function_ uint32_t
ecb_bswap32 (uint32_t x)
{
  /* byteswap each 16 bit half, then swap the halves */
  return (((uint32_t)ecb_bswap16 (x)) << 16) | ecb_bswap16 (x >> 16);
}

ecb_function_ uint64_t ecb_bswap64 (uint64_t x) ecb_const;
ecb_function_ uint64_t
ecb_bswap64 (uint64_t x)
{
  /* byteswap each 32 bit half, then swap the halves */
  return (((uint64_t)ecb_bswap32 (x)) << 32) | ecb_bswap32 (x >> 32);
}
#endif
531
#if ECB_GCC_VERSION(4,5)
  #define ecb_unreachable() __builtin_unreachable ()
#else
  /* this seems to work fine, but gcc always emits a warning for it :/ */
  /* (an empty noreturn function that does return) */
  ecb_inline void ecb_unreachable (void) ecb_noreturn;
  ecb_inline void ecb_unreachable (void) { }
#endif

/* try to tell the compiler that some condition is definitely true */
#define ecb_assume(cond) if (!(cond)) ecb_unreachable (); else 0
542
/* returns 0x44 on little endian, 0x11 on big endian hosts */
ecb_inline unsigned char ecb_byteorder_helper (void) ecb_const;
ecb_inline unsigned char
ecb_byteorder_helper (void)
{
  /* the union code still generates code under pressure in gcc, */
  /* but less than using pointers, and always seems to */
  /* successfully return a constant. */
  /* the reason why we have this horrible preprocessor mess */
  /* is to avoid it in all cases, at least on common architectures */
  /* or when using a recent enough gcc version (>= 4.6) */
#if __i386 || __i386__ || _M_X86 || __amd64 || __amd64__ || _M_X64
  return 0x44; /* x86 is always little endian */
#elif __BYTE_ORDER__ && __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
  return 0x44;
#elif __BYTE_ORDER__ && __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
  return 0x11;
#else
  /* generic fallback: inspect the first byte of a known 32 bit pattern */
  /* NOTE(review): reading c after writing i is fine in C; in C++ reading an */
  /* inactive union member is formally UB - confirm this path matters for C++ */
  union
  {
    uint32_t i;
    uint8_t c;
  } u = { 0x11223344 };
  return u.c;
#endif
}
568
569 ecb_inline ecb_bool ecb_big_endian (void) ecb_const;
570 ecb_inline ecb_bool ecb_big_endian (void) { return ecb_byteorder_helper () == 0x11; }
571 ecb_inline ecb_bool ecb_little_endian (void) ecb_const;
572 ecb_inline ecb_bool ecb_little_endian (void) { return ecb_byteorder_helper () == 0x44; }
573
/* mathematical (always non-negative) modulo; n is expected to be positive */
#if ECB_GCC_VERSION(3,0) || ECB_C99
  /* C99 defines % to truncate toward zero, so adding n when negative maps into [0, n) */
  #define ecb_mod(m,n) ((m) % (n) + ((m) % (n) < 0 ? (n) : 0))
#else
  /* pre-C99 the sign of % with negative operands is implementation-defined, */
  /* so only ever apply % to non-negative values */
  #define ecb_mod(m,n) ((m) < 0 ? ((n) - 1 - ((-1 - (m)) % (n))) : ((m) % (n)))
#endif
579
#if __cplusplus
  /* integer division rounding down (rd, toward -inf) / up (ru, toward +inf) */
  /* NOTE(review): intended for div > 0 - confirm callers never pass negative divisors */
  template<typename T>
  static inline T ecb_div_rd (T val, T div)
  {
    return val < 0 ? - ((-val + div - 1) / div) : (val          ) / div;
  }
  template<typename T>
  static inline T ecb_div_ru (T val, T div)
  {
    return val < 0 ? - ((-val            ) / div) : (val + div - 1) / div;
  }
#else
  #define ecb_div_rd(val,div) ((val) < 0 ? - ((-(val) + (div) - 1) / (div)) : ((val)              ) / (div))
  #define ecb_div_ru(val,div) ((val) < 0 ? - ((-(val)            ) / (div)) : ((val) + (div) - 1) / (div))
#endif
595
#if ecb_cplusplus_does_not_suck
  /* does not work for local types (http://www.open-std.org/jtc1/sc22/wg21/docs/papers/2008/n2657.htm) */
  template<typename T, int N>
  static inline int ecb_array_length (const T (&arr)[N])
  {
    return N;
  }
#else
  /* only valid for true arrays - a pointer argument silently yields garbage */
  #define ecb_array_length(name) (sizeof (name) / sizeof (name [0]))
#endif
606
/*******************************************************************************/
/* floating point stuff, can be disabled by defining ECB_NO_LIBM */

/* basically, everything uses "ieee pure-endian" floating point numbers */
/* the only noteworthy exception is ancient armle, which uses order 43218765 */
/* ECB_STDFP == 1 means the native format is ieee with integer-matching byte */
/* order, so float<->binary conversions below degrade to a plain memcpy */
#if 0 \
    || __i386 || __i386__ \
    || __amd64 || __amd64__ || __x86_64 || __x86_64__ \
    || __powerpc__ || __ppc__ || __powerpc64__ || __ppc64__ \
    || defined __s390__ || defined __s390x__ \
    || defined __mips__ \
    || defined __alpha__ \
    || defined __hppa__ \
    || defined __ia64__ \
    || defined __m68k__ \
    || defined __m88k__ \
    || defined __sh__ \
    || defined _M_IX86 || defined _M_AMD64 || defined _M_IA64 \
    || (defined __arm__ && (defined __ARM_EABI__ || defined __EABI__ || defined __VFP_FP__ || defined _WIN32_WCE || defined __ANDROID__)) \
    || defined __aarch64__
  #define ECB_STDFP 1
  #include <string.h> /* for memcpy */
#else
  #define ECB_STDFP 0
#endif
632
#ifndef ECB_NO_LIBM

#include <math.h> /* for frexp*, ldexp*, INFINITY, NAN */

/* only the oldest of old doesn't have this one. solaris. */
#ifdef INFINITY
  #define ECB_INFINITY INFINITY
#else
  #define ECB_INFINITY HUGE_VAL
#endif

#ifdef NAN
  #define ECB_NAN NAN
#else
  /* no NAN macro: fall back to infinity as the least-bad substitute */
  #define ECB_NAN ECB_INFINITY
#endif
649
650 /* converts an ieee half/binary16 to a float */
651 ecb_function_ float ecb_binary16_to_float (uint16_t x) ecb_const;
652 ecb_function_ float
653 ecb_binary16_to_float (uint16_t x)
654 {
655 int e = (x >> 10) & 0x1f;
656 int m = x & 0x3ff;
657 float r;
658
659 if (!e ) r = ldexpf (m , -24);
660 else if (e != 31) r = ldexpf (m + 0x400, e - 25);
661 else if (m ) r = ECB_NAN;
662 else r = ECB_INFINITY;
663
664 return x & 0x8000 ? -r : r;
665 }
666
/* convert a float to ieee single/binary32 */
ecb_function_ uint32_t ecb_float_to_binary32 (float x) ecb_const;
ecb_function_ uint32_t
ecb_float_to_binary32 (float x)
{
  uint32_t r;

#if ECB_STDFP
  /* the native format already is binary32 - just copy the bits */
  memcpy (&r, &x, 4);
#else
  /* slow emulation, works for anything but -0 */
  uint32_t m;
  int e;

  if (x == 0e0f                    ) return 0x00000000U;
  if (x > +3.40282346638528860e+38f) return 0x7f800000U; /* +inf */
  if (x < -3.40282346638528860e+38f) return 0xff800000U; /* -inf */
  if (x != x                       ) return 0x7fbfffffU; /* nan  */

  /* frexpf yields a fraction in [0.5, 1); scaling gives the full 24 bit mantissa */
  /* NOTE(review): for negative x this converts a negative double to uint32_t, */
  /* which is undefined in ISO C - relies on common wraparound behaviour; confirm */
  m = frexpf (x, &e) * 0x1000000U;

  r = m & 0x80000000U; /* sign bit */

  if (r)
    m = -m;

  if (e <= -126) /* subnormal: shift mantissa, pin exponent */
    {
      m &= 0xffffffU;
      m >>= (-125 - e);
      e = -126;
    }

  r |= (e + 126) << 23; /* biased exponent */
  r |= m & 0x7fffffU;   /* mantissa without the implied leading bit */
#endif

  return r;
}
706
/* converts an ieee single/binary32 to a float */
ecb_function_ float ecb_binary32_to_float (uint32_t x) ecb_const;
ecb_function_ float
ecb_binary32_to_float (uint32_t x)
{
  float r;

#if ECB_STDFP
  /* the native format already is binary32 - just copy the bits */
  memcpy (&r, &x, 4);
#else
  /* emulation, only works for normals and subnormals and +0 */
  int neg = x >> 31;          /* sign bit */
  int e = (x >> 23) & 0xffU;  /* biased exponent */

  x &= 0x7fffffU;             /* mantissa */

  if (e)
    x |= 0x800000U;           /* normal: add the implied leading 1 bit */
  else
    e = 1;                    /* subnormal: no implied bit, same scale as e == 1 */

  /* we distrust ldexpf a bit and do the 2**-24 scaling by an extra multiply */
  r = ldexpf (x * (0.5f / 0x800000U), e - 126);

  r = neg ? -r : r;
#endif

  return r;
}
736
737 /* convert a double to ieee double/binary64 */
738 ecb_function_ uint64_t ecb_double_to_binary64 (double x) ecb_const;
739 ecb_function_ uint64_t
740 ecb_double_to_binary64 (double x)
741 {
742 uint64_t r;
743
744 #if ECB_STDFP
745 memcpy (&r, &x, 8);
746 #else
747 /* slow emulation, works for anything but -0 */
748 uint64_t m;
749 int e;
750
751 if (x == 0e0 ) return 0x0000000000000000U;
752 if (x > +1.79769313486231470e+308) return 0x7ff0000000000000U;
753 if (x < -1.79769313486231470e+308) return 0xfff0000000000000U;
754 if (x != x ) return 0X7ff7ffffffffffffU;
755
756 m = frexp (x, &e) * 0x20000000000000U;
757
758 r = m & 0x8000000000000000;;
759
760 if (r)
761 m = -m;
762
763 if (e <= -1022)
764 {
765 m &= 0x1fffffffffffffU;
766 m >>= (-1021 - e);
767 e = -1022;
768 }
769
770 r |= ((uint64_t)(e + 1022)) << 52;
771 r |= m & 0xfffffffffffffU;
772 #endif
773
774 return r;
775 }
776
/* converts an ieee double/binary64 to a double */
ecb_function_ double ecb_binary64_to_double (uint64_t x) ecb_const;
ecb_function_ double
ecb_binary64_to_double (uint64_t x)
{
  double r;

#if ECB_STDFP
  /* the native format already is binary64 - just copy the bits */
  memcpy (&r, &x, 8);
#else
  /* emulation, only works for normals and subnormals and +0 */
  int neg = x >> 63;           /* sign bit */
  int e = (x >> 52) & 0x7ffU;  /* biased exponent */

  x &= 0xfffffffffffffU;       /* mantissa */

  if (e)
    x |= 0x10000000000000U;    /* normal: add the implied leading 1 bit */
  else
    e = 1;                     /* subnormal: no implied bit, same scale as e == 1 */

  /* we distrust ldexp a bit and do the 2**-53 scaling by an extra multiply */
  r = ldexp (x * (0.5 / 0x10000000000000U), e - 1022);

  r = neg ? -r : r;
#endif

  return r;
}

#endif /* ECB_NO_LIBM */
808
#endif /* ECB_H */
810