/*
 * libecb - http://software.schmorp.de/pkg/libecb
 *
 * Copyright (c) 2009-2014 Marc Alexander Lehmann <libecb@schmorp.de>
 * Copyright (c) 2011 Emanuele Giaquinta
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without modifica-
 * tion, are permitted provided that the following conditions are met:
 *
 *   1.  Redistributions of source code must retain the above copyright notice,
 *       this list of conditions and the following disclaimer.
 *
 *   2.  Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in the
 *       documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
 * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MER-
 * CHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO
 * EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPE-
 * CIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
 * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTH-
 * ERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
 * OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Alternatively, the contents of this file may be used under the terms of
 * the GNU General Public License ("GPL") version 2 or any later version,
 * in which case the provisions of the GPL are applicable instead of
 * the above. If you wish to allow the use of your version of this file
 * only under the terms of the GPL and not to allow others to use your
 * version of this file under the BSD license, indicate your decision
 * by deleting the provisions above and replace them with the notice
 * and other provisions required by the GPL. If you do not delete the
 * provisions above, a recipient may use your version of this file under
 * either the BSD or the GPL.
 */

#ifndef ECB_H
#define ECB_H

/* 16 bits major, 16 bits minor */
#define ECB_VERSION 0x00010003

#ifdef _WIN32
  typedef signed char int8_t;
  typedef unsigned char uint8_t;
  typedef signed short int16_t;
  typedef unsigned short uint16_t;
  typedef signed int int32_t;
  typedef unsigned int uint32_t;
  #if __GNUC__
    typedef signed long long int64_t;
    typedef unsigned long long uint64_t;
  #else /* _MSC_VER || __BORLANDC__ */
    typedef signed __int64 int64_t;
    typedef unsigned __int64 uint64_t;
  #endif
  #ifdef _WIN64
    #define ECB_PTRSIZE 8
    typedef uint64_t uintptr_t;
    typedef int64_t intptr_t;
  #else
    #define ECB_PTRSIZE 4
    typedef uint32_t uintptr_t;
    typedef int32_t intptr_t;
  #endif
#else
  #include <inttypes.h>
  #if UINTMAX_MAX > 0xffffffffU
    #define ECB_PTRSIZE 8
  #else
    #define ECB_PTRSIZE 4
  #endif
#endif

/* work around x32 idiocy by defining proper macros */
#if __amd64 || __x86_64 || _M_AMD64 || _M_X64
  #if _ILP32
    #define ECB_AMD64_X32 1
  #else
    #define ECB_AMD64 1
  #endif
#endif

/* many compilers define __GNUC__ to some version but then only implement
 * what their idiot authors think are the "more important" extensions,
 * causing enormous grief in return for some better fake benchmark numbers.
 * or so.
 * we try to detect these and simply assume they are not gcc - if they have
 * an issue with that they should have done it right in the first place.
 */
#ifndef ECB_GCC_VERSION
  #if !defined __GNUC_MINOR__ || defined __INTEL_COMPILER || defined __SUNPRO_C || defined __SUNPRO_CC || defined __llvm__ || defined __clang__
    #define ECB_GCC_VERSION(major,minor) 0
  #else
    #define ECB_GCC_VERSION(major,minor) (__GNUC__ > (major) || (__GNUC__ == (major) && __GNUC_MINOR__ >= (minor)))
  #endif
#endif

#define ECB_CPP   (__cplusplus+0)
#define ECB_CPP11 (__cplusplus >= 201103L)

#if ECB_CPP
  #define ECB_C            0
  #define ECB_STDC_VERSION 0
#else
  #define ECB_C            1
  #define ECB_STDC_VERSION __STDC_VERSION__
#endif

#define ECB_C99 (ECB_STDC_VERSION >= 199901L)
#define ECB_C11 (ECB_STDC_VERSION >= 201112L)

#if ECB_CPP
  #define ECB_EXTERN_C extern "C"
  #define ECB_EXTERN_C_BEG ECB_EXTERN_C {
  #define ECB_EXTERN_C_END }
#else
  #define ECB_EXTERN_C extern
  #define ECB_EXTERN_C_BEG
  #define ECB_EXTERN_C_END
#endif
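
/* usage sketch (illustrative): ECB_GCC_VERSION tests for a minimum gcc
 * version, and the ECB_EXTERN_C_* macros wrap declarations so a header
 * compiles identically as C and C++; the declaration below is made up:
 *
 *   #if ECB_GCC_VERSION(4,7)
 *     // safe to rely on gcc 4.7+ extensions here
 *   #endif
 *
 *   ECB_EXTERN_C_BEG
 *   void my_exported_function (int x);  // hypothetical declaration
 *   ECB_EXTERN_C_END
 */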

/*****************************************************************************/

/* ECB_NO_THREADS - ecb is not used by multiple threads, ever */
/* ECB_NO_SMP     - ecb might be used in multiple threads, but only on a single cpu */

#if ECB_NO_THREADS
  #define ECB_NO_SMP 1
#endif

#if ECB_NO_SMP
  #define ECB_MEMORY_FENCE do { } while (0)
#endif

#ifndef ECB_MEMORY_FENCE
  #if ECB_GCC_VERSION(2,5) || defined __INTEL_COMPILER || (__llvm__ && __GNUC__) || __SUNPRO_C >= 0x5110 || __SUNPRO_CC >= 0x5110
    #if __i386 || __i386__
      #define ECB_MEMORY_FENCE         __asm__ __volatile__ ("lock; orb $0, -1(%%esp)" : : : "memory")
      #define ECB_MEMORY_FENCE_ACQUIRE __asm__ __volatile__ ("" : : : "memory")
      #define ECB_MEMORY_FENCE_RELEASE __asm__ __volatile__ ("")
    #elif __amd64 || __amd64__ || __x86_64 || __x86_64__
      #define ECB_MEMORY_FENCE         __asm__ __volatile__ ("mfence" : : : "memory")
      #define ECB_MEMORY_FENCE_ACQUIRE __asm__ __volatile__ ("" : : : "memory")
      #define ECB_MEMORY_FENCE_RELEASE __asm__ __volatile__ ("")
    #elif __powerpc__ || __ppc__ || __powerpc64__ || __ppc64__
      #define ECB_MEMORY_FENCE         __asm__ __volatile__ ("sync" : : : "memory")
    #elif defined __ARM_ARCH_6__  || defined __ARM_ARCH_6J__  \
       || defined __ARM_ARCH_6K__ || defined __ARM_ARCH_6ZK__
      #define ECB_MEMORY_FENCE         __asm__ __volatile__ ("mcr p15,0,%0,c7,c10,5" : : "r" (0) : "memory")
    #elif defined __ARM_ARCH_7__  || defined __ARM_ARCH_7A__  \
       || defined __ARM_ARCH_7M__ || defined __ARM_ARCH_7R__
      #define ECB_MEMORY_FENCE         __asm__ __volatile__ ("dmb" : : : "memory")
    #elif __aarch64__
      #define ECB_MEMORY_FENCE         __asm__ __volatile__ ("dmb ish" : : : "memory")
    #elif (__sparc || __sparc__) && !__sparcv8
      #define ECB_MEMORY_FENCE         __asm__ __volatile__ ("membar #LoadStore | #LoadLoad | #StoreStore | #StoreLoad" : : : "memory")
      #define ECB_MEMORY_FENCE_ACQUIRE __asm__ __volatile__ ("membar #LoadStore | #LoadLoad" : : : "memory")
      #define ECB_MEMORY_FENCE_RELEASE __asm__ __volatile__ ("membar #LoadStore | #StoreStore")
    #elif defined __s390__ || defined __s390x__
      #define ECB_MEMORY_FENCE         __asm__ __volatile__ ("bcr 15,0" : : : "memory")
    #elif defined __mips__
      /* GNU/Linux emulates sync on mips1 architectures, so we force its use */
      /* anybody else who still uses mips1 is supposed to send in their version, with detection code. */
      #define ECB_MEMORY_FENCE         __asm__ __volatile__ (".set mips2; sync; .set mips0" : : : "memory")
    #elif defined __alpha__
      #define ECB_MEMORY_FENCE         __asm__ __volatile__ ("mb" : : : "memory")
    #elif defined __hppa__
      #define ECB_MEMORY_FENCE         __asm__ __volatile__ ("" : : : "memory")
      #define ECB_MEMORY_FENCE_RELEASE __asm__ __volatile__ ("")
    #elif defined __ia64__
      #define ECB_MEMORY_FENCE         __asm__ __volatile__ ("mf" : : : "memory")
    #elif defined __m68k__
      #define ECB_MEMORY_FENCE         __asm__ __volatile__ ("" : : : "memory")
    #elif defined __m88k__
      #define ECB_MEMORY_FENCE         __asm__ __volatile__ ("tb1 0,%%r0,128" : : : "memory")
    #elif defined __sh__
      #define ECB_MEMORY_FENCE         __asm__ __volatile__ ("" : : : "memory")
    #endif
  #endif
#endif

#ifndef ECB_MEMORY_FENCE
  #if ECB_GCC_VERSION(4,7)
    /* see comment below (stdatomic.h) about the C11 memory model. */
    #define ECB_MEMORY_FENCE         __atomic_thread_fence (__ATOMIC_SEQ_CST)
    #define ECB_MEMORY_FENCE_ACQUIRE __atomic_thread_fence (__ATOMIC_ACQUIRE)
    #define ECB_MEMORY_FENCE_RELEASE __atomic_thread_fence (__ATOMIC_RELEASE)

    /* The __has_feature syntax from clang is so misdesigned that we cannot use it
     * without risking compile time errors with other compilers. We *could*
     * define our own ecb_clang_has_feature, but I just can't be bothered to work
     * around this shit time and again.
     * #elif defined __clang && __has_feature (cxx_atomic)
     *   // see comment below (stdatomic.h) about the C11 memory model.
     *   #define ECB_MEMORY_FENCE         __c11_atomic_thread_fence (__ATOMIC_SEQ_CST)
     *   #define ECB_MEMORY_FENCE_ACQUIRE __c11_atomic_thread_fence (__ATOMIC_ACQUIRE)
     *   #define ECB_MEMORY_FENCE_RELEASE __c11_atomic_thread_fence (__ATOMIC_RELEASE)
     */

  #elif ECB_GCC_VERSION(4,4) || defined __INTEL_COMPILER || defined __clang__
    #define ECB_MEMORY_FENCE         __sync_synchronize ()
  #elif _MSC_VER >= 1500 /* VC++ 2008 */
    /* apparently, microsoft broke all the memory barrier stuff in Visual Studio 2008... */
    #pragma intrinsic(_ReadBarrier,_WriteBarrier,_ReadWriteBarrier)
    #define ECB_MEMORY_FENCE         _ReadWriteBarrier (); MemoryBarrier()
    #define ECB_MEMORY_FENCE_ACQUIRE _ReadWriteBarrier (); MemoryBarrier() /* according to msdn, _ReadBarrier is not a load fence */
    #define ECB_MEMORY_FENCE_RELEASE _WriteBarrier (); MemoryBarrier()
  #elif _MSC_VER >= 1400 /* VC++ 2005 */
    #pragma intrinsic(_ReadBarrier,_WriteBarrier,_ReadWriteBarrier)
    #define ECB_MEMORY_FENCE         _ReadWriteBarrier ()
    #define ECB_MEMORY_FENCE_ACQUIRE _ReadWriteBarrier () /* according to msdn, _ReadBarrier is not a load fence */
    #define ECB_MEMORY_FENCE_RELEASE _WriteBarrier ()
  #elif defined _WIN32
    #include <WinNT.h>
    #define ECB_MEMORY_FENCE         MemoryBarrier () /* actually just xchg on x86... scary */
  #elif __SUNPRO_C >= 0x5110 || __SUNPRO_CC >= 0x5110
    #include <mbarrier.h>
    #define ECB_MEMORY_FENCE         __machine_rw_barrier ()
    #define ECB_MEMORY_FENCE_ACQUIRE __machine_r_barrier ()
    #define ECB_MEMORY_FENCE_RELEASE __machine_w_barrier ()
  #elif __xlC__
    #define ECB_MEMORY_FENCE         __sync ()
  #endif
#endif

#ifndef ECB_MEMORY_FENCE
  #if ECB_C11 && !defined __STDC_NO_ATOMICS__
    /* we assume that these memory fences work on all variables/all memory accesses, */
    /* not just C11 atomics and atomic accesses */
    #include <stdatomic.h>
    /* Unfortunately, neither gcc 4.7 nor clang 3.1 generate any instructions for */
    /* any fence other than seq_cst, which isn't very efficient for us. */
    /* Why that is, we don't know - either the C11 memory model is quite useless */
    /* for most usages, or gcc and clang have a bug */
    /* I *currently* lean towards the latter, and inefficiently implement */
    /* all three of ecb's fences as a seq_cst fence */
    /* Update, gcc-4.8 generates mfence for all c++ fences, but nothing */
    /* for all __atomic_thread_fence's except seq_cst */
    #define ECB_MEMORY_FENCE atomic_thread_fence (memory_order_seq_cst)
  #endif
#endif

#ifndef ECB_MEMORY_FENCE
  #if !ECB_AVOID_PTHREADS
    /*
     * if you get undefined symbol references to pthread_mutex_lock,
     * or failure to find pthread.h, then you should implement
     * the ECB_MEMORY_FENCE operations for your cpu/compiler
     * OR provide pthread.h and link against the posix thread library
     * of your system.
     */
    #include <pthread.h>
    #define ECB_NEEDS_PTHREADS 1
    #define ECB_MEMORY_FENCE_NEEDS_PTHREADS 1

    static pthread_mutex_t ecb_mf_lock = PTHREAD_MUTEX_INITIALIZER;
    #define ECB_MEMORY_FENCE do { pthread_mutex_lock (&ecb_mf_lock); pthread_mutex_unlock (&ecb_mf_lock); } while (0)
  #endif
#endif

#if !defined ECB_MEMORY_FENCE_ACQUIRE && defined ECB_MEMORY_FENCE
  #define ECB_MEMORY_FENCE_ACQUIRE ECB_MEMORY_FENCE
#endif

#if !defined ECB_MEMORY_FENCE_RELEASE && defined ECB_MEMORY_FENCE
  #define ECB_MEMORY_FENCE_RELEASE ECB_MEMORY_FENCE
#endif
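
/* usage sketch (illustrative, variable names made up): a producer can
 * publish data with a release fence and a consumer can pick it up with
 * an acquire fence (the flag would additionally need to be volatile or
 * otherwise protected from the optimiser):
 *
 *   // producer
 *   shared_data  = 42;
 *   ECB_MEMORY_FENCE_RELEASE;
 *   shared_ready = 1;
 *
 *   // consumer
 *   while (!shared_ready)
 *     ;
 *   ECB_MEMORY_FENCE_ACQUIRE;
 *   use (shared_data);
 */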

/*****************************************************************************/

#if __cplusplus
  #define ecb_inline static inline
#elif ECB_GCC_VERSION(2,5)
  #define ecb_inline static __inline__
#elif ECB_C99
  #define ecb_inline static inline
#else
  #define ecb_inline static
#endif

#if ECB_GCC_VERSION(3,3)
  #define ecb_restrict __restrict__
#elif ECB_C99
  #define ecb_restrict restrict
#else
  #define ecb_restrict
#endif

typedef int ecb_bool;

#define ECB_CONCAT_(a, b) a ## b
#define ECB_CONCAT(a, b) ECB_CONCAT_(a, b)
#define ECB_STRINGIFY_(a) # a
#define ECB_STRINGIFY(a) ECB_STRINGIFY_(a)

#define ecb_function_ ecb_inline

#if ECB_GCC_VERSION(3,1)
  #define ecb_attribute(attrlist)        __attribute__(attrlist)
  #define ecb_is_constant(expr)          __builtin_constant_p (expr)
  #define ecb_expect(expr,value)         __builtin_expect ((expr),(value))
  #define ecb_prefetch(addr,rw,locality) __builtin_prefetch (addr, rw, locality)
#else
  #define ecb_attribute(attrlist)

  /* possible C11 impl for integral types
  typedef struct ecb_is_constant_struct ecb_is_constant_struct;
  #define ecb_is_constant(expr) _Generic ((1 ? (struct ecb_is_constant_struct *)0 : (void *)((expr) - (expr)), ecb_is_constant_struct *: 0, default: 1)) */

  #define ecb_is_constant(expr)          0
  #define ecb_expect(expr,value)         (expr)
  #define ecb_prefetch(addr,rw,locality)
#endif

/* no emulation for ecb_decltype */
#if ECB_GCC_VERSION(4,5)
  #define ecb_decltype(x) __decltype(x)
#elif ECB_GCC_VERSION(3,0)
  #define ecb_decltype(x) __typeof(x)
#endif

#define ecb_noinline   ecb_attribute ((__noinline__))
#define ecb_unused     ecb_attribute ((__unused__))
#define ecb_const      ecb_attribute ((__const__))
#define ecb_pure       ecb_attribute ((__pure__))

#if ECB_C11
  #define ecb_noreturn _Noreturn
#else
  #define ecb_noreturn ecb_attribute ((__noreturn__))
#endif

#if ECB_GCC_VERSION(4,3)
  #define ecb_artificial ecb_attribute ((__artificial__))
  #define ecb_hot        ecb_attribute ((__hot__))
  #define ecb_cold       ecb_attribute ((__cold__))
#else
  #define ecb_artificial
  #define ecb_hot
  #define ecb_cold
#endif

/* put around conditional expressions if you are very sure that the */
/* expression is mostly true or mostly false. note that these return */
/* booleans, not the expression. */
#define ecb_expect_false(expr) ecb_expect (!!(expr), 0)
#define ecb_expect_true(expr)  ecb_expect (!!(expr), 1)
/* for compatibility to the rest of the world */
#define ecb_likely(expr)   ecb_expect_true  (expr)
#define ecb_unlikely(expr) ecb_expect_false (expr)
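
/* usage sketch (illustrative, names made up): wrap the condition of a
 * branch you expect to be rarely taken, e.g. an error path, so the
 * compiler favours the common case when laying out code:
 *
 *   if (ecb_expect_false (fd < 0))
 *     return -1;
 *
 *   if (ecb_expect_true (n > 0))
 *     process (buf, n);
 */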

/* count trailing zero bits and count # of one bits */
#if ECB_GCC_VERSION(3,4)
  /* we assume int == 32 bit, long == 32 or 64 bit and long long == 64 bit */
  #define ecb_ld32(x)       (__builtin_clz   (x) ^ 31)
  #define ecb_ld64(x)       (__builtin_clzll (x) ^ 63)
  #define ecb_ctz32(x)       __builtin_ctz   (x)
  #define ecb_ctz64(x)       __builtin_ctzll (x)
  #define ecb_popcount32(x)  __builtin_popcount (x)
  /* no popcountll */
#else
  ecb_function_ int ecb_ctz32 (uint32_t x) ecb_const;
  ecb_function_ int
  ecb_ctz32 (uint32_t x)
  {
    int r = 0;

    x &= ~x + 1; /* this isolates the lowest bit */

#if ECB_branchless_on_i386
    r += !!(x & 0xaaaaaaaa) << 0;
    r += !!(x & 0xcccccccc) << 1;
    r += !!(x & 0xf0f0f0f0) << 2;
    r += !!(x & 0xff00ff00) << 3;
    r += !!(x & 0xffff0000) << 4;
#else
    if (x & 0xaaaaaaaa) r +=  1;
    if (x & 0xcccccccc) r +=  2;
    if (x & 0xf0f0f0f0) r +=  4;
    if (x & 0xff00ff00) r +=  8;
    if (x & 0xffff0000) r += 16;
#endif

    return r;
  }

  ecb_function_ int ecb_ctz64 (uint64_t x) ecb_const;
  ecb_function_ int
  ecb_ctz64 (uint64_t x)
  {
    int shift = x & 0xffffffffU ? 0 : 32;
    return ecb_ctz32 (x >> shift) + shift;
  }

  ecb_function_ int ecb_popcount32 (uint32_t x) ecb_const;
  ecb_function_ int
  ecb_popcount32 (uint32_t x)
  {
    x -=  (x >> 1) & 0x55555555;
    x  = ((x >> 2) & 0x33333333) + (x & 0x33333333);
    x  = ((x >> 4) + x) & 0x0f0f0f0f;
    x *= 0x01010101;

    return x >> 24;
  }

  ecb_function_ int ecb_ld32 (uint32_t x) ecb_const;
  ecb_function_ int ecb_ld32 (uint32_t x)
  {
    int r = 0;

    if (x >> 16) { x >>= 16; r += 16; }
    if (x >>  8) { x >>=  8; r +=  8; }
    if (x >>  4) { x >>=  4; r +=  4; }
    if (x >>  2) { x >>=  2; r +=  2; }
    if (x >>  1) {           r +=  1; }

    return r;
  }

  ecb_function_ int ecb_ld64 (uint64_t x) ecb_const;
  ecb_function_ int ecb_ld64 (uint64_t x)
  {
    int r = 0;

    if (x >> 32) { x >>= 32; r += 32; }

    return r + ecb_ld32 (x);
  }
#endif
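
/* worked examples (illustrative): for x = 0x00001200 (bits 9 and 12 set)
 *
 *   ecb_ctz32      (0x00001200) ==  9  (lowest set bit)
 *   ecb_ld32       (0x00001200) == 12  (highest set bit)
 *   ecb_popcount32 (0x00001200) ==  2  (number of set bits)
 */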

ecb_function_ ecb_bool ecb_is_pot32 (uint32_t x) ecb_const;
ecb_function_ ecb_bool ecb_is_pot32 (uint32_t x) { return !(x & (x - 1)); }
ecb_function_ ecb_bool ecb_is_pot64 (uint64_t x) ecb_const;
ecb_function_ ecb_bool ecb_is_pot64 (uint64_t x) { return !(x & (x - 1)); }

ecb_function_ uint8_t ecb_bitrev8 (uint8_t x) ecb_const;
ecb_function_ uint8_t ecb_bitrev8 (uint8_t x)
{
  return (  (x * 0x0802U & 0x22110U)
          | (x * 0x8020U & 0x88440U)) * 0x10101U >> 16;
}

ecb_function_ uint16_t ecb_bitrev16 (uint16_t x) ecb_const;
ecb_function_ uint16_t ecb_bitrev16 (uint16_t x)
{
  x = ((x >>  1) & 0x5555) | ((x & 0x5555) <<  1);
  x = ((x >>  2) & 0x3333) | ((x & 0x3333) <<  2);
  x = ((x >>  4) & 0x0f0f) | ((x & 0x0f0f) <<  4);
  x = ( x >>  8          ) | ( x           <<  8);

  return x;
}

ecb_function_ uint32_t ecb_bitrev32 (uint32_t x) ecb_const;
ecb_function_ uint32_t ecb_bitrev32 (uint32_t x)
{
  x = ((x >>  1) & 0x55555555) | ((x & 0x55555555) <<  1);
  x = ((x >>  2) & 0x33333333) | ((x & 0x33333333) <<  2);
  x = ((x >>  4) & 0x0f0f0f0f) | ((x & 0x0f0f0f0f) <<  4);
  x = ((x >>  8) & 0x00ff00ff) | ((x & 0x00ff00ff) <<  8);
  x = ( x >> 16              ) | ( x               << 16);

  return x;
}

/* popcount64 is only available on 64 bit cpus as gcc builtin */
/* so for this version we are lazy */
ecb_function_ int ecb_popcount64 (uint64_t x) ecb_const;
ecb_function_ int
ecb_popcount64 (uint64_t x)
{
  return ecb_popcount32 (x) + ecb_popcount32 (x >> 32);
}

ecb_inline uint8_t  ecb_rotl8  (uint8_t  x, unsigned int count) ecb_const;
ecb_inline uint8_t  ecb_rotr8  (uint8_t  x, unsigned int count) ecb_const;
ecb_inline uint16_t ecb_rotl16 (uint16_t x, unsigned int count) ecb_const;
ecb_inline uint16_t ecb_rotr16 (uint16_t x, unsigned int count) ecb_const;
ecb_inline uint32_t ecb_rotl32 (uint32_t x, unsigned int count) ecb_const;
ecb_inline uint32_t ecb_rotr32 (uint32_t x, unsigned int count) ecb_const;
ecb_inline uint64_t ecb_rotl64 (uint64_t x, unsigned int count) ecb_const;
ecb_inline uint64_t ecb_rotr64 (uint64_t x, unsigned int count) ecb_const;

ecb_inline uint8_t  ecb_rotl8  (uint8_t  x, unsigned int count) { return (x >> ( 8 - count)) | (x << count); }
ecb_inline uint8_t  ecb_rotr8  (uint8_t  x, unsigned int count) { return (x << ( 8 - count)) | (x >> count); }
ecb_inline uint16_t ecb_rotl16 (uint16_t x, unsigned int count) { return (x >> (16 - count)) | (x << count); }
ecb_inline uint16_t ecb_rotr16 (uint16_t x, unsigned int count) { return (x << (16 - count)) | (x >> count); }
ecb_inline uint32_t ecb_rotl32 (uint32_t x, unsigned int count) { return (x >> (32 - count)) | (x << count); }
ecb_inline uint32_t ecb_rotr32 (uint32_t x, unsigned int count) { return (x << (32 - count)) | (x >> count); }
ecb_inline uint64_t ecb_rotl64 (uint64_t x, unsigned int count) { return (x >> (64 - count)) | (x << count); }
ecb_inline uint64_t ecb_rotr64 (uint64_t x, unsigned int count) { return (x << (64 - count)) | (x >> count); }
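
/* usage sketch (illustrative): rotations move the bits that fall off one
 * end back in at the other, e.g.
 *
 *   ecb_rotl32 (0x12345678, 8) == 0x34567812
 *   ecb_rotr16 (0x0001,     1) == 0x8000
 */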

#if ECB_GCC_VERSION(4,3)
  #define ecb_bswap16(x) (__builtin_bswap32 (x) >> 16)
  #define ecb_bswap32(x)  __builtin_bswap32 (x)
  #define ecb_bswap64(x)  __builtin_bswap64 (x)
#else
  ecb_function_ uint16_t ecb_bswap16 (uint16_t x) ecb_const;
  ecb_function_ uint16_t
  ecb_bswap16 (uint16_t x)
  {
    return ecb_rotl16 (x, 8);
  }

  ecb_function_ uint32_t ecb_bswap32 (uint32_t x) ecb_const;
  ecb_function_ uint32_t
  ecb_bswap32 (uint32_t x)
  {
    return (((uint32_t)ecb_bswap16 (x)) << 16) | ecb_bswap16 (x >> 16);
  }

  ecb_function_ uint64_t ecb_bswap64 (uint64_t x) ecb_const;
  ecb_function_ uint64_t
  ecb_bswap64 (uint64_t x)
  {
    return (((uint64_t)ecb_bswap32 (x)) << 32) | ecb_bswap32 (x >> 32);
  }
#endif
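
/* worked example (illustrative): byte swapping reverses the byte order, so
 *
 *   ecb_bswap16 (0x1122)     == 0x2211
 *   ecb_bswap32 (0x11223344) == 0x44332211
 */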

#if ECB_GCC_VERSION(4,5)
  #define ecb_unreachable() __builtin_unreachable ()
#else
  /* this seems to work fine, but gcc always emits a warning for it :/ */
  ecb_inline void ecb_unreachable (void) ecb_noreturn;
  ecb_inline void ecb_unreachable (void) { }
#endif

/* try to tell the compiler that some condition is definitely true */
#define ecb_assume(cond) if (!(cond)) ecb_unreachable (); else 0
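
/* usage sketch (illustrative): assert a fact the compiler cannot prove, so
 * it may drop redundant checks; if the condition is ever false, behaviour
 * is undefined:
 *
 *   ecb_assume (len > 0);
 *   // ... the compiler may now omit any special-casing of len == 0 ...
 */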

ecb_inline unsigned char ecb_byteorder_helper (void) ecb_const;
ecb_inline unsigned char
ecb_byteorder_helper (void)
{
  /* the union code still generates code under pressure in gcc, */
  /* but less than using pointers, and always seems to */
  /* successfully return a constant. */
  /* the reason why we have this horrible preprocessor mess */
  /* is to avoid it in all cases, at least on common architectures */
  /* or when using a recent enough gcc version (>= 4.6) */
#if __i386 || __i386__ || _M_X86 || __amd64 || __amd64__ || _M_X64
  return 0x44;
#elif __BYTE_ORDER__ && __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
  return 0x44;
#elif __BYTE_ORDER__ && __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
  return 0x11;
#else
  union
  {
    uint32_t i;
    uint8_t c;
  } u = { 0x11223344 };
  return u.c;
#endif
}

ecb_inline ecb_bool ecb_big_endian    (void) ecb_const;
ecb_inline ecb_bool ecb_big_endian    (void) { return ecb_byteorder_helper () == 0x11; }
ecb_inline ecb_bool ecb_little_endian (void) ecb_const;
ecb_inline ecb_bool ecb_little_endian (void) { return ecb_byteorder_helper () == 0x44; }
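
/* usage sketch (illustrative, "raw" is a made-up variable): pick a
 * conversion at runtime, e.g. when reading a little-endian on-disk format:
 *
 *   uint32_t v = ecb_little_endian () ? raw : ecb_bswap32 (raw);
 *
 * on most compilers the helper folds to a constant and the branch disappears.
 */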

#if ECB_GCC_VERSION(3,0) || ECB_C99
  #define ecb_mod(m,n) ((m) % (n) + ((m) % (n) < 0 ? (n) : 0))
#else
  #define ecb_mod(m,n) ((m) < 0 ? ((n) - 1 - ((-1 - (m)) % (n))) : ((m) % (n)))
#endif

#if __cplusplus
  template<typename T>
  static inline T ecb_div_rd (T val, T div)
  {
    return val < 0 ? - ((-val + div - 1) / div) : (val) / div;
  }
  template<typename T>
  static inline T ecb_div_ru (T val, T div)
  {
    return val < 0 ? - ((-val) / div) : (val + div - 1) / div;
  }
#else
  #define ecb_div_rd(val,div) ((val) < 0 ? - ((-(val) + (div) - 1) / (div)) : ((val)) / (div))
  #define ecb_div_ru(val,div) ((val) < 0 ? - ((-(val)) / (div)) : ((val) + (div) - 1) / (div))
#endif

#if ecb_cplusplus_does_not_suck
  /* does not work for local types (http://www.open-std.org/jtc1/sc22/wg21/docs/papers/2008/n2657.htm) */
  template<typename T, int N>
  static inline int ecb_array_length (const T (&arr)[N])
  {
    return N;
  }
#else
  #define ecb_array_length(name) (sizeof (name) / sizeof (name [0]))
#endif
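
/* worked examples (illustrative): ecb_mod yields a non-negative result for
 * positive n, and ecb_div_rd/ecb_div_ru round towards -inf/+inf:
 *
 *   ecb_mod    (-5, 3) ==  1   (plain C "-5 % 3" would be -2)
 *   ecb_div_rd (-5, 3) == -2
 *   ecb_div_ru ( 5, 3) ==  2
 */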

/*******************************************************************************/
/* floating point stuff, can be disabled by defining ECB_NO_LIBM */

/* basically, everything uses "ieee pure-endian" floating point numbers */
/* the only noteworthy exception is ancient armle, which uses order 43218765 */
#if 0 \
    || __i386 || __i386__ \
    || __amd64 || __amd64__ || __x86_64 || __x86_64__ \
    || __powerpc__ || __ppc__ || __powerpc64__ || __ppc64__ \
    || defined __s390__ || defined __s390x__ \
    || defined __mips__ \
    || defined __alpha__ \
    || defined __hppa__ \
    || defined __ia64__ \
    || defined __m68k__ \
    || defined __m88k__ \
    || defined __sh__ \
    || defined _M_IX86 || defined _M_AMD64 || defined _M_IA64 \
    || (defined __arm__ && (defined __ARM_EABI__ || defined __EABI__ || defined __VFP_FP__ || defined _WIN32_WCE || defined __ANDROID__)) \
    || defined __aarch64__
  #define ECB_STDFP 1
  #include <string.h> /* for memcpy */
#else
  #define ECB_STDFP 0
#endif

#ifndef ECB_NO_LIBM

  #include <math.h> /* for frexp*, ldexp*, INFINITY, NAN */

  /* only the oldest of old doesn't have this one. solaris. */
  #ifdef INFINITY
    #define ECB_INFINITY INFINITY
  #else
    #define ECB_INFINITY HUGE_VAL
  #endif

  #ifdef NAN
    #define ECB_NAN NAN
  #else
    #define ECB_NAN ECB_INFINITY
  #endif

  /* converts an ieee half/binary16 to a float */
  ecb_function_ float ecb_binary16_to_float (uint16_t x) ecb_const;
  ecb_function_ float
  ecb_binary16_to_float (uint16_t x)
  {
    int e = (x >> 10) & 0x1f;
    int m = x & 0x3ff;
    float r;

    if      (!e     ) r = ldexpf (m        , -24);
    else if (e != 31) r = ldexpf (m + 0x400, e - 25);
    else if (m      ) r = ECB_NAN;
    else              r = ECB_INFINITY;

    return x & 0x8000 ? -r : r;
  }
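
  /* worked examples (illustrative), using the usual ieee binary16 encoding:
   *
   *   ecb_binary16_to_float (0x3c00) ==  1.0f
   *   ecb_binary16_to_float (0xc000) == -2.0f
   *   ecb_binary16_to_float (0x7c00) == +infinity
   */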

  /* convert a float to ieee single/binary32 */
  ecb_function_ uint32_t ecb_float_to_binary32 (float x) ecb_const;
  ecb_function_ uint32_t
  ecb_float_to_binary32 (float x)
  {
    uint32_t r;

    #if ECB_STDFP
      memcpy (&r, &x, 4);
    #else
      /* slow emulation, works for anything but -0 */
      uint32_t m;
      int e;

      if (x == 0e0f                    ) return 0x00000000U;
      if (x > +3.40282346638528860e+38f) return 0x7f800000U;
      if (x < -3.40282346638528860e+38f) return 0xff800000U;
      if (x != x                       ) return 0x7fbfffffU;

      m = frexpf (x, &e) * 0x1000000U;

      r = m & 0x80000000U;

      if (r)
        m = -m;

      if (e <= -126)
        {
          m &= 0xffffffU;
          m >>= (-125 - e);
          e = -126;
        }

      r |= (e + 126) << 23;
      r |= m & 0x7fffffU;
    #endif

    return r;
  }

  /* converts an ieee single/binary32 to a float */
  ecb_function_ float ecb_binary32_to_float (uint32_t x) ecb_const;
  ecb_function_ float
  ecb_binary32_to_float (uint32_t x)
  {
    float r;

    #if ECB_STDFP
      memcpy (&r, &x, 4);
    #else
      /* emulation, only works for normals and subnormals and +0 */
      int neg = x >> 31;
      int e = (x >> 23) & 0xffU;

      x &= 0x7fffffU;

      if (e)
        x |= 0x800000U;
      else
        e = 1;

      /* we distrust ldexpf a bit and do the 2**-24 scaling by an extra multiply */
      r = ldexpf (x * (0.5f / 0x800000U), e - 126);

      r = neg ? -r : r;
    #endif

    return r;
  }
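
  /* worked examples (illustrative):
   *
   *   ecb_float_to_binary32 (1.0f)        == 0x3f800000
   *   ecb_binary32_to_float (0x40490fdbU) is approximately 3.14159274f
   *
   * and on ECB_STDFP platforms a round trip reproduces the original bits.
   */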

  /* convert a double to ieee double/binary64 */
  ecb_function_ uint64_t ecb_double_to_binary64 (double x) ecb_const;
  ecb_function_ uint64_t
  ecb_double_to_binary64 (double x)
  {
    uint64_t r;

    #if ECB_STDFP
      memcpy (&r, &x, 8);
    #else
      /* slow emulation, works for anything but -0 */
      uint64_t m;
      int e;

      if (x == 0e0                     ) return 0x0000000000000000U;
      if (x > +1.79769313486231470e+308) return 0x7ff0000000000000U;
      if (x < -1.79769313486231470e+308) return 0xfff0000000000000U;
      if (x != x                       ) return 0x7ff7ffffffffffffU;

      m = frexp (x, &e) * 0x20000000000000U;

      r = m & 0x8000000000000000U;

      if (r)
        m = -m;

      if (e <= -1022)
        {
          m &= 0x1fffffffffffffU;
          m >>= (-1021 - e);
          e = -1022;
        }

      r |= ((uint64_t)(e + 1022)) << 52;
      r |= m & 0xfffffffffffffU;
    #endif

    return r;
  }

  /* converts an ieee double/binary64 to a double */
  ecb_function_ double ecb_binary64_to_double (uint64_t x) ecb_const;
  ecb_function_ double
  ecb_binary64_to_double (uint64_t x)
  {
    double r;

    #if ECB_STDFP
      memcpy (&r, &x, 8);
    #else
      /* emulation, only works for normals and subnormals and +0 */
      int neg = x >> 63;
      int e = (x >> 52) & 0x7ffU;

      x &= 0xfffffffffffffU;

      if (e)
        x |= 0x10000000000000U;
      else
        e = 1;

      /* we distrust ldexp a bit and do the 2**-53 scaling by an extra multiply */
      r = ldexp (x * (0.5 / 0x10000000000000U), e - 1022);

      r = neg ? -r : r;
    #endif

    return r;
  }

#endif

#endif