/*
 * libecb - http://software.schmorp.de/pkg/libecb
 *
 * Copyright (©) 2009-2015 Marc Alexander Lehmann <libecb@schmorp.de>
 * Copyright (©) 2011 Emanuele Giaquinta
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without modifica-
 * tion, are permitted provided that the following conditions are met:
 *
 *   1. Redistributions of source code must retain the above copyright notice,
 *      this list of conditions and the following disclaimer.
 *
 *   2. Redistributions in binary form must reproduce the above copyright
 *      notice, this list of conditions and the following disclaimer in the
 *      documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
 * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MER-
 * CHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
 * EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPE-
 * CIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
 * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTH-
 * ERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
 * OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Alternatively, the contents of this file may be used under the terms of
 * the GNU General Public License ("GPL") version 2 or any later version,
 * in which case the provisions of the GPL are applicable instead of
 * the above. If you wish to allow the use of your version of this file
 * only under the terms of the GPL and not to allow others to use your
 * version of this file under the BSD license, indicate your decision
 * by deleting the provisions above and replace them with the notice
 * and other provisions required by the GPL. If you do not delete the
 * provisions above, a recipient may use your version of this file under
 * either the BSD or the GPL.
 */

#ifndef ECB_H
#define ECB_H

/* 16 bits major, 16 bits minor */
#define ECB_VERSION 0x00010004

#ifdef _WIN32
  typedef   signed char   int8_t;
  typedef unsigned char  uint8_t;
  typedef   signed short  int16_t;
  typedef unsigned short uint16_t;
  typedef   signed int    int32_t;
  typedef unsigned int   uint32_t;
  #if __GNUC__
    typedef   signed long long int64_t;
    typedef unsigned long long uint64_t;
  #else /* _MSC_VER || __BORLANDC__ */
    typedef   signed __int64   int64_t;
    typedef unsigned __int64   uint64_t;
  #endif
  #ifdef _WIN64
    #define ECB_PTRSIZE 8
    typedef uint64_t uintptr_t;
    typedef  int64_t  intptr_t;
  #else
    #define ECB_PTRSIZE 4
    typedef uint32_t uintptr_t;
    typedef  int32_t  intptr_t;
  #endif
#else
  #include <inttypes.h>
  #if UINTMAX_MAX > 0xffffffffU
    #define ECB_PTRSIZE 8
  #else
    #define ECB_PTRSIZE 4
  #endif
#endif

/* work around x32 idiocy by defining proper macros */
#if __amd64 || __x86_64 || _M_AMD64 || _M_X64
  #if _ILP32
    #define ECB_AMD64_X32 1
  #else
    #define ECB_AMD64 1
  #endif
#endif

/* many compilers define __GNUC__ to some versions but then only implement
 * what their idiot authors think are the "more important" extensions,
 * causing enormous grief in return for some better fake benchmark numbers.
 * or so.
 * we try to detect these and simply assume they are not gcc - if they have
 * an issue with that they should have done it right in the first place.
 */
#if !defined __GNUC_MINOR__ || defined __INTEL_COMPILER || defined __SUNPRO_C || defined __SUNPRO_CC || defined __llvm__ || defined __clang__
  #define ECB_GCC_VERSION(major,minor) 0
#else
  #define ECB_GCC_VERSION(major,minor) (__GNUC__ > (major) || (__GNUC__ == (major) && __GNUC_MINOR__ >= (minor)))
#endif

#define ECB_CLANG_VERSION(major,minor) (__clang_major__ > (major) || (__clang_major__ == (major) && __clang_minor__ >= (minor)))

#if __clang__ && defined __has_builtin
  #define ECB_CLANG_BUILTIN(x) __has_builtin (x)
#else
  #define ECB_CLANG_BUILTIN(x) 0
#endif

#if __clang__ && defined __has_extension
  #define ECB_CLANG_EXTENSION(x) __has_extension (x)
#else
  #define ECB_CLANG_EXTENSION(x) 0
#endif
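
/* illustrative usage sketch (not part of libecb itself): the detection macros
 * above are meant to guard compiler-specific code paths and expand to 0 on
 * other compilers. my_unreachable is a made-up example name:
 *
 *   #if ECB_GCC_VERSION(4,5) || ECB_CLANG_BUILTIN(__builtin_unreachable)
 *     #define my_unreachable() __builtin_unreachable ()
 *   #else
 *     #define my_unreachable() abort ()
 *   #endif
 */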

#define ECB_CPP   (__cplusplus+0)
#define ECB_CPP11 (__cplusplus >= 201103L)

#if ECB_CPP
  #define ECB_C            0
  #define ECB_STDC_VERSION 0
#else
  #define ECB_C            1
  #define ECB_STDC_VERSION __STDC_VERSION__
#endif

#define ECB_C99 (ECB_STDC_VERSION >= 199901L)
#define ECB_C11 (ECB_STDC_VERSION >= 201112L)

#if ECB_CPP
  #define ECB_EXTERN_C extern "C"
  #define ECB_EXTERN_C_BEG ECB_EXTERN_C {
  #define ECB_EXTERN_C_END }
#else
  #define ECB_EXTERN_C extern
  #define ECB_EXTERN_C_BEG
  #define ECB_EXTERN_C_END
#endif

/*****************************************************************************/

/* ECB_NO_THREADS - ecb is not used by multiple threads, ever */
/* ECB_NO_SMP     - ecb might be used in multiple threads, but only on a single cpu */

#if ECB_NO_THREADS
  #define ECB_NO_SMP 1
#endif

#if ECB_NO_SMP
  #define ECB_MEMORY_FENCE do { } while (0)
#endif
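
/* illustrative usage sketch (not part of libecb itself): a program that is
 * known to be single-threaded can turn all fences into no-ops by defining
 * ECB_NO_THREADS before including this header:
 *
 *   #define ECB_NO_THREADS 1
 *   #include "ecb.h"
 */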

#ifndef ECB_MEMORY_FENCE
  #if ECB_GCC_VERSION(2,5) || defined __INTEL_COMPILER || (__llvm__ && __GNUC__) || __SUNPRO_C >= 0x5110 || __SUNPRO_CC >= 0x5110
    #if __i386 || __i386__
      #define ECB_MEMORY_FENCE         __asm__ __volatile__ ("lock; orb $0, -1(%%esp)" : : : "memory")
      #define ECB_MEMORY_FENCE_ACQUIRE __asm__ __volatile__ ("" : : : "memory")
      #define ECB_MEMORY_FENCE_RELEASE __asm__ __volatile__ ("")
    #elif __amd64 || __amd64__ || __x86_64 || __x86_64__
      #define ECB_MEMORY_FENCE         __asm__ __volatile__ ("mfence" : : : "memory")
      #define ECB_MEMORY_FENCE_ACQUIRE __asm__ __volatile__ ("" : : : "memory")
      #define ECB_MEMORY_FENCE_RELEASE __asm__ __volatile__ ("")
    #elif __powerpc__ || __ppc__ || __powerpc64__ || __ppc64__
      #define ECB_MEMORY_FENCE         __asm__ __volatile__ ("sync" : : : "memory")
    #elif defined __ARM_ARCH_6__  || defined __ARM_ARCH_6J__ \
       || defined __ARM_ARCH_6K__ || defined __ARM_ARCH_6ZK__
      #define ECB_MEMORY_FENCE         __asm__ __volatile__ ("mcr p15,0,%0,c7,c10,5" : : "r" (0) : "memory")
    #elif defined __ARM_ARCH_7__  || defined __ARM_ARCH_7A__ \
       || defined __ARM_ARCH_7M__ || defined __ARM_ARCH_7R__
      #define ECB_MEMORY_FENCE         __asm__ __volatile__ ("dmb" : : : "memory")
    #elif __aarch64__
      #define ECB_MEMORY_FENCE         __asm__ __volatile__ ("dmb ish" : : : "memory")
    #elif (__sparc || __sparc__) && !__sparcv8
      #define ECB_MEMORY_FENCE         __asm__ __volatile__ ("membar #LoadStore | #LoadLoad | #StoreStore | #StoreLoad" : : : "memory")
      #define ECB_MEMORY_FENCE_ACQUIRE __asm__ __volatile__ ("membar #LoadStore | #LoadLoad" : : : "memory")
      #define ECB_MEMORY_FENCE_RELEASE __asm__ __volatile__ ("membar #LoadStore | #StoreStore")
    #elif defined __s390__ || defined __s390x__
      #define ECB_MEMORY_FENCE         __asm__ __volatile__ ("bcr 15,0" : : : "memory")
    #elif defined __mips__
      /* GNU/Linux emulates sync on mips1 architectures, so we force its use */
      /* anybody else who still uses mips1 is supposed to send in their version, with detection code. */
      #define ECB_MEMORY_FENCE         __asm__ __volatile__ (".set mips2; sync; .set mips0" : : : "memory")
    #elif defined __alpha__
      #define ECB_MEMORY_FENCE         __asm__ __volatile__ ("mb" : : : "memory")
    #elif defined __hppa__
      #define ECB_MEMORY_FENCE         __asm__ __volatile__ ("" : : : "memory")
      #define ECB_MEMORY_FENCE_RELEASE __asm__ __volatile__ ("")
    #elif defined __ia64__
      #define ECB_MEMORY_FENCE         __asm__ __volatile__ ("mf" : : : "memory")
    #elif defined __m68k__
      #define ECB_MEMORY_FENCE         __asm__ __volatile__ ("" : : : "memory")
    #elif defined __m88k__
      #define ECB_MEMORY_FENCE         __asm__ __volatile__ ("tb1 0,%%r0,128" : : : "memory")
    #elif defined __sh__
      #define ECB_MEMORY_FENCE         __asm__ __volatile__ ("" : : : "memory")
    #endif
  #endif
#endif

#ifndef ECB_MEMORY_FENCE
  #if ECB_GCC_VERSION(4,7)
    /* see comment below (stdatomic.h) about the C11 memory model. */
    #define ECB_MEMORY_FENCE         __atomic_thread_fence (__ATOMIC_SEQ_CST)
    #define ECB_MEMORY_FENCE_ACQUIRE __atomic_thread_fence (__ATOMIC_ACQUIRE)
    #define ECB_MEMORY_FENCE_RELEASE __atomic_thread_fence (__ATOMIC_RELEASE)

  #elif ECB_CLANG_EXTENSION(c_atomic)
    /* see comment below (stdatomic.h) about the C11 memory model. */
    #define ECB_MEMORY_FENCE         __c11_atomic_thread_fence (__ATOMIC_SEQ_CST)
    #define ECB_MEMORY_FENCE_ACQUIRE __c11_atomic_thread_fence (__ATOMIC_ACQUIRE)
    #define ECB_MEMORY_FENCE_RELEASE __c11_atomic_thread_fence (__ATOMIC_RELEASE)

  #elif ECB_GCC_VERSION(4,4) || defined __INTEL_COMPILER || defined __clang__
    #define ECB_MEMORY_FENCE         __sync_synchronize ()
  #elif _MSC_VER >= 1500 /* VC++ 2008 */
    /* apparently, microsoft broke all the memory barrier stuff in Visual Studio 2008... */
    #pragma intrinsic(_ReadBarrier,_WriteBarrier,_ReadWriteBarrier)
    #define ECB_MEMORY_FENCE         _ReadWriteBarrier (); MemoryBarrier()
    #define ECB_MEMORY_FENCE_ACQUIRE _ReadWriteBarrier (); MemoryBarrier() /* according to msdn, _ReadBarrier is not a load fence */
    #define ECB_MEMORY_FENCE_RELEASE _WriteBarrier (); MemoryBarrier()
  #elif _MSC_VER >= 1400 /* VC++ 2005 */
    #pragma intrinsic(_ReadBarrier,_WriteBarrier,_ReadWriteBarrier)
    #define ECB_MEMORY_FENCE         _ReadWriteBarrier ()
    #define ECB_MEMORY_FENCE_ACQUIRE _ReadWriteBarrier () /* according to msdn, _ReadBarrier is not a load fence */
    #define ECB_MEMORY_FENCE_RELEASE _WriteBarrier ()
  #elif defined _WIN32
    #include <WinNT.h>
    #define ECB_MEMORY_FENCE         MemoryBarrier () /* actually just xchg on x86... scary */
  #elif __SUNPRO_C >= 0x5110 || __SUNPRO_CC >= 0x5110
    #include <mbarrier.h>
    #define ECB_MEMORY_FENCE         __machine_rw_barrier ()
    #define ECB_MEMORY_FENCE_ACQUIRE __machine_r_barrier ()
    #define ECB_MEMORY_FENCE_RELEASE __machine_w_barrier ()
  #elif __xlC__
    #define ECB_MEMORY_FENCE         __sync ()
  #endif
#endif

#ifndef ECB_MEMORY_FENCE
  #if ECB_C11 && !defined __STDC_NO_ATOMICS__
    /* we assume that these memory fences work on all variables/all memory accesses, */
    /* not just C11 atomics and atomic accesses */
    #include <stdatomic.h>
    /* Unfortunately, neither gcc 4.7 nor clang 3.1 generate any instructions for */
    /* any fence other than seq_cst, which isn't very efficient for us. */
    /* Why that is, we don't know - either the C11 memory model is quite useless */
    /* for most usages, or gcc and clang have a bug */
    /* I *currently* lean towards the latter, and inefficiently implement */
    /* all three of ecb's fences as a seq_cst fence */
    /* Update, gcc-4.8 generates mfence for all c++ fences, but nothing */
    /* for all __atomic_thread_fence's except seq_cst */
    #define ECB_MEMORY_FENCE atomic_thread_fence (memory_order_seq_cst)
  #endif
#endif

#ifndef ECB_MEMORY_FENCE
  #if !ECB_AVOID_PTHREADS
    /*
     * if you get undefined symbol references to pthread_mutex_lock,
     * or failure to find pthread.h, then you should implement
     * the ECB_MEMORY_FENCE operations for your cpu/compiler
     * OR provide pthread.h and link against the posix thread library
     * of your system.
     */
    #include <pthread.h>
    #define ECB_NEEDS_PTHREADS 1
    #define ECB_MEMORY_FENCE_NEEDS_PTHREADS 1

    static pthread_mutex_t ecb_mf_lock = PTHREAD_MUTEX_INITIALIZER;
    #define ECB_MEMORY_FENCE do { pthread_mutex_lock (&ecb_mf_lock); pthread_mutex_unlock (&ecb_mf_lock); } while (0)
  #endif
#endif

#if !defined ECB_MEMORY_FENCE_ACQUIRE && defined ECB_MEMORY_FENCE
  #define ECB_MEMORY_FENCE_ACQUIRE ECB_MEMORY_FENCE
#endif

#if !defined ECB_MEMORY_FENCE_RELEASE && defined ECB_MEMORY_FENCE
  #define ECB_MEMORY_FENCE_RELEASE ECB_MEMORY_FENCE
#endif
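
/* illustrative usage sketch (not part of libecb itself): publish data to
 * another thread with a release/acquire pair. `data' and `flag' are
 * hypothetical shared variables; the fences only order memory accesses,
 * they do not make the accesses themselves atomic.
 *
 *   data = 42;                      // producer
 *   ECB_MEMORY_FENCE_RELEASE;
 *   flag = 1;
 *
 *   if (flag)                       // consumer
 *     {
 *       ECB_MEMORY_FENCE_ACQUIRE;
 *       use (data);
 *     }
 */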

/*****************************************************************************/

#if ECB_CPP
  #define ecb_inline static inline
#elif ECB_GCC_VERSION(2,5)
  #define ecb_inline static __inline__
#elif ECB_C99
  #define ecb_inline static inline
#else
  #define ecb_inline static
#endif

#if ECB_GCC_VERSION(3,3)
  #define ecb_restrict __restrict__
#elif ECB_C99
  #define ecb_restrict restrict
#else
  #define ecb_restrict
#endif

typedef int ecb_bool;

#define ECB_CONCAT_(a, b) a ## b
#define ECB_CONCAT(a, b) ECB_CONCAT_(a, b)
#define ECB_STRINGIFY_(a) # a
#define ECB_STRINGIFY(a) ECB_STRINGIFY_(a)

#define ecb_function_ ecb_inline

#if ECB_GCC_VERSION(3,1) || ECB_CLANG_VERSION(2,8)
  #define ecb_attribute(attrlist)        __attribute__ (attrlist)
#else
  #define ecb_attribute(attrlist)
#endif

#if ECB_GCC_VERSION(3,1) || ECB_CLANG_BUILTIN(__builtin_constant_p)
  #define ecb_is_constant(expr)          __builtin_constant_p (expr)
#else
  /* possible C11 impl for integral types
  typedef struct ecb_is_constant_struct ecb_is_constant_struct;
  #define ecb_is_constant(expr)          _Generic ((1 ? (struct ecb_is_constant_struct *)0 : (void *)((expr) - (expr)), ecb_is_constant_struct *: 0, default: 1)) */

  #define ecb_is_constant(expr)          0
#endif

#if ECB_GCC_VERSION(3,1) || ECB_CLANG_BUILTIN(__builtin_expect)
  #define ecb_expect(expr,value)         __builtin_expect ((expr),(value))
#else
  #define ecb_expect(expr,value)         (expr)
#endif

#if ECB_GCC_VERSION(3,1) || ECB_CLANG_BUILTIN(__builtin_prefetch)
  #define ecb_prefetch(addr,rw,locality) __builtin_prefetch (addr, rw, locality)
#else
  #define ecb_prefetch(addr,rw,locality)
#endif
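
/* illustrative usage sketch (not part of libecb itself): prefetch the next
 * array element for reading (rw = 0) with maximum temporal locality
 * (locality = 3) while still working on the current one:
 *
 *   ecb_prefetch (&a [i + 1], 0, 3);
 *
 * on compilers without __builtin_prefetch this expands to nothing.
 */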

/* no emulation for ecb_decltype */
#if ECB_CPP11
  // older implementations might have problems with decltype(x)::type, work around it
  template<class T> struct ecb_decltype_t { typedef T type; };
  #define ecb_decltype(x) ecb_decltype_t<decltype (x)>::type
#elif ECB_GCC_VERSION(3,0) || ECB_CLANG_VERSION(2,8)
  #define ecb_decltype(x) __typeof__ (x)
#endif

#if _MSC_VER >= 1300
  #define ecb_deprecated __declspec (deprecated)
#else
  #define ecb_deprecated ecb_attribute ((__deprecated__))
#endif

#define ecb_noinline ecb_attribute ((__noinline__))
#define ecb_unused   ecb_attribute ((__unused__))
#define ecb_const    ecb_attribute ((__const__))
#define ecb_pure     ecb_attribute ((__pure__))

/* TODO http://msdn.microsoft.com/en-us/library/k6ktzx3s.aspx __declspec(noreturn) */
#if ECB_C11 || __IBMC_NORETURN
  /* http://pic.dhe.ibm.com/infocenter/compbg/v121v141/topic/com.ibm.xlcpp121.bg.doc/language_ref/noreturn.html */
  #define ecb_noreturn _Noreturn
#else
  #define ecb_noreturn ecb_attribute ((__noreturn__))
#endif

#if ECB_GCC_VERSION(4,3)
  #define ecb_artificial ecb_attribute ((__artificial__))
  #define ecb_hot        ecb_attribute ((__hot__))
  #define ecb_cold       ecb_attribute ((__cold__))
#else
  #define ecb_artificial
  #define ecb_hot
  #define ecb_cold
#endif

/* put around conditional expressions if you are very sure that the  */
/* expression is mostly true or mostly false. note that these return */
/* booleans, not the expression.                                     */
#define ecb_expect_false(expr) ecb_expect (!!(expr), 0)
#define ecb_expect_true(expr)  ecb_expect (!!(expr), 1)
/* for compatibility to the rest of the world */
#define ecb_likely(expr)   ecb_expect_true  (expr)
#define ecb_unlikely(expr) ecb_expect_false (expr)
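
/* illustrative usage sketch (not part of libecb itself): annotate a rarely
 * taken error path so the compiler can lay out the common path first:
 *
 *   if (ecb_expect_false (!ptr))
 *     return -1;
 */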

/* count trailing zero bits and count # of one bits */
#if ECB_GCC_VERSION(3,4) \
    || (ECB_CLANG_BUILTIN(__builtin_clz) && ECB_CLANG_BUILTIN(__builtin_clzll) \
        && ECB_CLANG_BUILTIN(__builtin_ctz) && ECB_CLANG_BUILTIN(__builtin_ctzll) \
        && ECB_CLANG_BUILTIN(__builtin_popcount))
  /* we assume int == 32 bit, long == 32 or 64 bit and long long == 64 bit */
  #define ecb_ld32(x)       (__builtin_clz   (x) ^ 31)
  #define ecb_ld64(x)       (__builtin_clzll (x) ^ 63)
  #define ecb_ctz32(x)       __builtin_ctz   (x)
  #define ecb_ctz64(x)       __builtin_ctzll (x)
  #define ecb_popcount32(x)  __builtin_popcount (x)
  /* no popcountll */
#else
  ecb_function_ ecb_const int ecb_ctz32 (uint32_t x);
  ecb_function_ ecb_const int
  ecb_ctz32 (uint32_t x)
  {
    int r = 0;

    x &= ~x + 1; /* this isolates the lowest bit */

    #if ECB_branchless_on_i386
      r += !!(x & 0xaaaaaaaa) << 0;
      r += !!(x & 0xcccccccc) << 1;
      r += !!(x & 0xf0f0f0f0) << 2;
      r += !!(x & 0xff00ff00) << 3;
      r += !!(x & 0xffff0000) << 4;
    #else
      if (x & 0xaaaaaaaa) r +=  1;
      if (x & 0xcccccccc) r +=  2;
      if (x & 0xf0f0f0f0) r +=  4;
      if (x & 0xff00ff00) r +=  8;
      if (x & 0xffff0000) r += 16;
    #endif

    return r;
  }

  ecb_function_ ecb_const int ecb_ctz64 (uint64_t x);
  ecb_function_ ecb_const int
  ecb_ctz64 (uint64_t x)
  {
    int shift = x & 0xffffffffU ? 0 : 32;
    return ecb_ctz32 (x >> shift) + shift;
  }

  ecb_function_ ecb_const int ecb_popcount32 (uint32_t x);
  ecb_function_ ecb_const int
  ecb_popcount32 (uint32_t x)
  {
    x -= (x >> 1) & 0x55555555;
    x  = ((x >> 2) & 0x33333333) + (x & 0x33333333);
    x  = ((x >> 4) + x) & 0x0f0f0f0f;
    x *= 0x01010101;

    return x >> 24;
  }

  ecb_function_ ecb_const int ecb_ld32 (uint32_t x);
  ecb_function_ ecb_const int ecb_ld32 (uint32_t x)
  {
    int r = 0;

    if (x >> 16) { x >>= 16; r += 16; }
    if (x >>  8) { x >>=  8; r +=  8; }
    if (x >>  4) { x >>=  4; r +=  4; }
    if (x >>  2) { x >>=  2; r +=  2; }
    if (x >>  1) {           r +=  1; }

    return r;
  }

  ecb_function_ ecb_const int ecb_ld64 (uint64_t x);
  ecb_function_ ecb_const int ecb_ld64 (uint64_t x)
  {
    int r = 0;

    if (x >> 32) { x >>= 32; r += 32; }

    return r + ecb_ld32 (x);
  }
#endif
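
/* quick worked examples (illustrative only):
 *   ecb_ctz32      (0x00000050) == 4   - the lowest set bit is bit 4
 *   ecb_ld32       (0x00000050) == 6   - the highest set bit is bit 6
 *   ecb_popcount32 (0x00000050) == 2   - two bits are set
 * ecb_ctz* and ecb_ld* are not meaningful for a zero argument, as the
 * underlying gcc builtins are undefined for 0.
 */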

ecb_function_ ecb_const ecb_bool ecb_is_pot32 (uint32_t x);
ecb_function_ ecb_const ecb_bool ecb_is_pot32 (uint32_t x) { return !(x & (x - 1)); }
ecb_function_ ecb_const ecb_bool ecb_is_pot64 (uint64_t x);
ecb_function_ ecb_const ecb_bool ecb_is_pot64 (uint64_t x) { return !(x & (x - 1)); }

ecb_function_ ecb_const uint8_t  ecb_bitrev8  (uint8_t  x);
ecb_function_ ecb_const uint8_t  ecb_bitrev8  (uint8_t  x)
{
  return (  (x * 0x0802U & 0x22110U)
          | (x * 0x8020U & 0x88440U)) * 0x10101U >> 16;
}

ecb_function_ ecb_const uint16_t ecb_bitrev16 (uint16_t x);
ecb_function_ ecb_const uint16_t ecb_bitrev16 (uint16_t x)
{
  x = ((x >> 1) & 0x5555) | ((x & 0x5555) << 1);
  x = ((x >> 2) & 0x3333) | ((x & 0x3333) << 2);
  x = ((x >> 4) & 0x0f0f) | ((x & 0x0f0f) << 4);
  x = ( x >> 8           ) | ( x          << 8);

  return x;
}

ecb_function_ ecb_const uint32_t ecb_bitrev32 (uint32_t x);
ecb_function_ ecb_const uint32_t ecb_bitrev32 (uint32_t x)
{
  x = ((x >>  1) & 0x55555555) | ((x & 0x55555555) <<  1);
  x = ((x >>  2) & 0x33333333) | ((x & 0x33333333) <<  2);
  x = ((x >>  4) & 0x0f0f0f0f) | ((x & 0x0f0f0f0f) <<  4);
  x = ((x >>  8) & 0x00ff00ff) | ((x & 0x00ff00ff) <<  8);
  x = ( x >> 16              ) | ( x               << 16);

  return x;
}

/* popcount64 is only available on 64 bit cpus as gcc builtin */
/* so for this version we are lazy */
ecb_function_ ecb_const int ecb_popcount64 (uint64_t x);
ecb_function_ ecb_const int
ecb_popcount64 (uint64_t x)
{
  return ecb_popcount32 (x) + ecb_popcount32 (x >> 32);
}

ecb_inline ecb_const uint8_t  ecb_rotl8  (uint8_t  x, unsigned int count);
ecb_inline ecb_const uint8_t  ecb_rotr8  (uint8_t  x, unsigned int count);
ecb_inline ecb_const uint16_t ecb_rotl16 (uint16_t x, unsigned int count);
ecb_inline ecb_const uint16_t ecb_rotr16 (uint16_t x, unsigned int count);
ecb_inline ecb_const uint32_t ecb_rotl32 (uint32_t x, unsigned int count);
ecb_inline ecb_const uint32_t ecb_rotr32 (uint32_t x, unsigned int count);
ecb_inline ecb_const uint64_t ecb_rotl64 (uint64_t x, unsigned int count);
ecb_inline ecb_const uint64_t ecb_rotr64 (uint64_t x, unsigned int count);

ecb_inline ecb_const uint8_t  ecb_rotl8  (uint8_t  x, unsigned int count) { return (x >> ( 8 - count)) | (x << count); }
ecb_inline ecb_const uint8_t  ecb_rotr8  (uint8_t  x, unsigned int count) { return (x << ( 8 - count)) | (x >> count); }
ecb_inline ecb_const uint16_t ecb_rotl16 (uint16_t x, unsigned int count) { return (x >> (16 - count)) | (x << count); }
ecb_inline ecb_const uint16_t ecb_rotr16 (uint16_t x, unsigned int count) { return (x << (16 - count)) | (x >> count); }
ecb_inline ecb_const uint32_t ecb_rotl32 (uint32_t x, unsigned int count) { return (x >> (32 - count)) | (x << count); }
ecb_inline ecb_const uint32_t ecb_rotr32 (uint32_t x, unsigned int count) { return (x << (32 - count)) | (x >> count); }
ecb_inline ecb_const uint64_t ecb_rotl64 (uint64_t x, unsigned int count) { return (x >> (64 - count)) | (x << count); }
ecb_inline ecb_const uint64_t ecb_rotr64 (uint64_t x, unsigned int count) { return (x << (64 - count)) | (x >> count); }
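
/* worked example (illustrative only): ecb_rotl32 (0x80000001, 1) == 0x00000003.
 * note that these helpers shift by the full type width for a count of 0,
 * so the count should stay in the range 1 .. width - 1.
 */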

#if ECB_GCC_VERSION(4,3) || (ECB_CLANG_BUILTIN(__builtin_bswap32) && ECB_CLANG_BUILTIN(__builtin_bswap64))
  #define ecb_bswap16(x) (__builtin_bswap32 (x) >> 16)
  #define ecb_bswap32(x)  __builtin_bswap32 (x)
  #define ecb_bswap64(x)  __builtin_bswap64 (x)
#else
  ecb_function_ ecb_const uint16_t ecb_bswap16 (uint16_t x);
  ecb_function_ ecb_const uint16_t
  ecb_bswap16 (uint16_t x)
  {
    return ecb_rotl16 (x, 8);
  }

  ecb_function_ ecb_const uint32_t ecb_bswap32 (uint32_t x);
  ecb_function_ ecb_const uint32_t
  ecb_bswap32 (uint32_t x)
  {
    return (((uint32_t)ecb_bswap16 (x)) << 16) | ecb_bswap16 (x >> 16);
  }

  ecb_function_ ecb_const uint64_t ecb_bswap64 (uint64_t x);
  ecb_function_ ecb_const uint64_t
  ecb_bswap64 (uint64_t x)
  {
    return (((uint64_t)ecb_bswap32 (x)) << 32) | ecb_bswap32 (x >> 32);
  }
#endif

#if ECB_GCC_VERSION(4,5) || ECB_CLANG_BUILTIN(__builtin_unreachable)
  #define ecb_unreachable() __builtin_unreachable ()
#else
  /* this seems to work fine, but gcc always emits a warning for it :/ */
  ecb_inline ecb_noreturn void ecb_unreachable (void);
  ecb_inline ecb_noreturn void ecb_unreachable (void) { }
#endif

/* try to tell the compiler that some condition is definitely true */
#define ecb_assume(cond) if (!(cond)) ecb_unreachable (); else 0
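
/* illustrative usage sketch (not part of libecb itself): tell the optimiser
 * that an index is in range, so a later bounds check can be dropped:
 *
 *   ecb_assume (n < 16);
 *   return table [n];
 *
 * if the assumption is ever wrong, behaviour is undefined.
 */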

ecb_inline ecb_const unsigned char ecb_byteorder_helper (void);
ecb_inline ecb_const unsigned char
ecb_byteorder_helper (void)
{
  /* the union code still generates code under pressure in gcc, */
  /* but less than using pointers, and always seems to */
  /* successfully return a constant. */
  /* the reason why we have this horrible preprocessor mess */
  /* is to avoid it in all cases, at least on common architectures */
  /* or when using a recent enough gcc version (>= 4.6) */
  #if __i386 || __i386__ || _M_IX86 || __amd64 || __amd64__ || _M_X64
    return 0x44;
  #elif __BYTE_ORDER__ && __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
    return 0x44;
  #elif __BYTE_ORDER__ && __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
    return 0x11;
  #else
    union
    {
      uint32_t i;
      uint8_t c;
    } u = { 0x11223344 };
    return u.c;
  #endif
}

ecb_inline ecb_const ecb_bool ecb_big_endian    (void);
ecb_inline ecb_const ecb_bool ecb_big_endian    (void) { return ecb_byteorder_helper () == 0x11; }
ecb_inline ecb_const ecb_bool ecb_little_endian (void);
ecb_inline ecb_const ecb_bool ecb_little_endian (void) { return ecb_byteorder_helper () == 0x44; }

#if ECB_GCC_VERSION(3,0) || ECB_C99
  #define ecb_mod(m,n) ((m) % (n) + ((m) % (n) < 0 ? (n) : 0))
#else
  #define ecb_mod(m,n) ((m) < 0 ? ((n) - 1 - ((-1 - (m)) % (n))) : ((m) % (n)))
#endif
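
/* worked example (illustrative only): ecb_mod (-7, 3) == 2, whereas the plain
 * expression -7 % 3 yields -1 under C99 truncating division. n must be
 * strictly positive.
 */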

#if ECB_CPP
  template<typename T>
  static inline T ecb_div_rd (T val, T div)
  {
    return val < 0 ? - ((-val + div - 1) / div) : (val          ) / div;
  }
  template<typename T>
  static inline T ecb_div_ru (T val, T div)
  {
    return val < 0 ? - ((-val          ) / div) : (val + div - 1) / div;
  }
#else
  #define ecb_div_rd(val,div) ((val) < 0 ? - ((-(val) + (div) - 1) / (div)) : ((val)            ) / (div))
  #define ecb_div_ru(val,div) ((val) < 0 ? - ((-(val)            ) / (div)) : ((val) + (div) - 1) / (div))
#endif
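
/* worked examples (illustrative only) - round a signed division down or up:
 *   ecb_div_rd (-7, 2) == -4   ecb_div_ru (-7, 2) == -3
 *   ecb_div_rd ( 7, 2) ==  3   ecb_div_ru ( 7, 2) ==  4
 * div is expected to be positive.
 */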

#if ecb_cplusplus_does_not_suck
  /* does not work for local types (http://www.open-std.org/jtc1/sc22/wg21/docs/papers/2008/n2657.htm) */
  template<typename T, int N>
  static inline int ecb_array_length (const T (&arr)[N])
  {
    return N;
  }
#else
  #define ecb_array_length(name) (sizeof (name) / sizeof (name [0]))
#endif

/*******************************************************************************/
/* floating point stuff, can be disabled by defining ECB_NO_LIBM */

/* basically, everything uses "ieee pure-endian" floating point numbers */
/* the only noteworthy exception is ancient armle, which uses order 43218765 */
#if 0 \
    || __i386 || __i386__ \
    || __amd64 || __amd64__ || __x86_64 || __x86_64__ \
    || __powerpc__ || __ppc__ || __powerpc64__ || __ppc64__ \
    || defined __s390__ || defined __s390x__ \
    || defined __mips__ \
    || defined __alpha__ \
    || defined __hppa__ \
    || defined __ia64__ \
    || defined __m68k__ \
    || defined __m88k__ \
    || defined __sh__ \
    || defined _M_IX86 || defined _M_AMD64 || defined _M_IA64 \
    || (defined __arm__ && (defined __ARM_EABI__ || defined __EABI__ || defined __VFP_FP__ || defined _WIN32_WCE || defined __ANDROID__)) \
    || defined __aarch64__
  #define ECB_STDFP 1
  #include <string.h> /* for memcpy */
#else
  #define ECB_STDFP 0
#endif

#ifndef ECB_NO_LIBM

  #include <math.h> /* for frexp*, ldexp*, INFINITY, NAN */

  /* only the oldest of old doesn't have this one. solaris. */
  #ifdef INFINITY
    #define ECB_INFINITY INFINITY
  #else
    #define ECB_INFINITY HUGE_VAL
  #endif

  #ifdef NAN
    #define ECB_NAN NAN
  #else
    #define ECB_NAN ECB_INFINITY
  #endif

  #if ECB_C99 || _XOPEN_VERSION >= 600 || _POSIX_VERSION >= 200112L
    #define ecb_ldexpf(x,e) ldexpf ((x), (e))
  #else
    #define ecb_ldexpf(x,e) (float) ldexp ((x), (e))
  #endif

  /* converts an ieee half/binary16 to a float */
  ecb_function_ ecb_const float ecb_binary16_to_float (uint16_t x);
  ecb_function_ ecb_const float
  ecb_binary16_to_float (uint16_t x)
  {
    int e = (x >> 10) & 0x1f;
    int m = x & 0x3ff;
    float r;

    if      (!e     ) r = ecb_ldexpf (m        ,    -24);
    else if (e != 31) r = ecb_ldexpf (m + 0x400, e - 25);
    else if (m      ) r = ECB_NAN;
    else              r = ECB_INFINITY;

    return x & 0x8000 ? -r : r;
  }
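
  /* worked examples (illustrative only):
   *   ecb_binary16_to_float (0x3c00) ==  1.0f   (exponent 15, mantissa 0)
   *   ecb_binary16_to_float (0xc000) == -2.0f   (sign bit set, exponent 16)
   */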

  /* convert a float to ieee single/binary32 */
  ecb_function_ ecb_const uint32_t ecb_float_to_binary32 (float x);
  ecb_function_ ecb_const uint32_t
  ecb_float_to_binary32 (float x)
  {
    uint32_t r;

    #if ECB_STDFP
      memcpy (&r, &x, 4);
    #else
      /* slow emulation, works for anything but -0 */
      uint32_t m;
      int e;

      if (x == 0e0f                    ) return 0x00000000U;
      if (x > +3.40282346638528860e+38f) return 0x7f800000U;
      if (x < -3.40282346638528860e+38f) return 0xff800000U;
      if (x != x                       ) return 0x7fbfffffU;

      m = frexpf (x, &e) * 0x1000000U;

      r = m & 0x80000000U;

      if (r)
        m = -m;

      if (e <= -126)
        {
          m &= 0xffffffU;
          m >>= (-125 - e);
          e = -126;
        }

      r |= (e + 126) << 23;
      r |= m & 0x7fffffU;
    #endif

    return r;
  }

  /* converts an ieee single/binary32 to a float */
  ecb_function_ ecb_const float ecb_binary32_to_float (uint32_t x);
  ecb_function_ ecb_const float
  ecb_binary32_to_float (uint32_t x)
  {
    float r;

    #if ECB_STDFP
      memcpy (&r, &x, 4);
    #else
      /* emulation, only works for normals and subnormals and +0 */
      int neg = x >> 31;
      int e = (x >> 23) & 0xffU;

      x &= 0x7fffffU;

      if (e)
        x |= 0x800000U;
      else
        e = 1;

      /* we distrust ldexpf a bit and do the 2**-24 scaling by an extra multiply */
      r = ecb_ldexpf (x * (0.5f / 0x800000U), e - 126);

      r = neg ? -r : r;
    #endif

    return r;
  }
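
  /* worked examples (illustrative only):
   *   ecb_float_to_binary32 (1.0f) == 0x3f800000
   *   ecb_binary32_to_float (0x40490fdb) is the float closest to pi
   * on ECB_STDFP targets these simply memcpy the bit pattern.
   */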

  /* convert a double to ieee double/binary64 */
  ecb_function_ ecb_const uint64_t ecb_double_to_binary64 (double x);
  ecb_function_ ecb_const uint64_t
  ecb_double_to_binary64 (double x)
  {
    uint64_t r;

    #if ECB_STDFP
      memcpy (&r, &x, 8);
    #else
      /* slow emulation, works for anything but -0 */
      uint64_t m;
      int e;

      if (x == 0e0                     ) return 0x0000000000000000U;
      if (x > +1.79769313486231470e+308) return 0x7ff0000000000000U;
      if (x < -1.79769313486231470e+308) return 0xfff0000000000000U;
      if (x != x                       ) return 0x7ff7ffffffffffffU;

      m = frexp (x, &e) * 0x20000000000000U;

      r = m & 0x8000000000000000U;

      if (r)
        m = -m;

      if (e <= -1022)
        {
          m &= 0x1fffffffffffffU;
          m >>= (-1021 - e);
          e = -1022;
        }

      r |= ((uint64_t)(e + 1022)) << 52;
      r |= m & 0xfffffffffffffU;
    #endif

    return r;
  }

  /* converts an ieee double/binary64 to a double */
  ecb_function_ ecb_const double ecb_binary64_to_double (uint64_t x);
  ecb_function_ ecb_const double
  ecb_binary64_to_double (uint64_t x)
  {
    double r;

    #if ECB_STDFP
      memcpy (&r, &x, 8);
    #else
      /* emulation, only works for normals and subnormals and +0 */
      int neg = x >> 63;
      int e = (x >> 52) & 0x7ffU;

      x &= 0xfffffffffffffU;

      if (e)
        x |= 0x10000000000000U;
      else
        e = 1;

      /* we distrust ldexp a bit and do the 2**-53 scaling by an extra multiply */
      r = ldexp (x * (0.5 / 0x10000000000000U), e - 1022);

      r = neg ? -r : r;
    #endif

    return r;
  }

#endif

#endif