/*
 * libev event processing core, watcher management
 *
 * Copyright (c) 2007-2019 Marc Alexander Lehmann <libev@schmorp.de>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without modifica-
 * tion, are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright notice,
 *    this list of conditions and the following disclaimer.
 *
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
 * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MER-
 * CHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
 * EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPE-
 * CIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
 * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTH-
 * ERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
 * OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Alternatively, the contents of this file may be used under the terms of
 * the GNU General Public License ("GPL") version 2 or any later version,
 * in which case the provisions of the GPL are applicable instead of
 * the above. If you wish to allow the use of your version of this file
 * only under the terms of the GPL and not to allow others to use your
 * version of this file under the BSD license, indicate your decision
 * by deleting the provisions above and replace them with the notice
 * and other provisions required by the GPL. If you do not delete the
 * provisions above, a recipient may use your version of this file under
 * either the BSD or the GPL.
 */

/* this big block deduces configuration from config.h */
#ifndef EV_STANDALONE
# ifdef EV_CONFIG_H
#  include EV_CONFIG_H
# else
#  include "config.h"
# endif

# if HAVE_FLOOR
#  ifndef EV_USE_FLOOR
#   define EV_USE_FLOOR 1
#  endif
# endif

# if HAVE_CLOCK_SYSCALL
#  ifndef EV_USE_CLOCK_SYSCALL
#   define EV_USE_CLOCK_SYSCALL 1
#   ifndef EV_USE_REALTIME
#    define EV_USE_REALTIME 0
#   endif
#   ifndef EV_USE_MONOTONIC
#    define EV_USE_MONOTONIC 1
#   endif
#  endif
# elif !defined EV_USE_CLOCK_SYSCALL
#  define EV_USE_CLOCK_SYSCALL 0
# endif

# if HAVE_CLOCK_GETTIME
#  ifndef EV_USE_MONOTONIC
#   define EV_USE_MONOTONIC 1
#  endif
#  ifndef EV_USE_REALTIME
#   define EV_USE_REALTIME 0
#  endif
# else
#  ifndef EV_USE_MONOTONIC
#   define EV_USE_MONOTONIC 0
#  endif
#  ifndef EV_USE_REALTIME
#   define EV_USE_REALTIME 0
#  endif
# endif

# if HAVE_NANOSLEEP
#  ifndef EV_USE_NANOSLEEP
#   define EV_USE_NANOSLEEP EV_FEATURE_OS
#  endif
# else
#  undef EV_USE_NANOSLEEP
#  define EV_USE_NANOSLEEP 0
# endif

# if HAVE_SELECT && HAVE_SYS_SELECT_H
#  ifndef EV_USE_SELECT
#   define EV_USE_SELECT EV_FEATURE_BACKENDS
#  endif
# else
#  undef EV_USE_SELECT
#  define EV_USE_SELECT 0
# endif

# if HAVE_POLL && HAVE_POLL_H
#  ifndef EV_USE_POLL
#   define EV_USE_POLL EV_FEATURE_BACKENDS
#  endif
# else
#  undef EV_USE_POLL
#  define EV_USE_POLL 0
# endif

# if HAVE_EPOLL_CTL && HAVE_SYS_EPOLL_H
#  ifndef EV_USE_EPOLL
#   define EV_USE_EPOLL EV_FEATURE_BACKENDS
#  endif
# else
#  undef EV_USE_EPOLL
#  define EV_USE_EPOLL 0
# endif

# if HAVE_LINUX_AIO_ABI_H
#  ifndef EV_USE_LINUXAIO
#   define EV_USE_LINUXAIO EV_FEATURE_BACKENDS
#  endif
# else
#  undef EV_USE_LINUXAIO
#  define EV_USE_LINUXAIO 0
# endif

# if HAVE_KQUEUE && HAVE_SYS_EVENT_H
#  ifndef EV_USE_KQUEUE
#   define EV_USE_KQUEUE EV_FEATURE_BACKENDS
#  endif
# else
#  undef EV_USE_KQUEUE
#  define EV_USE_KQUEUE 0
# endif

# if HAVE_PORT_H && HAVE_PORT_CREATE
#  ifndef EV_USE_PORT
#   define EV_USE_PORT EV_FEATURE_BACKENDS
#  endif
# else
#  undef EV_USE_PORT
#  define EV_USE_PORT 0
# endif

# if HAVE_INOTIFY_INIT && HAVE_SYS_INOTIFY_H
#  ifndef EV_USE_INOTIFY
#   define EV_USE_INOTIFY EV_FEATURE_OS
#  endif
# else
#  undef EV_USE_INOTIFY
#  define EV_USE_INOTIFY 0
# endif

# if HAVE_SIGNALFD && HAVE_SYS_SIGNALFD_H
#  ifndef EV_USE_SIGNALFD
#   define EV_USE_SIGNALFD EV_FEATURE_OS
#  endif
# else
#  undef EV_USE_SIGNALFD
#  define EV_USE_SIGNALFD 0
# endif

# if HAVE_EVENTFD
#  ifndef EV_USE_EVENTFD
#   define EV_USE_EVENTFD EV_FEATURE_OS
#  endif
# else
#  undef EV_USE_EVENTFD
#  define EV_USE_EVENTFD 0
# endif

#endif
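
/* (illustration only, not shipped with libev: when EV_STANDALONE is unset,
 * the block above expects an autoconf-style config.h; on a typical GNU/Linux
 * build such a file might contain lines like
 *
 *    #define HAVE_CLOCK_GETTIME 1
 *    #define HAVE_EPOLL_CTL 1
 *    #define HAVE_SYS_EPOLL_H 1
 *    #define HAVE_EVENTFD 1
 *    #define HAVE_NANOSLEEP 1
 *
 * each HAVE_* symbol found there only provides a default for the matching
 * EV_USE_* knob, which can still be overridden explicitly.) */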

/* OS X, in its infinite idiocy, actually HARDCODES
 * a limit of 1024 into their select. Where people have brains,
 * OS X engineers apparently have a vacuum. Or maybe they were
 * ordered to have a vacuum, or they do anything for money.
 * This might help. Or not.
 * Note that this must be defined early, as other include files
 * will rely on this define as well.
 */
#define _DARWIN_UNLIMITED_SELECT 1

#include <stdlib.h>
#include <string.h>
#include <fcntl.h>
#include <stddef.h>

#include <stdio.h>

#include <assert.h>
#include <errno.h>
#include <sys/types.h>
#include <time.h>
#include <limits.h>

#include <signal.h>

#ifdef EV_H
# include EV_H
#else
# include "ev.h"
#endif

#if EV_NO_THREADS
# undef EV_NO_SMP
# define EV_NO_SMP 1
# undef ECB_NO_THREADS
# define ECB_NO_THREADS 1
#endif
#if EV_NO_SMP
# undef EV_NO_SMP
# define ECB_NO_SMP 1
#endif

#ifndef _WIN32
# include <sys/time.h>
# include <sys/wait.h>
# include <unistd.h>
#else
# include <io.h>
# define WIN32_LEAN_AND_MEAN
# include <winsock2.h>
# include <windows.h>
# ifndef EV_SELECT_IS_WINSOCKET
#  define EV_SELECT_IS_WINSOCKET 1
# endif
# undef EV_AVOID_STDIO
#endif

/* this block tries to deduce configuration from header-defined symbols and defaults */

/* try to deduce the maximum number of signals on this platform */
#if defined EV_NSIG
/* use what's provided */
#elif defined NSIG
# define EV_NSIG (NSIG)
#elif defined _NSIG
# define EV_NSIG (_NSIG)
#elif defined SIGMAX
# define EV_NSIG (SIGMAX+1)
#elif defined SIG_MAX
# define EV_NSIG (SIG_MAX+1)
#elif defined _SIG_MAX
# define EV_NSIG (_SIG_MAX+1)
#elif defined MAXSIG
# define EV_NSIG (MAXSIG+1)
#elif defined MAX_SIG
# define EV_NSIG (MAX_SIG+1)
#elif defined SIGARRAYSIZE
# define EV_NSIG (SIGARRAYSIZE) /* Assume ary[SIGARRAYSIZE] */
#elif defined _sys_nsig
# define EV_NSIG (_sys_nsig) /* Solaris 2.5 */
#else
# define EV_NSIG (8 * sizeof (sigset_t) + 1)
#endif

#ifndef EV_USE_FLOOR
# define EV_USE_FLOOR 0
#endif

#ifndef EV_USE_CLOCK_SYSCALL
# if __linux && __GLIBC__ == 2 && __GLIBC_MINOR__ < 17
#  define EV_USE_CLOCK_SYSCALL EV_FEATURE_OS
# else
#  define EV_USE_CLOCK_SYSCALL 0
# endif
#endif

#if !(_POSIX_TIMERS > 0)
# ifndef EV_USE_MONOTONIC
#  define EV_USE_MONOTONIC 0
# endif
# ifndef EV_USE_REALTIME
#  define EV_USE_REALTIME 0
# endif
#endif

#ifndef EV_USE_MONOTONIC
# if defined _POSIX_MONOTONIC_CLOCK && _POSIX_MONOTONIC_CLOCK >= 0
#  define EV_USE_MONOTONIC EV_FEATURE_OS
# else
#  define EV_USE_MONOTONIC 0
# endif
#endif

#ifndef EV_USE_REALTIME
# define EV_USE_REALTIME !EV_USE_CLOCK_SYSCALL
#endif

#ifndef EV_USE_NANOSLEEP
# if _POSIX_C_SOURCE >= 199309L
#  define EV_USE_NANOSLEEP EV_FEATURE_OS
# else
#  define EV_USE_NANOSLEEP 0
# endif
#endif

#ifndef EV_USE_SELECT
# define EV_USE_SELECT EV_FEATURE_BACKENDS
#endif

#ifndef EV_USE_POLL
# ifdef _WIN32
#  define EV_USE_POLL 0
# else
#  define EV_USE_POLL EV_FEATURE_BACKENDS
# endif
#endif

#ifndef EV_USE_EPOLL
# if __linux && (__GLIBC__ > 2 || (__GLIBC__ == 2 && __GLIBC_MINOR__ >= 4))
#  define EV_USE_EPOLL EV_FEATURE_BACKENDS
# else
#  define EV_USE_EPOLL 0
# endif
#endif

#ifndef EV_USE_KQUEUE
# define EV_USE_KQUEUE 0
#endif

#ifndef EV_USE_PORT
# define EV_USE_PORT 0
#endif

#ifndef EV_USE_LINUXAIO
# if __linux /* libev currently assumes linux/aio_abi.h is always available on linux */
#  define EV_USE_LINUXAIO 1
# else
#  define EV_USE_LINUXAIO 0
# endif
#endif

#ifndef EV_USE_IOURING
# if __linux
#  define EV_USE_IOURING 0
# else
#  define EV_USE_IOURING 0
# endif
#endif

#ifndef EV_USE_INOTIFY
# if __linux && (__GLIBC__ > 2 || (__GLIBC__ == 2 && __GLIBC_MINOR__ >= 4))
#  define EV_USE_INOTIFY EV_FEATURE_OS
# else
#  define EV_USE_INOTIFY 0
# endif
#endif

#ifndef EV_PID_HASHSIZE
# define EV_PID_HASHSIZE EV_FEATURE_DATA ? 16 : 1
#endif

#ifndef EV_INOTIFY_HASHSIZE
# define EV_INOTIFY_HASHSIZE EV_FEATURE_DATA ? 16 : 1
#endif

#ifndef EV_USE_EVENTFD
# if __linux && (__GLIBC__ > 2 || (__GLIBC__ == 2 && __GLIBC_MINOR__ >= 7))
#  define EV_USE_EVENTFD EV_FEATURE_OS
# else
#  define EV_USE_EVENTFD 0
# endif
#endif

#ifndef EV_USE_SIGNALFD
# if __linux && (__GLIBC__ > 2 || (__GLIBC__ == 2 && __GLIBC_MINOR__ >= 7))
#  define EV_USE_SIGNALFD EV_FEATURE_OS
# else
#  define EV_USE_SIGNALFD 0
# endif
#endif

#if 0 /* debugging */
# define EV_VERIFY 3
# define EV_USE_4HEAP 1
# define EV_HEAP_CACHE_AT 1
#endif

#ifndef EV_VERIFY
# define EV_VERIFY (EV_FEATURE_API ? 1 : 0)
#endif

#ifndef EV_USE_4HEAP
# define EV_USE_4HEAP EV_FEATURE_DATA
#endif

#ifndef EV_HEAP_CACHE_AT
# define EV_HEAP_CACHE_AT EV_FEATURE_DATA
#endif

#ifdef __ANDROID__
/* supposedly, android doesn't typedef fd_mask */
# undef EV_USE_SELECT
# define EV_USE_SELECT 0
/* supposedly, we need to include syscall.h, not sys/syscall.h, so just disable */
# undef EV_USE_CLOCK_SYSCALL
# define EV_USE_CLOCK_SYSCALL 0
#endif

/* aix's poll.h seems to cause lots of trouble */
#ifdef _AIX
/* AIX has a completely broken poll.h header */
# undef EV_USE_POLL
# define EV_USE_POLL 0
#endif

/* on linux, we can use a (slow) syscall to avoid a dependency on pthread, */
/* which makes programs even slower. might work on other unices, too. */
#if EV_USE_CLOCK_SYSCALL
# include <sys/syscall.h>
# ifdef SYS_clock_gettime
#  define clock_gettime(id, ts) syscall (SYS_clock_gettime, (id), (ts))
#  undef EV_USE_MONOTONIC
#  define EV_USE_MONOTONIC 1
#  define EV_NEED_SYSCALL 1
# else
#  undef EV_USE_CLOCK_SYSCALL
#  define EV_USE_CLOCK_SYSCALL 0
# endif
#endif

/* this block fixes any misconfiguration where we know we run into trouble otherwise */

#ifndef CLOCK_MONOTONIC
# undef EV_USE_MONOTONIC
# define EV_USE_MONOTONIC 0
#endif

#ifndef CLOCK_REALTIME
# undef EV_USE_REALTIME
# define EV_USE_REALTIME 0
#endif

#if !EV_STAT_ENABLE
# undef EV_USE_INOTIFY
# define EV_USE_INOTIFY 0
#endif

#if !EV_USE_NANOSLEEP
/* hp-ux has it in sys/time.h, which we unconditionally include above */
# if !defined _WIN32 && !defined __hpux
#  include <sys/select.h>
# endif
#endif

#if EV_USE_LINUXAIO
# include <sys/syscall.h>
# if SYS_io_getevents && EV_USE_EPOLL /* linuxaio backend requires epoll backend */
#  define EV_NEED_SYSCALL 1
# else
#  undef EV_USE_LINUXAIO
#  define EV_USE_LINUXAIO 0
# endif
#endif

#if EV_USE_IOURING
# include <sys/syscall.h>
# if !SYS_io_uring_setup && __linux && !__alpha
#  define SYS_io_uring_setup 425
#  define SYS_io_uring_enter 426
#  define SYS_io_uring_register 427
# endif
# if SYS_io_uring_setup && EV_USE_EPOLL /* iouring backend requires epoll backend */
#  define EV_NEED_SYSCALL 1
# else
#  undef EV_USE_IOURING
#  define EV_USE_IOURING 0
# endif
#endif

#if EV_USE_INOTIFY
# include <sys/statfs.h>
# include <sys/inotify.h>
/* some very old inotify.h headers don't have IN_DONT_FOLLOW */
# ifndef IN_DONT_FOLLOW
#  undef EV_USE_INOTIFY
#  define EV_USE_INOTIFY 0
# endif
#endif

#if EV_USE_EVENTFD
/* our minimum requirement is glibc 2.7 which has the stub, but not the header */
# include <stdint.h>
# ifndef EFD_NONBLOCK
#  define EFD_NONBLOCK O_NONBLOCK
# endif
# ifndef EFD_CLOEXEC
#  ifdef O_CLOEXEC
#   define EFD_CLOEXEC O_CLOEXEC
#  else
#   define EFD_CLOEXEC 02000000
#  endif
# endif
EV_CPP(extern "C") int (eventfd) (unsigned int initval, int flags);
#endif

#if EV_USE_SIGNALFD
/* our minimum requirement is glibc 2.7 which has the stub, but not the header */
# include <stdint.h>
# ifndef SFD_NONBLOCK
#  define SFD_NONBLOCK O_NONBLOCK
# endif
# ifndef SFD_CLOEXEC
#  ifdef O_CLOEXEC
#   define SFD_CLOEXEC O_CLOEXEC
#  else
#   define SFD_CLOEXEC 02000000
#  endif
# endif
EV_CPP (extern "C") int signalfd (int fd, const sigset_t *mask, int flags);

struct signalfd_siginfo
{
  uint32_t ssi_signo;
  char pad[128 - sizeof (uint32_t)];
};
#endif

/*****************************************************************************/

#if EV_VERIFY >= 3
# define EV_FREQUENT_CHECK ev_verify (EV_A)
#else
# define EV_FREQUENT_CHECK do { } while (0)
#endif

/*
 * This is used to work around floating point rounding problems.
 * This value is good at least till the year 4000.
 */
#define MIN_INTERVAL  0.0001220703125 /* 1/2**13, good till 4000 */
/*#define MIN_INTERVAL  0.00000095367431640625 /* 1/2**20, good till 2200 */

#define MIN_TIMEJUMP  1. /* minimum timejump that gets detected (if monotonic clock available) */
#define MAX_BLOCKTIME 59.743 /* never wait longer than this time (to detect time jumps) */

/* find a portable timestamp that is "always" in the future but fits into time_t.
 * this is quite hard, and we are mostly guessing - we handle 32 bit signed/unsigned time_t,
 * sizes larger than 32 bit, and maybe the unlikely floating point time_t */
#define EV_TSTAMP_HUGE \
  (sizeof (time_t) >= 8     ? 10000000000000. \
   : 0 < (time_t)4294967295 ?     4294967295. \
   :                              2147483647.) \

#define EV_TV_SET(tv,t) do { tv.tv_sec = (long)t; tv.tv_usec = (long)((t - tv.tv_sec) * 1e6); } while (0)
#define EV_TS_SET(ts,t) do { ts.tv_sec = (long)t; ts.tv_nsec = (long)((t - ts.tv_sec) * 1e9); } while (0)
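
/* (illustration only, not part of libev proper: the two conversion macros
 * above split a fractional ev_tstamp into whole seconds plus the sub-second
 * remainder, so with t = 2.25
 *
 *    struct timespec ts; EV_TS_SET (ts, 2.25);  => ts.tv_sec == 2, ts.tv_nsec == 250000000
 *    struct timeval  tv; EV_TV_SET (tv, 2.25);  => tv.tv_sec == 2, tv.tv_usec == 250000
 *
 * the (long) casts truncate, which assumes a non-negative t.) */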

/* the following is ecb.h embedded into libev - use update_ev_c to update from an external copy */
/* ECB.H BEGIN */
/*
 * libecb - http://software.schmorp.de/pkg/libecb
 *
 * Copyright (©) 2009-2015 Marc Alexander Lehmann <libecb@schmorp.de>
 * Copyright (©) 2011 Emanuele Giaquinta
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without modifica-
 * tion, are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright notice,
 *    this list of conditions and the following disclaimer.
 *
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
 * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MER-
 * CHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
 * EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPE-
 * CIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
 * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTH-
 * ERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
 * OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Alternatively, the contents of this file may be used under the terms of
 * the GNU General Public License ("GPL") version 2 or any later version,
 * in which case the provisions of the GPL are applicable instead of
 * the above. If you wish to allow the use of your version of this file
 * only under the terms of the GPL and not to allow others to use your
 * version of this file under the BSD license, indicate your decision
 * by deleting the provisions above and replace them with the notice
 * and other provisions required by the GPL. If you do not delete the
 * provisions above, a recipient may use your version of this file under
 * either the BSD or the GPL.
 */

#ifndef ECB_H
#define ECB_H

/* 16 bits major, 16 bits minor */
#define ECB_VERSION 0x00010006

#ifdef _WIN32
  typedef   signed char   int8_t;
  typedef unsigned char  uint8_t;
  typedef   signed short  int16_t;
  typedef unsigned short uint16_t;
  typedef   signed int    int32_t;
  typedef unsigned int   uint32_t;
  #if __GNUC__
    typedef   signed long long int64_t;
    typedef unsigned long long uint64_t;
  #else /* _MSC_VER || __BORLANDC__ */
    typedef   signed __int64   int64_t;
    typedef unsigned __int64   uint64_t;
  #endif
  #ifdef _WIN64
    #define ECB_PTRSIZE 8
    typedef uint64_t uintptr_t;
    typedef  int64_t  intptr_t;
  #else
    #define ECB_PTRSIZE 4
    typedef uint32_t uintptr_t;
    typedef  int32_t  intptr_t;
  #endif
#else
  #include <inttypes.h>
  #if (defined INTPTR_MAX ? INTPTR_MAX : ULONG_MAX) > 0xffffffffU
    #define ECB_PTRSIZE 8
  #else
    #define ECB_PTRSIZE 4
  #endif
#endif

#define ECB_GCC_AMD64 (__amd64 || __amd64__ || __x86_64 || __x86_64__)
#define ECB_MSVC_AMD64 (_M_AMD64 || _M_X64)

/* work around x32 idiocy by defining proper macros */
#if ECB_GCC_AMD64 || ECB_MSVC_AMD64
  #if _ILP32
    #define ECB_AMD64_X32 1
  #else
    #define ECB_AMD64 1
  #endif
#endif

/* many compilers define __GNUC__ to some versions but then only implement
 * what their idiot authors think are the "more important" extensions,
 * causing enormous grief in return for some better fake benchmark numbers.
 * or so.
 * we try to detect these and simply assume they are not gcc - if they have
 * an issue with that they should have done it right in the first place.
 */
#if !defined __GNUC_MINOR__ || defined __INTEL_COMPILER || defined __SUNPRO_C || defined __SUNPRO_CC || defined __llvm__ || defined __clang__
  #define ECB_GCC_VERSION(major,minor) 0
#else
  #define ECB_GCC_VERSION(major,minor) (__GNUC__ > (major) || (__GNUC__ == (major) && __GNUC_MINOR__ >= (minor)))
#endif

#define ECB_CLANG_VERSION(major,minor) (__clang_major__ > (major) || (__clang_major__ == (major) && __clang_minor__ >= (minor)))

#if __clang__ && defined __has_builtin
  #define ECB_CLANG_BUILTIN(x) __has_builtin (x)
#else
  #define ECB_CLANG_BUILTIN(x) 0
#endif

#if __clang__ && defined __has_extension
  #define ECB_CLANG_EXTENSION(x) __has_extension (x)
#else
  #define ECB_CLANG_EXTENSION(x) 0
#endif

#define ECB_CPP   (__cplusplus+0)
#define ECB_CPP11 (__cplusplus >= 201103L)
#define ECB_CPP14 (__cplusplus >= 201402L)
#define ECB_CPP17 (__cplusplus >= 201703L)

#if ECB_CPP
  #define ECB_C            0
  #define ECB_STDC_VERSION 0
#else
  #define ECB_C            1
  #define ECB_STDC_VERSION __STDC_VERSION__
#endif

#define ECB_C99   (ECB_STDC_VERSION >= 199901L)
#define ECB_C11   (ECB_STDC_VERSION >= 201112L)
#define ECB_C17   (ECB_STDC_VERSION >= 201710L)

#if ECB_CPP
  #define ECB_EXTERN_C extern "C"
  #define ECB_EXTERN_C_BEG ECB_EXTERN_C {
  #define ECB_EXTERN_C_END }
#else
  #define ECB_EXTERN_C extern
  #define ECB_EXTERN_C_BEG
  #define ECB_EXTERN_C_END
#endif

/*****************************************************************************/

/* ECB_NO_THREADS - ecb is not used by multiple threads, ever */
/* ECB_NO_SMP     - ecb might be used in multiple threads, but only on a single cpu */

#if ECB_NO_THREADS
  #define ECB_NO_SMP 1
#endif

#if ECB_NO_SMP
  #define ECB_MEMORY_FENCE do { } while (0)
#endif

/* http://www-01.ibm.com/support/knowledgecenter/SSGH3R_13.1.0/com.ibm.xlcpp131.aix.doc/compiler_ref/compiler_builtins.html */
#if __xlC__ && ECB_CPP
  #include <builtins.h>
#endif

#if 1400 <= _MSC_VER
  #include <intrin.h> /* fence functions _ReadBarrier, also bit search functions _BitScanReverse */
#endif

#ifndef ECB_MEMORY_FENCE
  #if ECB_GCC_VERSION(2,5) || defined __INTEL_COMPILER || (__llvm__ && __GNUC__) || __SUNPRO_C >= 0x5110 || __SUNPRO_CC >= 0x5110
    #define ECB_MEMORY_FENCE_RELAXED __asm__ __volatile__ ("" : : : "memory")
    #if __i386 || __i386__
      #define ECB_MEMORY_FENCE         __asm__ __volatile__ ("lock; orb $0, -1(%%esp)" : : : "memory")
      #define ECB_MEMORY_FENCE_ACQUIRE __asm__ __volatile__ ("" : : : "memory")
      #define ECB_MEMORY_FENCE_RELEASE __asm__ __volatile__ ("" : : : "memory")
    #elif ECB_GCC_AMD64
      #define ECB_MEMORY_FENCE         __asm__ __volatile__ ("mfence" : : : "memory")
      #define ECB_MEMORY_FENCE_ACQUIRE __asm__ __volatile__ ("" : : : "memory")
      #define ECB_MEMORY_FENCE_RELEASE __asm__ __volatile__ ("" : : : "memory")
    #elif __powerpc__ || __ppc__ || __powerpc64__ || __ppc64__
      #define ECB_MEMORY_FENCE         __asm__ __volatile__ ("sync" : : : "memory")
    #elif defined __ARM_ARCH_2__ \
      || defined __ARM_ARCH_3__  || defined __ARM_ARCH_3M__  \
      || defined __ARM_ARCH_4__  || defined __ARM_ARCH_4T__  \
      || defined __ARM_ARCH_5__  || defined __ARM_ARCH_5E__  \
      || defined __ARM_ARCH_5T__ || defined __ARM_ARCH_5TE__ \
      || defined __ARM_ARCH_5TEJ__
      /* should not need any, unless running old code on newer cpu - arm doesn't support that */
    #elif defined __ARM_ARCH_6__  || defined __ARM_ARCH_6J__  \
      || defined __ARM_ARCH_6K__ || defined __ARM_ARCH_6ZK__ \
      || defined __ARM_ARCH_6T2__
      #define ECB_MEMORY_FENCE         __asm__ __volatile__ ("mcr p15,0,%0,c7,c10,5" : : "r" (0) : "memory")
    #elif defined __ARM_ARCH_7__  || defined __ARM_ARCH_7A__  \
      || defined __ARM_ARCH_7R__ || defined __ARM_ARCH_7M__
      #define ECB_MEMORY_FENCE         __asm__ __volatile__ ("dmb" : : : "memory")
    #elif __aarch64__
      #define ECB_MEMORY_FENCE         __asm__ __volatile__ ("dmb ish" : : : "memory")
    #elif (__sparc || __sparc__) && !(__sparc_v8__ || defined __sparcv8)
      #define ECB_MEMORY_FENCE         __asm__ __volatile__ ("membar #LoadStore | #LoadLoad | #StoreStore | #StoreLoad" : : : "memory")
      #define ECB_MEMORY_FENCE_ACQUIRE __asm__ __volatile__ ("membar #LoadStore | #LoadLoad" : : : "memory")
      #define ECB_MEMORY_FENCE_RELEASE __asm__ __volatile__ ("membar #LoadStore | #StoreStore")
    #elif defined __s390__ || defined __s390x__
      #define ECB_MEMORY_FENCE         __asm__ __volatile__ ("bcr 15,0" : : : "memory")
    #elif defined __mips__
      /* GNU/Linux emulates sync on mips1 architectures, so we force its use */
      /* anybody else who still uses mips1 is supposed to send in their version, with detection code. */
      #define ECB_MEMORY_FENCE         __asm__ __volatile__ (".set mips2; sync; .set mips0" : : : "memory")
    #elif defined __alpha__
      #define ECB_MEMORY_FENCE         __asm__ __volatile__ ("mb" : : : "memory")
    #elif defined __hppa__
      #define ECB_MEMORY_FENCE         __asm__ __volatile__ ("" : : : "memory")
      #define ECB_MEMORY_FENCE_RELEASE __asm__ __volatile__ ("")
    #elif defined __ia64__
      #define ECB_MEMORY_FENCE         __asm__ __volatile__ ("mf" : : : "memory")
    #elif defined __m68k__
      #define ECB_MEMORY_FENCE         __asm__ __volatile__ ("" : : : "memory")
    #elif defined __m88k__
      #define ECB_MEMORY_FENCE         __asm__ __volatile__ ("tb1 0,%%r0,128" : : : "memory")
    #elif defined __sh__
      #define ECB_MEMORY_FENCE         __asm__ __volatile__ ("" : : : "memory")
    #endif
  #endif
#endif

#ifndef ECB_MEMORY_FENCE
  #if ECB_GCC_VERSION(4,7)
    /* see comment below (stdatomic.h) about the C11 memory model. */
    #define ECB_MEMORY_FENCE         __atomic_thread_fence (__ATOMIC_SEQ_CST)
    #define ECB_MEMORY_FENCE_ACQUIRE __atomic_thread_fence (__ATOMIC_ACQUIRE)
    #define ECB_MEMORY_FENCE_RELEASE __atomic_thread_fence (__ATOMIC_RELEASE)
    #define ECB_MEMORY_FENCE_RELAXED __atomic_thread_fence (__ATOMIC_RELAXED)

  #elif ECB_CLANG_EXTENSION(c_atomic)
    /* see comment below (stdatomic.h) about the C11 memory model. */
    #define ECB_MEMORY_FENCE         __c11_atomic_thread_fence (__ATOMIC_SEQ_CST)
    #define ECB_MEMORY_FENCE_ACQUIRE __c11_atomic_thread_fence (__ATOMIC_ACQUIRE)
    #define ECB_MEMORY_FENCE_RELEASE __c11_atomic_thread_fence (__ATOMIC_RELEASE)
    #define ECB_MEMORY_FENCE_RELAXED __c11_atomic_thread_fence (__ATOMIC_RELAXED)

  #elif ECB_GCC_VERSION(4,4) || defined __INTEL_COMPILER || defined __clang__
    #define ECB_MEMORY_FENCE __sync_synchronize ()
  #elif _MSC_VER >= 1500 /* VC++ 2008 */
    /* apparently, microsoft broke all the memory barrier stuff in Visual Studio 2008... */
    #pragma intrinsic(_ReadBarrier,_WriteBarrier,_ReadWriteBarrier)
    #define ECB_MEMORY_FENCE         _ReadWriteBarrier (); MemoryBarrier()
    #define ECB_MEMORY_FENCE_ACQUIRE _ReadWriteBarrier (); MemoryBarrier() /* according to msdn, _ReadBarrier is not a load fence */
    #define ECB_MEMORY_FENCE_RELEASE _WriteBarrier (); MemoryBarrier()
  #elif _MSC_VER >= 1400 /* VC++ 2005 */
    #pragma intrinsic(_ReadBarrier,_WriteBarrier,_ReadWriteBarrier)
    #define ECB_MEMORY_FENCE         _ReadWriteBarrier ()
    #define ECB_MEMORY_FENCE_ACQUIRE _ReadWriteBarrier () /* according to msdn, _ReadBarrier is not a load fence */
    #define ECB_MEMORY_FENCE_RELEASE _WriteBarrier ()
  #elif defined _WIN32
    #include <WinNT.h>
    #define ECB_MEMORY_FENCE MemoryBarrier () /* actually just xchg on x86... scary */
  #elif __SUNPRO_C >= 0x5110 || __SUNPRO_CC >= 0x5110
    #include <mbarrier.h>
    #define ECB_MEMORY_FENCE         __machine_rw_barrier ()
    #define ECB_MEMORY_FENCE_ACQUIRE __machine_acq_barrier ()
    #define ECB_MEMORY_FENCE_RELEASE __machine_rel_barrier ()
    #define ECB_MEMORY_FENCE_RELAXED __compiler_barrier ()
  #elif __xlC__
    #define ECB_MEMORY_FENCE __sync ()
  #endif
#endif

#ifndef ECB_MEMORY_FENCE
  #if ECB_C11 && !defined __STDC_NO_ATOMICS__
    /* we assume that these memory fences work on all variables/all memory accesses, */
    /* not just C11 atomics and atomic accesses */
    #include <stdatomic.h>
    #define ECB_MEMORY_FENCE         atomic_thread_fence (memory_order_seq_cst)
    #define ECB_MEMORY_FENCE_ACQUIRE atomic_thread_fence (memory_order_acquire)
    #define ECB_MEMORY_FENCE_RELEASE atomic_thread_fence (memory_order_release)
  #endif
#endif

#ifndef ECB_MEMORY_FENCE
  #if !ECB_AVOID_PTHREADS
    /*
     * if you get undefined symbol references to pthread_mutex_lock,
     * or failure to find pthread.h, then you should implement
     * the ECB_MEMORY_FENCE operations for your cpu/compiler
     * OR provide pthread.h and link against the posix thread library
     * of your system.
     */
    #include <pthread.h>
    #define ECB_NEEDS_PTHREADS 1
    #define ECB_MEMORY_FENCE_NEEDS_PTHREADS 1

    static pthread_mutex_t ecb_mf_lock = PTHREAD_MUTEX_INITIALIZER;
    #define ECB_MEMORY_FENCE do { pthread_mutex_lock (&ecb_mf_lock); pthread_mutex_unlock (&ecb_mf_lock); } while (0)
  #endif
#endif

#if !defined ECB_MEMORY_FENCE_ACQUIRE && defined ECB_MEMORY_FENCE
  #define ECB_MEMORY_FENCE_ACQUIRE ECB_MEMORY_FENCE
#endif

#if !defined ECB_MEMORY_FENCE_RELEASE && defined ECB_MEMORY_FENCE
  #define ECB_MEMORY_FENCE_RELEASE ECB_MEMORY_FENCE
#endif

#if !defined ECB_MEMORY_FENCE_RELAXED && defined ECB_MEMORY_FENCE
  #define ECB_MEMORY_FENCE_RELAXED ECB_MEMORY_FENCE /* very heavy-handed */
#endif
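
/* (illustration only, not part of libecb: the typical use of these fences is
 * a release/acquire handshake between two threads sharing "data" and a
 * volatile "flag" -
 *
 *    producer: data = 42;                 consumer: while (!flag) ;
 *              ECB_MEMORY_FENCE_RELEASE;            ECB_MEMORY_FENCE_ACQUIRE;
 *              flag = 1;                            use (data);
 *
 * the release fence keeps the data store from moving past the flag store,
 * the acquire fence keeps the data load from moving before the flag load.) */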

/*****************************************************************************/

#if ECB_CPP
  #define ecb_inline static inline
#elif ECB_GCC_VERSION(2,5)
  #define ecb_inline static __inline__
#elif ECB_C99
  #define ecb_inline static inline
#else
  #define ecb_inline static
#endif

#if ECB_GCC_VERSION(3,3)
  #define ecb_restrict __restrict__
#elif ECB_C99
  #define ecb_restrict restrict
#else
  #define ecb_restrict
#endif

typedef int ecb_bool;

#define ECB_CONCAT_(a, b) a ## b
#define ECB_CONCAT(a, b) ECB_CONCAT_(a, b)
#define ECB_STRINGIFY_(a) # a
#define ECB_STRINGIFY(a) ECB_STRINGIFY_(a)
#define ECB_STRINGIFY_EXPR(expr) ((expr), ECB_STRINGIFY_ (expr))

#define ecb_function_ ecb_inline

#if ECB_GCC_VERSION(3,1) || ECB_CLANG_VERSION(2,8)
  #define ecb_attribute(attrlist)        __attribute__ (attrlist)
#else
  #define ecb_attribute(attrlist)
#endif

#if ECB_GCC_VERSION(3,1) || ECB_CLANG_BUILTIN(__builtin_constant_p)
  #define ecb_is_constant(expr)          __builtin_constant_p (expr)
#else
  /* possible C11 impl for integral types
  typedef struct ecb_is_constant_struct ecb_is_constant_struct;
  #define ecb_is_constant(expr)          _Generic ((1 ? (struct ecb_is_constant_struct *)0 : (void *)((expr) - (expr)), ecb_is_constant_struct *: 0, default: 1)) */

  #define ecb_is_constant(expr)          0
#endif

#if ECB_GCC_VERSION(3,1) || ECB_CLANG_BUILTIN(__builtin_expect)
  #define ecb_expect(expr,value)         __builtin_expect ((expr),(value))
#else
  #define ecb_expect(expr,value)         (expr)
#endif

#if ECB_GCC_VERSION(3,1) || ECB_CLANG_BUILTIN(__builtin_prefetch)
  #define ecb_prefetch(addr,rw,locality) __builtin_prefetch (addr, rw, locality)
#else
  #define ecb_prefetch(addr,rw,locality)
#endif

/* no emulation for ecb_decltype */
#if ECB_CPP11
  // older implementations might have problems with decltype(x)::type, work around it
  template<class T> struct ecb_decltype_t { typedef T type; };
  #define ecb_decltype(x) ecb_decltype_t<decltype (x)>::type
#elif ECB_GCC_VERSION(3,0) || ECB_CLANG_VERSION(2,8)
  #define ecb_decltype(x) __typeof__ (x)
#endif

#if _MSC_VER >= 1300
  #define ecb_deprecated __declspec (deprecated)
#else
  #define ecb_deprecated ecb_attribute ((__deprecated__))
#endif

#if _MSC_VER >= 1500
  #define ecb_deprecated_message(msg) __declspec (deprecated (msg))
#elif ECB_GCC_VERSION(4,5)
  #define ecb_deprecated_message(msg) ecb_attribute ((__deprecated__ (msg)))
#else
  #define ecb_deprecated_message(msg) ecb_deprecated
#endif

#if _MSC_VER >= 1400
  #define ecb_noinline __declspec (noinline)
#else
  #define ecb_noinline ecb_attribute ((__noinline__))
#endif

#define ecb_unused     ecb_attribute ((__unused__))
#define ecb_const      ecb_attribute ((__const__))
#define ecb_pure       ecb_attribute ((__pure__))

#if ECB_C11 || __IBMC_NORETURN
  /* http://www-01.ibm.com/support/knowledgecenter/SSGH3R_13.1.0/com.ibm.xlcpp131.aix.doc/language_ref/noreturn.html */
  #define ecb_noreturn _Noreturn
#elif ECB_CPP11
  #define ecb_noreturn [[noreturn]]
#elif _MSC_VER >= 1200
  /* http://msdn.microsoft.com/en-us/library/k6ktzx3s.aspx */
  #define ecb_noreturn __declspec (noreturn)
#else
  #define ecb_noreturn ecb_attribute ((__noreturn__))
#endif

#if ECB_GCC_VERSION(4,3)
  #define ecb_artificial ecb_attribute ((__artificial__))
  #define ecb_hot        ecb_attribute ((__hot__))
  #define ecb_cold       ecb_attribute ((__cold__))
#else
  #define ecb_artificial
  #define ecb_hot
  #define ecb_cold
#endif

/* put around conditional expressions if you are very sure that the  */
/* expression is mostly true or mostly false. note that these return */
/* booleans, not the expression.                                     */
#define ecb_expect_false(expr) ecb_expect (!!(expr), 0)
#define ecb_expect_true(expr)  ecb_expect (!!(expr), 1)
/* for compatibility to the rest of the world */
#define ecb_likely(expr)   ecb_expect_true  (expr)
#define ecb_unlikely(expr) ecb_expect_false (expr)

/* count trailing zero bits and count # of one bits */
#if ECB_GCC_VERSION(3,4) \
    || (ECB_CLANG_BUILTIN(__builtin_clz) && ECB_CLANG_BUILTIN(__builtin_clzll) \
        && ECB_CLANG_BUILTIN(__builtin_ctz) && ECB_CLANG_BUILTIN(__builtin_ctzll) \
        && ECB_CLANG_BUILTIN(__builtin_popcount))
  /* we assume int == 32 bit, long == 32 or 64 bit and long long == 64 bit */
  #define ecb_ld32(x)      (__builtin_clz      (x) ^ 31)
  #define ecb_ld64(x)      (__builtin_clzll    (x) ^ 63)
  #define ecb_ctz32(x)      __builtin_ctz      (x)
  #define ecb_ctz64(x)      __builtin_ctzll    (x)
  #define ecb_popcount32(x) __builtin_popcount (x)
  /* no popcountll */
#else
  ecb_function_ ecb_const int ecb_ctz32 (uint32_t x);
  ecb_function_ ecb_const int
  ecb_ctz32 (uint32_t x)
  {
#if 1400 <= _MSC_VER && (_M_IX86 || _M_X64 || _M_IA64 || _M_ARM)
    unsigned long r;
    _BitScanForward (&r, x);
    return (int)r;
#else
    int r = 0;

    x &= ~x + 1; /* this isolates the lowest bit */

#if ECB_branchless_on_i386
    r += !!(x & 0xaaaaaaaa) << 0;
    r += !!(x & 0xcccccccc) << 1;
    r += !!(x & 0xf0f0f0f0) << 2;
    r += !!(x & 0xff00ff00) << 3;
    r += !!(x & 0xffff0000) << 4;
#else
    if (x & 0xaaaaaaaa) r +=  1;
    if (x & 0xcccccccc) r +=  2;
    if (x & 0xf0f0f0f0) r +=  4;
    if (x & 0xff00ff00) r +=  8;
    if (x & 0xffff0000) r += 16;
#endif

    return r;
#endif
  }

  ecb_function_ ecb_const int ecb_ctz64 (uint64_t x);
  ecb_function_ ecb_const int
  ecb_ctz64 (uint64_t x)
  {
#if 1400 <= _MSC_VER && (_M_X64 || _M_IA64 || _M_ARM)
    unsigned long r;
    _BitScanForward64 (&r, x);
    return (int)r;
#else
    int shift = x & 0xffffffff ? 0 : 32;
    return ecb_ctz32 (x >> shift) + shift;
#endif
  }

  ecb_function_ ecb_const int ecb_popcount32 (uint32_t x);
  ecb_function_ ecb_const int
  ecb_popcount32 (uint32_t x)
  {
    x -=  (x >> 1) & 0x55555555;
    x   = ((x >> 2) & 0x33333333) + (x & 0x33333333);
    x   = ((x >> 4) + x) & 0x0f0f0f0f;
    x  *= 0x01010101;

    return x >> 24;
  }

  ecb_function_ ecb_const int ecb_ld32 (uint32_t x);
  ecb_function_ ecb_const int ecb_ld32 (uint32_t x)
  {
#if 1400 <= _MSC_VER && (_M_IX86 || _M_X64 || _M_IA64 || _M_ARM)
    unsigned long r;
    _BitScanReverse (&r, x);
    return (int)r;
#else
    int r = 0;

    if (x >> 16) { x >>= 16; r += 16; }
    if (x >>  8) { x >>=  8; r +=  8; }
    if (x >>  4) { x >>=  4; r +=  4; }
    if (x >>  2) { x >>=  2; r +=  2; }
    if (x >>  1) {           r +=  1; }

    return r;
#endif
  }

  ecb_function_ ecb_const int ecb_ld64 (uint64_t x);
  ecb_function_ ecb_const int ecb_ld64 (uint64_t x)
  {
#if 1400 <= _MSC_VER && (_M_X64 || _M_IA64 || _M_ARM)
    unsigned long r;
    _BitScanReverse64 (&r, x);
    return (int)r;
#else
    int r = 0;

    if (x >> 32) { x >>= 32; r += 32; }

    return r + ecb_ld32 (x);
#endif
  }
#endif
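
/* (illustration only, not part of libecb: as a sanity check for the bit
 * operations above, take x = 0x000000b4 (binary 10110100):
 * ecb_ctz32 (x) == 2 (two trailing zero bits), ecb_ld32 (x) == 7 (highest
 * set bit) and ecb_popcount32 (x) == 4 (four one-bits); the builtin and
 * fallback code paths are expected to agree on such values.) */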

ecb_function_ ecb_const ecb_bool ecb_is_pot32 (uint32_t x);
ecb_function_ ecb_const ecb_bool ecb_is_pot32 (uint32_t x) { return !(x & (x - 1)); }
ecb_function_ ecb_const ecb_bool ecb_is_pot64 (uint64_t x);
ecb_function_ ecb_const ecb_bool ecb_is_pot64 (uint64_t x) { return !(x & (x - 1)); }

ecb_function_ ecb_const uint8_t ecb_bitrev8 (uint8_t x);
ecb_function_ ecb_const uint8_t ecb_bitrev8 (uint8_t x)
{
  return (  (x * 0x0802U & 0x22110U)
          | (x * 0x8020U & 0x88440U)) * 0x10101U >> 16;
}

ecb_function_ ecb_const uint16_t ecb_bitrev16 (uint16_t x);
ecb_function_ ecb_const uint16_t ecb_bitrev16 (uint16_t x)
{
  x = ((x >>  1) & 0x5555) | ((x & 0x5555) <<  1);
  x = ((x >>  2) & 0x3333) | ((x & 0x3333) <<  2);
  x = ((x >>  4) & 0x0f0f) | ((x & 0x0f0f) <<  4);
  x = ( x >>  8          ) | ( x           <<  8);

  return x;
}

ecb_function_ ecb_const uint32_t ecb_bitrev32 (uint32_t x);
ecb_function_ ecb_const uint32_t ecb_bitrev32 (uint32_t x)
{
  x = ((x >>  1) & 0x55555555) | ((x & 0x55555555) <<  1);
  x = ((x >>  2) & 0x33333333) | ((x & 0x33333333) <<  2);
  x = ((x >>  4) & 0x0f0f0f0f) | ((x & 0x0f0f0f0f) <<  4);
  x = ((x >>  8) & 0x00ff00ff) | ((x & 0x00ff00ff) <<  8);
  x = ( x >> 16              ) | ( x               << 16);

  return x;
}

/* popcount64 is only available on 64 bit cpus as gcc builtin */
/* so for this version we are lazy */
ecb_function_ ecb_const int ecb_popcount64 (uint64_t x);
ecb_function_ ecb_const int
ecb_popcount64 (uint64_t x)
{
  return ecb_popcount32 (x) + ecb_popcount32 (x >> 32);
}

ecb_inline ecb_const uint8_t  ecb_rotl8  (uint8_t  x, unsigned int count);
ecb_inline ecb_const uint8_t  ecb_rotr8  (uint8_t  x, unsigned int count);
ecb_inline ecb_const uint16_t ecb_rotl16 (uint16_t x, unsigned int count);
ecb_inline ecb_const uint16_t ecb_rotr16 (uint16_t x, unsigned int count);
ecb_inline ecb_const uint32_t ecb_rotl32 (uint32_t x, unsigned int count);
ecb_inline ecb_const uint32_t ecb_rotr32 (uint32_t x, unsigned int count);
ecb_inline ecb_const uint64_t ecb_rotl64 (uint64_t x, unsigned int count);
ecb_inline ecb_const uint64_t ecb_rotr64 (uint64_t x, unsigned int count);

ecb_inline ecb_const uint8_t  ecb_rotl8  (uint8_t  x, unsigned int count) { return (x >> ( 8 - count)) | (x << count); }
ecb_inline ecb_const uint8_t  ecb_rotr8  (uint8_t  x, unsigned int count) { return (x << ( 8 - count)) | (x >> count); }
ecb_inline ecb_const uint16_t ecb_rotl16 (uint16_t x, unsigned int count) { return (x >> (16 - count)) | (x << count); }
ecb_inline ecb_const uint16_t ecb_rotr16 (uint16_t x, unsigned int count) { return (x << (16 - count)) | (x >> count); }
ecb_inline ecb_const uint32_t ecb_rotl32 (uint32_t x, unsigned int count) { return (x >> (32 - count)) | (x << count); }
ecb_inline ecb_const uint32_t ecb_rotr32 (uint32_t x, unsigned int count) { return (x << (32 - count)) | (x >> count); }
ecb_inline ecb_const uint64_t ecb_rotl64 (uint64_t x, unsigned int count) { return (x >> (64 - count)) | (x << count); }
ecb_inline ecb_const uint64_t ecb_rotr64 (uint64_t x, unsigned int count) { return (x << (64 - count)) | (x >> count); }

#if ECB_GCC_VERSION(4,3) || (ECB_CLANG_BUILTIN(__builtin_bswap32) && ECB_CLANG_BUILTIN(__builtin_bswap64))
  #if ECB_GCC_VERSION(4,8) || ECB_CLANG_BUILTIN(__builtin_bswap16)
  #define ecb_bswap16(x)  __builtin_bswap16 (x)
  #else
  #define ecb_bswap16(x) (__builtin_bswap32 (x) >> 16)
  #endif
  #define ecb_bswap32(x)  __builtin_bswap32 (x)
  #define ecb_bswap64(x)  __builtin_bswap64 (x)
#elif _MSC_VER
  #include <stdlib.h>
  #define ecb_bswap16(x) ((uint16_t)_byteswap_ushort ((uint16_t)(x)))
  #define ecb_bswap32(x) ((uint32_t)_byteswap_ulong  ((uint32_t)(x)))
  #define ecb_bswap64(x) ((uint64_t)_byteswap_uint64 ((uint64_t)(x)))
#else
  ecb_function_ ecb_const uint16_t ecb_bswap16 (uint16_t x);
  ecb_function_ ecb_const uint16_t
  ecb_bswap16 (uint16_t x)
  {
    return ecb_rotl16 (x, 8);
  }

  ecb_function_ ecb_const uint32_t ecb_bswap32 (uint32_t x);
  ecb_function_ ecb_const uint32_t
  ecb_bswap32 (uint32_t x)
  {
    return (((uint32_t)ecb_bswap16 (x)) << 16) | ecb_bswap16 (x >> 16);
  }

  ecb_function_ ecb_const uint64_t ecb_bswap64 (uint64_t x);
  ecb_function_ ecb_const uint64_t
  ecb_bswap64 (uint64_t x)
  {
    return (((uint64_t)ecb_bswap32 (x)) << 32) | ecb_bswap32 (x >> 32);
  }
#endif

#if ECB_GCC_VERSION(4,5) || ECB_CLANG_BUILTIN(__builtin_unreachable)
  #define ecb_unreachable() __builtin_unreachable ()
#else
  /* this seems to work fine, but gcc always emits a warning for it :/ */
  ecb_inline ecb_noreturn void ecb_unreachable (void);
  ecb_inline ecb_noreturn void ecb_unreachable (void) { }
#endif
1193 |
|
|
|
1194 |
|
|
/* try to tell the compiler that some condition is definitely true */ |
1195 |
root |
1.450 |
#define ecb_assume(cond) if (!(cond)) ecb_unreachable (); else 0 |
1196 |
root |
1.391 |
|
1197 |
root |
1.479 |
ecb_inline ecb_const uint32_t ecb_byteorder_helper (void); |
1198 |
|
|
ecb_inline ecb_const uint32_t |
1199 |
root |
1.391 |
ecb_byteorder_helper (void) |
1200 |
|
|
{ |
1201 |
root |
1.450 |
/* the union code still generates code under pressure in gcc, */ |
1202 |
|
|
/* but less than using pointers, and always seems to */ |
1203 |
|
|
/* successfully return a constant. */ |
1204 |
|
|
/* the reason why we have this horrible preprocessor mess */ |
1205 |
|
|
/* is to avoid it in all cases, at least on common architectures */ |
1206 |
|
|
/* or when using a recent enough gcc version (>= 4.6) */ |
1207 |
root |
1.479 |
#if (defined __BYTE_ORDER__ && __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__) \ |
1208 |
|
|
|| ((__i386 || __i386__ || _M_IX86 || ECB_GCC_AMD64 || ECB_MSVC_AMD64) && !__VOS__) |
1209 |
|
|
#define ECB_LITTLE_ENDIAN 1 |
1210 |
|
|
return 0x44332211; |
1211 |
|
|
#elif (defined __BYTE_ORDER__ && __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__) \ |
1212 |
|
|
|| ((__AARCH64EB__ || __MIPSEB__ || __ARMEB__) && !__VOS__) |
1213 |
|
|
#define ECB_BIG_ENDIAN 1 |
1214 |
|
|
return 0x11223344; |
1215 |
root |
1.450 |
#else |
1216 |
|
|
union |
1217 |
|
|
{ |
1218 |
root |
1.479 |
uint8_t c[4]; |
1219 |
|
|
uint32_t u; |
1220 |
|
|
} u = { 0x11, 0x22, 0x33, 0x44 }; |
1221 |
|
|
return u.u; |
1222 |
root |
1.450 |
#endif |
1223 |
root |
1.391 |
} |
1224 |
|
|
|
1225 |
root |
1.474 |
ecb_inline ecb_const ecb_bool ecb_big_endian (void); |
1226 |
root |
1.479 |
ecb_inline ecb_const ecb_bool ecb_big_endian (void) { return ecb_byteorder_helper () == 0x11223344; } |
1227 |
root |
1.474 |
ecb_inline ecb_const ecb_bool ecb_little_endian (void); |
1228 |
root |
1.479 |
ecb_inline ecb_const ecb_bool ecb_little_endian (void) { return ecb_byteorder_helper () == 0x44332211; } |
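/* Illustrative sketch (not part of libev, compiled out): on common targets
 * ecb_little_endian/ecb_big_endian fold to compile-time constants, so they
 * can select a byte-order conversion path. example_to_le32 is hypothetical. */
#if 0
static uint32_t
example_to_le32 (uint32_t host)
{
  return ecb_little_endian () ? host : ecb_bswap32 (host);
}
#endif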
1229 |
root |
1.391 |
|
1230 |
|
|
#if ECB_GCC_VERSION(3,0) || ECB_C99 |
1231 |
|
|
#define ecb_mod(m,n) ((m) % (n) + ((m) % (n) < 0 ? (n) : 0)) |
1232 |
|
|
#else |
1233 |
|
|
#define ecb_mod(m,n) ((m) < 0 ? ((n) - 1 - ((-1 - (m)) % (n))) : ((m) % (n))) |
1234 |
|
|
#endif |
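/* Illustrative sketch (not part of libev, compiled out): unlike C's %
 * operator, ecb_mod always yields a value in [0, n) for positive n. */
#if 0
#include <assert.h>
static void
example_mod (void)
{
  assert (ecb_mod ( 7, 5) == 2);
  assert (ecb_mod (-1, 5) == 4); /* -1 % 5 would be -1 in C99 */
  assert (ecb_mod (-5, 5) == 0);
}
#endif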
1235 |
|
|
|
1236 |
root |
1.474 |
#if ECB_CPP |
1237 |
root |
1.398 |
template<typename T> |
1238 |
|
|
static inline T ecb_div_rd (T val, T div) |
1239 |
|
|
{ |
1240 |
|
|
return val < 0 ? - ((-val + div - 1) / div) : (val ) / div; |
1241 |
|
|
} |
1242 |
|
|
template<typename T> |
1243 |
|
|
static inline T ecb_div_ru (T val, T div) |
1244 |
|
|
{ |
1245 |
|
|
return val < 0 ? - ((-val ) / div) : (val + div - 1) / div; |
1246 |
|
|
} |
1247 |
|
|
#else |
1248 |
|
|
#define ecb_div_rd(val,div) ((val) < 0 ? - ((-(val) + (div) - 1) / (div)) : ((val) ) / (div)) |
1249 |
|
|
#define ecb_div_ru(val,div) ((val) < 0 ? - ((-(val) ) / (div)) : ((val) + (div) - 1) / (div)) |
1250 |
|
|
#endif |
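/* Illustrative sketch (not part of libev, compiled out): ecb_div_rd rounds
 * the quotient towards negative infinity and ecb_div_ru towards positive
 * infinity, regardless of the dividend's sign. */
#if 0
#include <assert.h>
static void
example_div (void)
{
  assert (ecb_div_rd ( 7, 2) ==  3);
  assert (ecb_div_rd (-7, 2) == -4);
  assert (ecb_div_ru ( 7, 2) ==  4);
  assert (ecb_div_ru (-7, 2) == -3);
}
#endif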
1251 |
|
|
|
1252 |
root |
1.391 |
#if ecb_cplusplus_does_not_suck |
1253 |
|
|
/* does not work for local types (http://www.open-std.org/jtc1/sc22/wg21/docs/papers/2008/n2657.htm) */ |
1254 |
|
|
template<typename T, int N> |
1255 |
|
|
static inline int ecb_array_length (const T (&arr)[N]) |
1256 |
|
|
{ |
1257 |
|
|
return N; |
1258 |
|
|
} |
1259 |
|
|
#else |
1260 |
|
|
#define ecb_array_length(name) (sizeof (name) / sizeof (name [0])) |
1261 |
|
|
#endif |
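/* Illustrative sketch (not part of libev, compiled out): ecb_array_length
 * yields the element count of a statically sized array, in both the C and
 * the C++ variant above. */
#if 0
#include <assert.h>
static void
example_array_length (void)
{
  int tab [7];
  assert (ecb_array_length (tab) == 7);
}
#endif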
1262 |
|
|
|
1263 |
root |
1.479 |
ecb_function_ ecb_const uint32_t ecb_binary16_to_binary32 (uint32_t x); |
1264 |
|
|
ecb_function_ ecb_const uint32_t |
1265 |
|
|
ecb_binary16_to_binary32 (uint32_t x) |
1266 |
|
|
{ |
1267 |
|
|
unsigned int s = (x & 0x8000) << (31 - 15); |
1268 |
|
|
int e = (x >> 10) & 0x001f; |
1269 |
|
|
unsigned int m = x & 0x03ff; |
1270 |
|
|
|
1271 |
|
|
if (ecb_expect_false (e == 31)) |
1272 |
|
|
/* infinity or NaN */ |
1273 |
|
|
e = 255 - (127 - 15); |
1274 |
|
|
else if (ecb_expect_false (!e)) |
1275 |
|
|
{ |
1276 |
|
|
if (ecb_expect_true (!m)) |
1277 |
|
|
/* zero, handled by code below by forcing e to 0 */ |
1278 |
|
|
e = 0 - (127 - 15); |
1279 |
|
|
else |
1280 |
|
|
{ |
1281 |
|
|
/* subnormal, renormalise */ |
1282 |
|
|
unsigned int s = 10 - ecb_ld32 (m); |
1283 |
|
|
|
1284 |
|
|
m = (m << s) & 0x3ff; /* mask implicit bit */ |
1285 |
|
|
e -= s - 1; |
1286 |
|
|
} |
1287 |
|
|
} |
1288 |
|
|
|
1289 |
|
|
/* e and m now are normalised, or zero, (or inf or nan) */ |
1290 |
|
|
e += 127 - 15; |
1291 |
|
|
|
1292 |
|
|
return s | (e << 23) | (m << (23 - 10)); |
1293 |
|
|
} |
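/* Illustrative sketch (not part of libev, compiled out): two spot checks
 * that follow from the conversion above - IEEE half 0x3c00 is 1.0 and
 * 0xc000 is -2.0, mapping to binary32 0x3f800000 and 0xc0000000. */
#if 0
#include <assert.h>
static void
example_binary16_to_binary32 (void)
{
  assert (ecb_binary16_to_binary32 (0x3c00) == 0x3f800000);
  assert (ecb_binary16_to_binary32 (0xc000) == 0xc0000000);
}
#endif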
1294 |
|
|
|
1295 |
|
|
ecb_function_ ecb_const uint16_t ecb_binary32_to_binary16 (uint32_t x); |
1296 |
|
|
ecb_function_ ecb_const uint16_t |
1297 |
|
|
ecb_binary32_to_binary16 (uint32_t x) |
1298 |
|
|
{ |
1299 |
|
|
unsigned int s = (x >> 16) & 0x00008000; /* sign bit, the easy part */ |
1300 |
|
|
unsigned int e = ((x >> 23) & 0x000000ff) - (127 - 15); /* the desired exponent */ |
1301 |
|
|
unsigned int m = x & 0x007fffff; |
1302 |
|
|
|
1303 |
|
|
x &= 0x7fffffff; |
1304 |
|
|
|
1305 |
|
|
/* if it's within range of binary16 normals, use fast path */ |
1306 |
|
|
if (ecb_expect_true (0x38800000 <= x && x <= 0x477fefff)) |
1307 |
|
|
{ |
1308 |
|
|
/* mantissa round-to-even */ |
1309 |
|
|
m += 0x00000fff + ((m >> (23 - 10)) & 1); |
1310 |
|
|
|
1311 |
|
|
/* handle overflow */ |
1312 |
|
|
if (ecb_expect_false (m >= 0x00800000)) |
1313 |
|
|
{ |
1314 |
|
|
m >>= 1; |
1315 |
|
|
e += 1; |
1316 |
|
|
} |
1317 |
|
|
|
1318 |
|
|
return s | (e << 10) | (m >> (23 - 10)); |
1319 |
|
|
} |
1320 |
|
|
|
1321 |
|
|
/* handle large numbers and infinity */ |
1322 |
|
|
if (ecb_expect_true (0x477fefff < x && x <= 0x7f800000)) |
1323 |
|
|
return s | 0x7c00; |
1324 |
|
|
|
1325 |
|
|
/* handle zero, subnormals and small numbers */ |
1326 |
|
|
if (ecb_expect_true (x < 0x38800000)) |
1327 |
|
|
{ |
1328 |
|
|
/* zero */ |
1329 |
|
|
if (ecb_expect_true (!x)) |
1330 |
|
|
return s; |
1331 |
|
|
|
1332 |
|
|
/* handle subnormals */ |
1333 |
|
|
|
1334 |
|
|
/* too small, will be zero */ |
1335 |
|
|
if (e < (14 - 24)) /* might not be sharp, but is good enough */ |
1336 |
|
|
return s; |
1337 |
|
|
|
1338 |
|
|
m |= 0x00800000; /* make implicit bit explicit */ |
1339 |
|
|
|
1340 |
|
|
/* very tricky - we need to round to the nearest e (+10) bit value */ |
1341 |
|
|
{ |
1342 |
|
|
unsigned int bits = 14 - e; |
1343 |
|
|
unsigned int half = (1 << (bits - 1)) - 1; |
1344 |
|
|
unsigned int even = (m >> bits) & 1; |
1345 |
|
|
|
1346 |
|
|
/* if this overflows, we will end up with a normalised number */ |
1347 |
|
|
m = (m + half + even) >> bits; |
1348 |
|
|
} |
1349 |
|
|
|
1350 |
|
|
return s | m; |
1351 |
|
|
} |
1352 |
|
|
|
1353 |
|
|
/* handle NaNs, preserve leftmost nan bits, but make sure we don't turn them into infinities */ |
1354 |
|
|
m >>= 13; |
1355 |
|
|
|
1356 |
|
|
return s | 0x7c00 | m | !m; |
1357 |
|
|
} |
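/* Illustrative sketch (not part of libev, compiled out): the inverse mapping
 * for the same spot values - binary32 0x3f800000 (1.0) becomes half 0x3c00
 * and 0xc0000000 (-2.0) becomes 0xc000. */
#if 0
#include <assert.h>
static void
example_binary32_to_binary16 (void)
{
  assert (ecb_binary32_to_binary16 (0x3f800000) == 0x3c00);
  assert (ecb_binary32_to_binary16 (0xc0000000) == 0xc000);
}
#endif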
1358 |
|
|
|
1359 |
root |
1.450 |
/*******************************************************************************/ |
1360 |
|
|
/* floating point stuff, can be disabled by defining ECB_NO_LIBM */ |
1361 |
|
|
|
1362 |
|
|
/* basically, everything uses "ieee pure-endian" floating point numbers */ |
1363 |
|
|
/* the only noteworthy exception is ancient armle, which uses order 43218765 */ |
1364 |
|
|
#if 0 \ |
1365 |
|
|
|| __i386 || __i386__ \ |
1366 |
sf-exg |
1.475 |
|| ECB_GCC_AMD64 \ |
1367 |
root |
1.450 |
|| __powerpc__ || __ppc__ || __powerpc64__ || __ppc64__ \ |
1368 |
|
|
|| defined __s390__ || defined __s390x__ \ |
1369 |
|
|
|| defined __mips__ \ |
1370 |
|
|
|| defined __alpha__ \ |
1371 |
|
|
|| defined __hppa__ \ |
1372 |
|
|
|| defined __ia64__ \ |
1373 |
root |
1.457 |
|| defined __m68k__ \ |
1374 |
|
|
|| defined __m88k__ \ |
1375 |
|
|
|| defined __sh__ \ |
1376 |
sf-exg |
1.475 |
|| defined _M_IX86 || defined ECB_MSVC_AMD64 || defined _M_IA64 \ |
1377 |
root |
1.465 |
|| (defined __arm__ && (defined __ARM_EABI__ || defined __EABI__ || defined __VFP_FP__ || defined _WIN32_WCE || defined __ANDROID__)) \ |
1378 |
root |
1.466 |
|| defined __aarch64__ |
1379 |
root |
1.450 |
#define ECB_STDFP 1 |
1380 |
|
|
#include <string.h> /* for memcpy */ |
1381 |
|
|
#else |
1382 |
|
|
#define ECB_STDFP 0 |
1383 |
|
|
#endif |
1384 |
|
|
|
1385 |
|
|
#ifndef ECB_NO_LIBM |
1386 |
|
|
|
1387 |
root |
1.458 |
#include <math.h> /* for frexp*, ldexp*, INFINITY, NAN */ |
1388 |
|
|
|
1389 |
root |
1.462 |
/* only the oldest of old doesn't have this one. solaris. */ |
1390 |
|
|
#ifdef INFINITY |
1391 |
|
|
#define ECB_INFINITY INFINITY |
1392 |
|
|
#else |
1393 |
|
|
#define ECB_INFINITY HUGE_VAL |
1394 |
|
|
#endif |
1395 |
|
|
|
1396 |
|
|
#ifdef NAN |
1397 |
root |
1.458 |
#define ECB_NAN NAN |
1398 |
|
|
#else |
1399 |
root |
1.462 |
#define ECB_NAN ECB_INFINITY |
1400 |
root |
1.458 |
#endif |
1401 |
|
|
|
1402 |
root |
1.474 |
#if ECB_C99 || _XOPEN_VERSION >= 600 || _POSIX_VERSION >= 200112L |
1403 |
|
|
#define ecb_ldexpf(x,e) ldexpf ((x), (e)) |
1404 |
root |
1.476 |
#define ecb_frexpf(x,e) frexpf ((x), (e)) |
1405 |
root |
1.474 |
#else |
1406 |
root |
1.476 |
#define ecb_ldexpf(x,e) (float) ldexp ((double) (x), (e)) |
1407 |
|
|
#define ecb_frexpf(x,e) (float) frexp ((double) (x), (e)) |
1408 |
root |
1.474 |
#endif |
1409 |
|
|
|
1410 |
root |
1.450 |
/* convert a float to ieee single/binary32 */ |
1411 |
root |
1.474 |
ecb_function_ ecb_const uint32_t ecb_float_to_binary32 (float x); |
1412 |
|
|
ecb_function_ ecb_const uint32_t |
1413 |
root |
1.450 |
ecb_float_to_binary32 (float x) |
1414 |
|
|
{ |
1415 |
|
|
uint32_t r; |
1416 |
|
|
|
1417 |
|
|
#if ECB_STDFP |
1418 |
|
|
memcpy (&r, &x, 4); |
1419 |
|
|
#else |
1420 |
|
|
/* slow emulation, works for anything but -0 */ |
1421 |
|
|
uint32_t m; |
1422 |
|
|
int e; |
1423 |
|
|
|
1424 |
|
|
if (x == 0e0f ) return 0x00000000U; |
1425 |
|
|
if (x > +3.40282346638528860e+38f) return 0x7f800000U; |
1426 |
|
|
if (x < -3.40282346638528860e+38f) return 0xff800000U; |
1427 |
|
|
if (x != x ) return 0x7fbfffffU; |
1428 |
|
|
|
1429 |
root |
1.476 |
m = ecb_frexpf (x, &e) * 0x1000000U; |
1430 |
root |
1.450 |
|
1431 |
|
|
r = m & 0x80000000U; |
1432 |
|
|
|
1433 |
|
|
if (r) |
1434 |
|
|
m = -m; |
1435 |
|
|
|
1436 |
|
|
if (e <= -126) |
1437 |
|
|
{ |
1438 |
|
|
m &= 0xffffffU; |
1439 |
|
|
m >>= (-125 - e); |
1440 |
|
|
e = -126; |
1441 |
|
|
} |
1442 |
|
|
|
1443 |
|
|
r |= (e + 126) << 23; |
1444 |
|
|
r |= m & 0x7fffffU; |
1445 |
|
|
#endif |
1446 |
|
|
|
1447 |
|
|
return r; |
1448 |
|
|
} |
1449 |
|
|
|
1450 |
|
|
/* converts an ieee single/binary32 to a float */ |
1451 |
root |
1.474 |
ecb_function_ ecb_const float ecb_binary32_to_float (uint32_t x); |
1452 |
|
|
ecb_function_ ecb_const float |
1453 |
root |
1.450 |
ecb_binary32_to_float (uint32_t x) |
1454 |
|
|
{ |
1455 |
|
|
float r; |
1456 |
|
|
|
1457 |
|
|
#if ECB_STDFP |
1458 |
|
|
memcpy (&r, &x, 4); |
1459 |
|
|
#else |
1460 |
|
|
/* emulation, only works for normals and subnormals and +0 */ |
1461 |
|
|
int neg = x >> 31; |
1462 |
|
|
int e = (x >> 23) & 0xffU; |
1463 |
|
|
|
1464 |
|
|
x &= 0x7fffffU; |
1465 |
|
|
|
1466 |
|
|
if (e) |
1467 |
|
|
x |= 0x800000U; |
1468 |
|
|
else |
1469 |
|
|
e = 1; |
1470 |
|
|
|
1471 |
|
|
/* we distrust ldexpf a bit and do the 2**-24 scaling by an extra multiply */ |
1472 |
root |
1.474 |
r = ecb_ldexpf (x * (0.5f / 0x800000U), e - 126); |
1473 |
root |
1.450 |
|
1474 |
|
|
r = neg ? -r : r; |
1475 |
|
|
#endif |
1476 |
|
|
|
1477 |
|
|
return r; |
1478 |
|
|
} |
1479 |
|
|
|
1480 |
|
|
/* convert a double to ieee double/binary64 */ |
1481 |
root |
1.474 |
ecb_function_ ecb_const uint64_t ecb_double_to_binary64 (double x); |
1482 |
|
|
ecb_function_ ecb_const uint64_t |
1483 |
root |
1.450 |
ecb_double_to_binary64 (double x) |
1484 |
|
|
{ |
1485 |
|
|
uint64_t r; |
1486 |
|
|
|
1487 |
|
|
#if ECB_STDFP |
1488 |
|
|
memcpy (&r, &x, 8); |
1489 |
|
|
#else |
1490 |
|
|
/* slow emulation, works for anything but -0 */ |
1491 |
|
|
uint64_t m; |
1492 |
|
|
int e; |
1493 |
|
|
|
1494 |
|
|
if (x == 0e0 ) return 0x0000000000000000U; |
1495 |
|
|
if (x > +1.79769313486231470e+308) return 0x7ff0000000000000U; |
1496 |
|
|
if (x < -1.79769313486231470e+308) return 0xfff0000000000000U; |
1497 |
|
|
if (x != x ) return 0x7ff7ffffffffffffU; |
1498 |
|
|
|
1499 |
|
|
m = frexp (x, &e) * 0x20000000000000U; |
1500 |
|
|
|
1501 |
|
|
r = m & 0x8000000000000000U; |
1502 |
|
|
|
1503 |
|
|
if (r) |
1504 |
|
|
m = -m; |
1505 |
|
|
|
1506 |
|
|
if (e <= -1022) |
1507 |
|
|
{ |
1508 |
|
|
m &= 0x1fffffffffffffU; |
1509 |
|
|
m >>= (-1021 - e); |
1510 |
|
|
e = -1022; |
1511 |
|
|
} |
1512 |
|
|
|
1513 |
|
|
r |= ((uint64_t)(e + 1022)) << 52; |
1514 |
|
|
r |= m & 0xfffffffffffffU; |
1515 |
|
|
#endif |
1516 |
|
|
|
1517 |
|
|
return r; |
1518 |
|
|
} |
1519 |
|
|
|
1520 |
|
|
/* converts an ieee double/binary64 to a double */ |
1521 |
root |
1.474 |
ecb_function_ ecb_const double ecb_binary64_to_double (uint64_t x); |
1522 |
|
|
ecb_function_ ecb_const double |
1523 |
root |
1.450 |
ecb_binary64_to_double (uint64_t x) |
1524 |
|
|
{ |
1525 |
|
|
double r; |
1526 |
|
|
|
1527 |
|
|
#if ECB_STDFP |
1528 |
|
|
memcpy (&r, &x, 8); |
1529 |
|
|
#else |
1530 |
|
|
/* emulation, only works for normals and subnormals and +0 */ |
1531 |
|
|
int neg = x >> 63; |
1532 |
|
|
int e = (x >> 52) & 0x7ffU; |
1533 |
|
|
|
1534 |
|
|
x &= 0xfffffffffffffU; |
1535 |
|
|
|
1536 |
|
|
if (e) |
1537 |
|
|
x |= 0x10000000000000U; |
1538 |
|
|
else |
1539 |
|
|
e = 1; |
1540 |
|
|
|
1541 |
|
|
/* we distrust ldexp a bit and do the 2**-53 scaling by an extra multiply */ |
1542 |
|
|
r = ldexp (x * (0.5 / 0x10000000000000U), e - 1022); |
1543 |
|
|
|
1544 |
|
|
r = neg ? -r : r; |
1545 |
|
|
#endif |
1546 |
|
|
|
1547 |
|
|
return r; |
1548 |
|
|
} |
1549 |
|
|
|
1550 |
root |
1.479 |
/* convert a float to ieee half/binary16 */ |
1551 |
|
|
ecb_function_ ecb_const uint16_t ecb_float_to_binary16 (float x); |
1552 |
|
|
ecb_function_ ecb_const uint16_t |
1553 |
|
|
ecb_float_to_binary16 (float x) |
1554 |
|
|
{ |
1555 |
|
|
return ecb_binary32_to_binary16 (ecb_float_to_binary32 (x)); |
1556 |
|
|
} |
1557 |
|
|
|
1558 |
|
|
/* convert an ieee half/binary16 to float */ |
1559 |
|
|
ecb_function_ ecb_const float ecb_binary16_to_float (uint16_t x); |
1560 |
|
|
ecb_function_ ecb_const float |
1561 |
|
|
ecb_binary16_to_float (uint16_t x) |
1562 |
|
|
{ |
1563 |
|
|
return ecb_binary32_to_float (ecb_binary16_to_binary32 (x)); |
1564 |
|
|
} |
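/* Illustrative sketch (not part of libev, compiled out): the two wrappers
 * above only chain the bit-level converters, so a float that is exactly
 * representable in binary16 survives a round trip unchanged. */
#if 0
#include <assert.h>
static void
example_half_roundtrip (void)
{
  assert (ecb_binary16_to_float (ecb_float_to_binary16 (1.5f)) == 1.5f);
}
#endif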
1565 |
|
|
|
1566 |
root |
1.450 |
#endif |
1567 |
|
|
|
1568 |
root |
1.391 |
#endif |
1569 |
|
|
|
1570 |
|
|
/* ECB.H END */ |
1571 |
root |
1.379 |
|
1572 |
root |
1.392 |
#if ECB_MEMORY_FENCE_NEEDS_PTHREADS |
1573 |
root |
1.397 |
/* if your architecture doesn't need memory fences, e.g. because it is |
1574 |
root |
1.396 |
* single-cpu/core, or if you use libev in a project that doesn't use libev |
1575 |
root |
1.500 |
* from multiple threads, then you can define ECB_NO_THREADS when compiling |
1576 |
sf-exg |
1.402 |
* libev, in which case the memory fences become nops. |
1577 |
root |
1.396 |
* alternatively, you can remove this #error and link against libpthread, |
1578 |
|
|
* which will then provide the memory fences. |
1579 |
|
|
*/ |
1580 |
|
|
# error "memory fences not defined for your architecture, please report" |
1581 |
|
|
#endif |
1582 |
|
|
|
1583 |
|
|
#ifndef ECB_MEMORY_FENCE |
1584 |
|
|
# define ECB_MEMORY_FENCE do { } while (0) |
1585 |
|
|
# define ECB_MEMORY_FENCE_ACQUIRE ECB_MEMORY_FENCE |
1586 |
|
|
# define ECB_MEMORY_FENCE_RELEASE ECB_MEMORY_FENCE |
1587 |
root |
1.392 |
#endif |
1588 |
|
|
|
1589 |
root |
1.379 |
#define inline_size ecb_inline |
1590 |
root |
1.169 |
|
1591 |
root |
1.338 |
#if EV_FEATURE_CODE |
1592 |
root |
1.379 |
# define inline_speed ecb_inline |
1593 |
root |
1.338 |
#else |
1594 |
root |
1.500 |
# define inline_speed ecb_noinline static |
1595 |
root |
1.169 |
#endif |
1596 |
root |
1.40 |
|
1597 |
root |
1.502 |
/*****************************************************************************/ |
1598 |
|
|
/* raw syscall wrappers */ |
1599 |
|
|
|
1600 |
|
|
#if EV_NEED_SYSCALL |
1601 |
|
|
|
1602 |
|
|
#include <sys/syscall.h> |
1603 |
|
|
|
1604 |
|
|
/* |
1605 |
|
|
* define some syscall wrappers for common architectures |
1606 |
|
|
* this is mostly for nice looks during debugging, not performance. |
1607 |
|
|
* our syscalls return < 0, not == -1, on error, which is good |
1608 |
|
|
* enough for linux aio. |
1609 |
|
|
* TODO: arm is also common nowadays, maybe even mips and x86 |
1610 |
|
|
* TODO: after implementing this, it suddenly looks like overkill, but it's hard to remove... |
1611 |
|
|
*/ |
1612 |
|
|
#if __GNUC__ && __linux && ECB_AMD64 && !defined __OPTIMIZE_SIZE__ |
1613 |
|
|
/* the costly errno access probably kills this for size optimisation */ |
1614 |
|
|
|
1615 |
|
|
#define ev_syscall(nr,narg,arg1,arg2,arg3,arg4,arg5,arg6) \ |
1616 |
|
|
({ \ |
1617 |
|
|
long res; \ |
1618 |
|
|
register unsigned long r6 __asm__ ("r9" ); \ |
1619 |
|
|
register unsigned long r5 __asm__ ("r8" ); \ |
1620 |
|
|
register unsigned long r4 __asm__ ("r10"); \ |
1621 |
|
|
register unsigned long r3 __asm__ ("rdx"); \ |
1622 |
|
|
register unsigned long r2 __asm__ ("rsi"); \ |
1623 |
|
|
register unsigned long r1 __asm__ ("rdi"); \ |
1624 |
|
|
if (narg >= 6) r6 = (unsigned long)(arg6); \ |
1625 |
|
|
if (narg >= 5) r5 = (unsigned long)(arg5); \ |
1626 |
|
|
if (narg >= 4) r4 = (unsigned long)(arg4); \ |
1627 |
|
|
if (narg >= 3) r3 = (unsigned long)(arg3); \ |
1628 |
|
|
if (narg >= 2) r2 = (unsigned long)(arg2); \ |
1629 |
|
|
if (narg >= 1) r1 = (unsigned long)(arg1); \ |
1630 |
|
|
__asm__ __volatile__ ( \ |
1631 |
|
|
"syscall\n\t" \ |
1632 |
|
|
: "=a" (res) \ |
1633 |
|
|
: "0" (nr), "r" (r1), "r" (r2), "r" (r3), "r" (r4), "r" (r5) \ |
1634 |
|
|
: "cc", "r11", "cx", "memory"); \ |
1635 |
|
|
errno = -res; \ |
1636 |
|
|
res; \ |
1637 |
|
|
}) |
1638 |
|
|
|
1639 |
|
|
#endif |
1640 |
|
|
|
1641 |
|
|
#ifdef ev_syscall |
1642 |
|
|
#define ev_syscall0(nr) ev_syscall (nr, 0, 0, 0, 0, 0, 0, 0) |
1643 |
|
|
#define ev_syscall1(nr,arg1) ev_syscall (nr, 1, arg1, 0, 0, 0, 0, 0) |
1644 |
|
|
#define ev_syscall2(nr,arg1,arg2) ev_syscall (nr, 2, arg1, arg2, 0, 0, 0, 0) |
1645 |
|
|
#define ev_syscall3(nr,arg1,arg2,arg3) ev_syscall (nr, 3, arg1, arg2, arg3, 0, 0, 0) |
1646 |
|
|
#define ev_syscall4(nr,arg1,arg2,arg3,arg4) ev_syscall (nr, 4, arg1, arg2, arg3, arg4, 0, 0) |
1647 |
|
|
#define ev_syscall5(nr,arg1,arg2,arg3,arg4,arg5) ev_syscall (nr, 5, arg1, arg2, arg3, arg4, arg5, 0) |
1648 |
|
|
#define ev_syscall6(nr,arg1,arg2,arg3,arg4,arg5,arg6) ev_syscall (nr, 6, arg1, arg2, arg3, arg4, arg5, arg6) |
1649 |
|
|
#else |
1650 |
|
|
#define ev_syscall0(nr) syscall (nr) |
1651 |
|
|
#define ev_syscall1(nr,arg1) syscall (nr, arg1) |
1652 |
|
|
#define ev_syscall2(nr,arg1,arg2) syscall (nr, arg1, arg2) |
1653 |
|
|
#define ev_syscall3(nr,arg1,arg2,arg3) syscall (nr, arg1, arg2, arg3) |
1654 |
|
|
#define ev_syscall4(nr,arg1,arg2,arg3,arg4) syscall (nr, arg1, arg2, arg3, arg4) |
1655 |
|
|
#define ev_syscall5(nr,arg1,arg2,arg3,arg4,arg5) syscall (nr, arg1, arg2, arg3, arg4, arg5) |
1656 |
|
|
#define ev_syscall6(nr,arg1,arg2,arg3,arg4,arg5,arg6) syscall (nr, arg1, arg2, arg3, arg4, arg5, arg6) |
1657 |
|
|
#endif |
1658 |
|
|
|
1659 |
|
|
#endif |
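/* Illustrative sketch (not part of libev, compiled out): when EV_NEED_SYSCALL
 * is set, the ev_syscallN macros issue raw syscalls, e.g. a raw getpid on
 * linux (SYS_getpid comes from <sys/syscall.h>); example_raw_getpid is a
 * hypothetical name. */
#if 0
static long
example_raw_getpid (void)
{
  return ev_syscall0 (SYS_getpid);
}
#endif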
1660 |
|
|
|
1661 |
|
|
/*****************************************************************************/ |
1662 |
|
|
|
1663 |
root |
1.295 |
#define NUMPRI (EV_MAXPRI - EV_MINPRI + 1) |
1664 |
|
|
|
1665 |
|
|
#if EV_MINPRI == EV_MAXPRI |
1666 |
|
|
# define ABSPRI(w) (((W)w), 0) |
1667 |
|
|
#else |
1668 |
|
|
# define ABSPRI(w) (((W)w)->priority - EV_MINPRI) |
1669 |
|
|
#endif |
1670 |
root |
1.42 |
|
1671 |
root |
1.490 |
#define EMPTY /* required for microsoft's broken pseudo-c compiler */ |
1672 |
root |
1.103 |
|
1673 |
root |
1.136 |
typedef ev_watcher *W; |
1674 |
|
|
typedef ev_watcher_list *WL; |
1675 |
|
|
typedef ev_watcher_time *WT; |
1676 |
root |
1.10 |
|
1677 |
root |
1.229 |
#define ev_active(w) ((W)(w))->active |
1678 |
root |
1.228 |
#define ev_at(w) ((WT)(w))->at |
1679 |
|
|
|
1680 |
root |
1.279 |
#if EV_USE_REALTIME |
1681 |
root |
1.194 |
/* sig_atomic_t is used to avoid per-thread variables or locking but still */ |
1682 |
sf-exg |
1.345 |
/* giving it a reasonably high chance of working on typical architectures */ |
1683 |
root |
1.279 |
static EV_ATOMIC_T have_realtime; /* did clock_gettime (CLOCK_REALTIME) work? */ |
1684 |
|
|
#endif |
1685 |
|
|
|
1686 |
|
|
#if EV_USE_MONOTONIC |
1687 |
root |
1.207 |
static EV_ATOMIC_T have_monotonic; /* did clock_gettime (CLOCK_MONOTONIC) work? */ |
1688 |
root |
1.198 |
#endif |
1689 |
root |
1.54 |
|
1690 |
root |
1.313 |
#ifndef EV_FD_TO_WIN32_HANDLE |
1691 |
|
|
# define EV_FD_TO_WIN32_HANDLE(fd) _get_osfhandle (fd) |
1692 |
|
|
#endif |
1693 |
|
|
#ifndef EV_WIN32_HANDLE_TO_FD |
1694 |
root |
1.322 |
# define EV_WIN32_HANDLE_TO_FD(handle) _open_osfhandle (handle, 0) |
1695 |
root |
1.313 |
#endif |
1696 |
|
|
#ifndef EV_WIN32_CLOSE_FD |
1697 |
|
|
# define EV_WIN32_CLOSE_FD(fd) close (fd) |
1698 |
|
|
#endif |
1699 |
|
|
|
1700 |
root |
1.103 |
#ifdef _WIN32 |
1701 |
root |
1.98 |
# include "ev_win32.c" |
1702 |
|
|
#endif |
1703 |
root |
1.67 |
|
1704 |
root |
1.53 |
/*****************************************************************************/ |
1705 |
root |
1.1 |
|
1706 |
root |
1.493 |
#if EV_USE_LINUXAIO |
1707 |
|
|
# include <linux/aio_abi.h> /* probably only needed for aio_context_t */ |
1708 |
|
|
#endif |
1709 |
|
|
|
1710 |
root |
1.373 |
/* define a suitable floor function (only used by periodics atm) */ |
1711 |
|
|
|
1712 |
|
|
#if EV_USE_FLOOR |
1713 |
|
|
# include <math.h> |
1714 |
|
|
# define ev_floor(v) floor (v) |
1715 |
|
|
#else |
1716 |
|
|
|
1717 |
|
|
#include <float.h> |
1718 |
|
|
|
1719 |
|
|
/* a floor() replacement function, should be independent of ev_tstamp type */ |
1720 |
root |
1.500 |
ecb_noinline |
1721 |
root |
1.480 |
static ev_tstamp |
1722 |
root |
1.373 |
ev_floor (ev_tstamp v) |
1723 |
|
|
{ |
1724 |
|
|
/* the choice of shift factor is not terribly important */ |
1725 |
|
|
#if FLT_RADIX != 2 /* assume FLT_RADIX == 10 */ |
1726 |
|
|
const ev_tstamp shift = sizeof (unsigned long) >= 8 ? 10000000000000000000. : 1000000000.; |
1727 |
|
|
#else |
1728 |
|
|
const ev_tstamp shift = sizeof (unsigned long) >= 8 ? 18446744073709551616. : 4294967296.; |
1729 |
|
|
#endif |
1730 |
|
|
|
1731 |
root |
1.504 |
/* special treatment for negative arguments */ |
1732 |
|
|
if (ecb_expect_false (v < 0.)) |
1733 |
|
|
{ |
1734 |
|
|
ev_tstamp f = -ev_floor (-v); |
1735 |
|
|
|
1736 |
|
|
return f - (f == v ? 0 : 1); |
1737 |
|
|
} |
1738 |
|
|
|
1739 |
|
|
/* argument too large for an unsigned long? then reduce it */ |
1740 |
root |
1.500 |
if (ecb_expect_false (v >= shift)) |
1741 |
root |
1.373 |
{ |
1742 |
|
|
ev_tstamp f; |
1743 |
|
|
|
1744 |
|
|
if (v == v - 1.) |
1745 |
root |
1.504 |
return v; /* very large numbers are assumed to be integer */ |
1746 |
root |
1.373 |
|
1747 |
|
|
f = shift * ev_floor (v * (1. / shift)); |
1748 |
|
|
return f + ev_floor (v - f); |
1749 |
|
|
} |
1750 |
|
|
|
1751 |
|
|
/* fits into an unsigned long */ |
1752 |
|
|
return (unsigned long)v; |
1753 |
|
|
} |
1754 |
|
|
|
1755 |
|
|
#endif |
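/* Illustrative sketch (not part of libev, compiled out): whichever branch is
 * compiled above, ev_floor rounds towards negative infinity. */
#if 0
#include <assert.h>
static void
example_floor (void)
{
  assert (ev_floor ( 3.7) ==  3.);
  assert (ev_floor (-3.7) == -4.);
}
#endif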
1756 |
|
|
|
1757 |
|
|
/*****************************************************************************/ |
1758 |
|
|
|
1759 |
root |
1.356 |
#ifdef __linux |
1760 |
|
|
# include <sys/utsname.h> |
1761 |
|
|
#endif |
1762 |
|
|
|
1763 |
root |
1.500 |
ecb_noinline ecb_cold |
1764 |
root |
1.480 |
static unsigned int |
1765 |
root |
1.355 |
ev_linux_version (void) |
1766 |
|
|
{ |
1767 |
|
|
#ifdef __linux |
1768 |
root |
1.359 |
unsigned int v = 0; |
1769 |
root |
1.355 |
struct utsname buf; |
1770 |
|
|
int i; |
1771 |
|
|
char *p = buf.release; |
1772 |
|
|
|
1773 |
|
|
if (uname (&buf)) |
1774 |
|
|
return 0; |
1775 |
|
|
|
1776 |
|
|
for (i = 3+1; --i; ) |
1777 |
|
|
{ |
1778 |
|
|
unsigned int c = 0; |
1779 |
|
|
|
1780 |
|
|
for (;;) |
1781 |
|
|
{ |
1782 |
|
|
if (*p >= '0' && *p <= '9') |
1783 |
|
|
c = c * 10 + *p++ - '0'; |
1784 |
|
|
else |
1785 |
|
|
{ |
1786 |
|
|
p += *p == '.'; |
1787 |
|
|
break; |
1788 |
|
|
} |
1789 |
|
|
} |
1790 |
|
|
|
1791 |
|
|
v = (v << 8) | c; |
1792 |
|
|
} |
1793 |
|
|
|
1794 |
|
|
return v; |
1795 |
|
|
#else |
1796 |
|
|
return 0; |
1797 |
|
|
#endif |
1798 |
|
|
} |
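/* Illustrative example (not part of libev): the loop above packs up to three
 * dot-separated components of the release string into one byte each, so a
 * kernel reporting "5.15.7" yields (5 << 16) | (15 << 8) | 7 == 0x050f07,
 * which can be compared numerically against, e.g., 0x041300 for 4.19. */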
1799 |
|
|
|
1800 |
|
|
/*****************************************************************************/ |
1801 |
|
|
|
1802 |
root |
1.331 |
#if EV_AVOID_STDIO |
1803 |
root |
1.500 |
ecb_noinline ecb_cold |
1804 |
root |
1.480 |
static void |
1805 |
root |
1.331 |
ev_printerr (const char *msg) |
1806 |
|
|
{ |
1807 |
|
|
write (STDERR_FILENO, msg, strlen (msg)); |
1808 |
|
|
} |
1809 |
|
|
#endif |
1810 |
|
|
|
1811 |
root |
1.486 |
static void (*syserr_cb)(const char *msg) EV_NOEXCEPT; |
1812 |
root |
1.69 |
|
1813 |
root |
1.480 |
ecb_cold |
1814 |
|
|
void |
1815 |
root |
1.486 |
ev_set_syserr_cb (void (*cb)(const char *msg) EV_NOEXCEPT) EV_NOEXCEPT |
1816 |
root |
1.69 |
{ |
1817 |
|
|
syserr_cb = cb; |
1818 |
|
|
} |
1819 |
|
|
|
1820 |
root |
1.500 |
ecb_noinline ecb_cold |
1821 |
root |
1.480 |
static void |
1822 |
root |
1.269 |
ev_syserr (const char *msg) |
1823 |
root |
1.69 |
{ |
1824 |
root |
1.70 |
if (!msg) |
1825 |
|
|
msg = "(libev) system error"; |
1826 |
|
|
|
1827 |
root |
1.69 |
if (syserr_cb) |
1828 |
root |
1.70 |
syserr_cb (msg); |
1829 |
root |
1.69 |
else |
1830 |
|
|
{ |
1831 |
root |
1.330 |
#if EV_AVOID_STDIO |
1832 |
root |
1.331 |
ev_printerr (msg); |
1833 |
|
|
ev_printerr (": "); |
1834 |
root |
1.365 |
ev_printerr (strerror (errno)); |
1835 |
root |
1.331 |
ev_printerr ("\n"); |
1836 |
root |
1.330 |
#else |
1837 |
root |
1.70 |
perror (msg); |
1838 |
root |
1.330 |
#endif |
1839 |
root |
1.69 |
abort (); |
1840 |
|
|
} |
1841 |
|
|
} |
1842 |
|
|
|
1843 |
root |
1.224 |
static void * |
1844 |
root |
1.486 |
ev_realloc_emul (void *ptr, long size) EV_NOEXCEPT |
1845 |
root |
1.224 |
{ |
1846 |
|
|
/* some systems, notably openbsd and darwin, fail to properly |
1847 |
root |
1.335 |
* implement realloc (x, 0) (as required by both ansi c-89 and |
1848 |
root |
1.224 |
* the single unix specification), so work around them here. |
1849 |
root |
1.447 |
* recently, also (at least) fedora and debian started breaking it, |
1850 |
|
|
* despite documenting it otherwise. |
1851 |
root |
1.224 |
*/ |
1852 |
root |
1.333 |
|
1853 |
root |
1.224 |
if (size) |
1854 |
|
|
return realloc (ptr, size); |
1855 |
|
|
|
1856 |
|
|
free (ptr); |
1857 |
|
|
return 0; |
1858 |
|
|
} |
1859 |
|
|
|
1860 |
root |
1.486 |
static void *(*alloc)(void *ptr, long size) EV_NOEXCEPT = ev_realloc_emul; |
1861 |
root |
1.69 |
|
1862 |
root |
1.480 |
ecb_cold |
1863 |
|
|
void |
1864 |
root |
1.486 |
ev_set_allocator (void *(*cb)(void *ptr, long size) EV_NOEXCEPT) EV_NOEXCEPT |
1865 |
root |
1.69 |
{ |
1866 |
|
|
alloc = cb; |
1867 |
|
|
} |
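/* Illustrative sketch (not part of libev, compiled out): a replacement
 * allocator passed to ev_set_allocator must behave like realloc, including
 * freeing on size 0. example_allocator is hypothetical; a real one might add
 * logging or retry logic before giving up. */
#if 0
static void *
example_allocator (void *ptr, long size) EV_NOEXCEPT
{
  if (size)
    return realloc (ptr, size);

  free (ptr);
  return 0;
}

/* ev_set_allocator (example_allocator); */
#endif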
1868 |
|
|
|
1869 |
root |
1.150 |
inline_speed void * |
1870 |
root |
1.155 |
ev_realloc (void *ptr, long size) |
1871 |
root |
1.69 |
{ |
1872 |
root |
1.224 |
ptr = alloc (ptr, size); |
1873 |
root |
1.69 |
|
1874 |
|
|
if (!ptr && size) |
1875 |
|
|
{ |
1876 |
root |
1.330 |
#if EV_AVOID_STDIO |
1877 |
root |
1.365 |
ev_printerr ("(libev) memory allocation failed, aborting.\n"); |
1878 |
root |
1.330 |
#else |
1879 |
root |
1.365 |
fprintf (stderr, "(libev) cannot allocate %ld bytes, aborting.\n", size); |
1880 |
root |
1.330 |
#endif |
1881 |
root |
1.69 |
abort (); |
1882 |
|
|
} |
1883 |
|
|
|
1884 |
|
|
return ptr; |
1885 |
|
|
} |
1886 |
|
|
|
1887 |
|
|
#define ev_malloc(size) ev_realloc (0, (size)) |
1888 |
|
|
#define ev_free(ptr) ev_realloc ((ptr), 0) |
1889 |
|
|
|
1890 |
|
|
/*****************************************************************************/ |
1891 |
|
|
|
1892 |
root |
1.298 |
/* set in reify when reification needed */ |
1893 |
|
|
#define EV_ANFD_REIFY 1 |
1894 |
|
|
|
1895 |
root |
1.288 |
/* file descriptor info structure */ |
1896 |
root |
1.53 |
typedef struct |
1897 |
|
|
{ |
1898 |
root |
1.68 |
WL head; |
1899 |
root |
1.288 |
unsigned char events; /* the events watched for */ |
1900 |
root |
1.298 |
unsigned char reify; /* flag set when this ANFD needs reification (EV_ANFD_REIFY, EV__IOFDSET) */ |
1901 |
root |
1.490 |
unsigned char emask; /* some backends store the actual kernel mask in here */ |
1902 |
root |
1.502 |
unsigned char eflags; /* flags field for use by backends */ |
1903 |
root |
1.269 |
#if EV_USE_EPOLL |
1904 |
root |
1.288 |
unsigned int egen; /* generation counter to counter epoll bugs */ |
1905 |
root |
1.269 |
#endif |
1906 |
root |
1.357 |
#if EV_SELECT_IS_WINSOCKET || EV_USE_IOCP |
1907 |
root |
1.103 |
SOCKET handle; |
1908 |
|
|
#endif |
1909 |
root |
1.357 |
#if EV_USE_IOCP |
1910 |
|
|
OVERLAPPED or, ow; |
1911 |
|
|
#endif |
1912 |
root |
1.53 |
} ANFD; |
1913 |
root |
1.1 |
|
1914 |
root |
1.288 |
/* stores the pending event set for a given watcher */ |
1915 |
root |
1.53 |
typedef struct |
1916 |
|
|
{ |
1917 |
|
|
W w; |
1918 |
root |
1.288 |
int events; /* the pending event set for the given watcher */ |
1919 |
root |
1.53 |
} ANPENDING; |
1920 |
root |
1.51 |
|
1921 |
root |
1.155 |
#if EV_USE_INOTIFY |
1922 |
root |
1.241 |
/* hash table entry per inotify-id */ |
1923 |
root |
1.152 |
typedef struct |
1924 |
|
|
{ |
1925 |
|
|
WL head; |
1926 |
root |
1.155 |
} ANFS; |
1927 |
root |
1.152 |
#endif |
1928 |
|
|
|
1929 |
root |
1.241 |
/* Heap Entry */ |
1930 |
|
|
#if EV_HEAP_CACHE_AT |
1931 |
root |
1.288 |
/* a heap element */ |
1932 |
root |
1.241 |
typedef struct { |
1933 |
root |
1.243 |
ev_tstamp at; |
1934 |
root |
1.241 |
WT w; |
1935 |
|
|
} ANHE; |
1936 |
|
|
|
1937 |
root |
1.248 |
#define ANHE_w(he) (he).w /* access watcher, read-write */ |
1938 |
|
|
#define ANHE_at(he) (he).at /* access cached at, read-only */ |
1939 |
|
|
#define ANHE_at_cache(he) (he).at = (he).w->at /* update at from watcher */ |
1940 |
root |
1.241 |
#else |
1941 |
root |
1.288 |
/* a heap element */ |
1942 |
root |
1.241 |
typedef WT ANHE; |
1943 |
|
|
|
1944 |
root |
1.248 |
#define ANHE_w(he) (he) |
1945 |
|
|
#define ANHE_at(he) (he)->at |
1946 |
|
|
#define ANHE_at_cache(he) |
1947 |
root |
1.241 |
#endif |
1948 |
|
|
|
1949 |
root |
1.55 |
#if EV_MULTIPLICITY |
1950 |
root |
1.54 |
|
1951 |
root |
1.80 |
struct ev_loop |
1952 |
|
|
{ |
1953 |
root |
1.86 |
ev_tstamp ev_rt_now; |
1954 |
root |
1.99 |
#define ev_rt_now ((loop)->ev_rt_now) |
1955 |
root |
1.80 |
#define VAR(name,decl) decl; |
1956 |
|
|
#include "ev_vars.h" |
1957 |
|
|
#undef VAR |
1958 |
|
|
}; |
1959 |
|
|
#include "ev_wrap.h" |
1960 |
|
|
|
1961 |
root |
1.116 |
static struct ev_loop default_loop_struct; |
1962 |
sf-exg |
1.402 |
EV_API_DECL struct ev_loop *ev_default_loop_ptr = 0; /* needs to be initialised to make it a definition despite extern */ |
1963 |
root |
1.54 |
|
1964 |
root |
1.53 |
#else |
1965 |
root |
1.54 |
|
1966 |
sf-exg |
1.402 |
EV_API_DECL ev_tstamp ev_rt_now = 0; /* needs to be initialised to make it a definition despite extern */ |
1967 |
root |
1.80 |
#define VAR(name,decl) static decl; |
1968 |
|
|
#include "ev_vars.h" |
1969 |
|
|
#undef VAR |
1970 |
|
|
|
1971 |
root |
1.116 |
static int ev_default_loop_ptr; |
1972 |
root |
1.54 |
|
1973 |
root |
1.51 |
#endif |
1974 |
root |
1.1 |
|
1975 |
root |
1.338 |
#if EV_FEATURE_API |
1976 |
root |
1.500 |
# define EV_RELEASE_CB if (ecb_expect_false (release_cb)) release_cb (EV_A) |
1977 |
|
|
# define EV_ACQUIRE_CB if (ecb_expect_false (acquire_cb)) acquire_cb (EV_A) |
1978 |
root |
1.297 |
# define EV_INVOKE_PENDING invoke_cb (EV_A) |
1979 |
|
|
#else |
1980 |
root |
1.298 |
# define EV_RELEASE_CB (void)0 |
1981 |
|
|
# define EV_ACQUIRE_CB (void)0 |
1982 |
root |
1.297 |
# define EV_INVOKE_PENDING ev_invoke_pending (EV_A) |
1983 |
|
|
#endif |
1984 |
|
|
|
1985 |
root |
1.353 |
#define EVBREAK_RECURSE 0x80 |
1986 |
root |
1.298 |
|
1987 |
root |
1.8 |
/*****************************************************************************/ |
1988 |
|
|
|
1989 |
root |
1.292 |
#ifndef EV_HAVE_EV_TIME |
1990 |
root |
1.141 |
ev_tstamp |
1991 |
root |
1.486 |
ev_time (void) EV_NOEXCEPT |
1992 |
root |
1.1 |
{ |
1993 |
root |
1.29 |
#if EV_USE_REALTIME |
1994 |
root |
1.500 |
if (ecb_expect_true (have_realtime)) |
1995 |
root |
1.279 |
{ |
1996 |
|
|
struct timespec ts; |
1997 |
|
|
clock_gettime (CLOCK_REALTIME, &ts); |
1998 |
|
|
return ts.tv_sec + ts.tv_nsec * 1e-9; |
1999 |
|
|
} |
2000 |
|
|
#endif |
2001 |
|
|
|
2002 |
root |
1.1 |
struct timeval tv; |
2003 |
|
|
gettimeofday (&tv, 0); |
2004 |
|
|
return tv.tv_sec + tv.tv_usec * 1e-6; |
2005 |
|
|
} |
2006 |
root |
1.292 |
#endif |
2007 |
root |
1.1 |
|
2008 |
root |
1.284 |
inline_size ev_tstamp |
2009 |
root |
1.1 |
get_clock (void) |
2010 |
|
|
{ |
2011 |
root |
1.29 |
#if EV_USE_MONOTONIC |
2012 |
root |
1.500 |
if (ecb_expect_true (have_monotonic)) |
2013 |
root |
1.1 |
{ |
2014 |
|
|
struct timespec ts; |
2015 |
|
|
clock_gettime (CLOCK_MONOTONIC, &ts); |
2016 |
|
|
return ts.tv_sec + ts.tv_nsec * 1e-9; |
2017 |
|
|
} |
2018 |
|
|
#endif |
2019 |
|
|
|
2020 |
|
|
return ev_time (); |
2021 |
|
|
} |
2022 |
|
|
|
2023 |
root |
1.85 |
#if EV_MULTIPLICITY |
2024 |
root |
1.51 |
ev_tstamp |
2025 |
root |
1.486 |
ev_now (EV_P) EV_NOEXCEPT |
2026 |
root |
1.51 |
{ |
2027 |
root |
1.85 |
return ev_rt_now; |
2028 |
root |
1.51 |
} |
2029 |
root |
1.85 |
#endif |
2030 |
root |
1.51 |
|
2031 |
root |
1.193 |
void |
2032 |
root |
1.486 |
ev_sleep (ev_tstamp delay) EV_NOEXCEPT |
2033 |
root |
1.193 |
{ |
2034 |
|
|
if (delay > 0.) |
2035 |
|
|
{ |
2036 |
|
|
#if EV_USE_NANOSLEEP |
2037 |
|
|
struct timespec ts; |
2038 |
|
|
|
2039 |
root |
1.348 |
EV_TS_SET (ts, delay); |
2040 |
root |
1.193 |
nanosleep (&ts, 0); |
2041 |
root |
1.416 |
#elif defined _WIN32 |
2042 |
root |
1.482 |
/* maybe this should round up, as ms is very low resolution */ |
2043 |
|
|
/* compared to select (µs) or nanosleep (ns) */ |
2044 |
root |
1.217 |
Sleep ((unsigned long)(delay * 1e3)); |
2045 |
root |
1.193 |
#else |
2046 |
|
|
struct timeval tv; |
2047 |
|
|
|
2048 |
root |
1.257 |
/* here we rely on sys/time.h + sys/types.h + unistd.h providing select */ |
2049 |
root |
1.302 |
/* something not guaranteed by newer posix versions, but guaranteed */ |
2050 |
root |
1.257 |
/* by older ones */ |
2051 |
sf-exg |
1.349 |
EV_TV_SET (tv, delay); |
2052 |
root |
1.193 |
select (0, 0, 0, 0, &tv); |
2053 |
|
|
#endif |
2054 |
|
|
} |
2055 |
|
|
} |
2056 |
|
|
|
2057 |
|
|
/*****************************************************************************/ |
2058 |
|
|
|
2059 |
root |
1.233 |
#define MALLOC_ROUND 4096 /* prefer to allocate in chunks of this size, must be 2**n and >> 4 longs */ |
2060 |
root |
1.232 |
|
2061 |
root |
1.288 |
/* find a suitable new size for the given array, */ |
2062 |
sf-exg |
1.345 |
/* hopefully by rounding to a nice-to-malloc size */ |
2063 |
root |
1.284 |
inline_size int |
2064 |
root |
1.163 |
array_nextsize (int elem, int cur, int cnt) |
2065 |
|
|
{ |
2066 |
|
|
int ncur = cur + 1; |
2067 |
|
|
|
2068 |
|
|
do |
2069 |
|
|
ncur <<= 1; |
2070 |
|
|
while (cnt > ncur); |
2071 |
|
|
|
2072 |
root |
1.400 |
/* if size is large, round to MALLOC_ROUND - 4 * longs to accommodate malloc overhead */ |
2073 |
root |
1.232 |
if (elem * ncur > MALLOC_ROUND - sizeof (void *) * 4) |
2074 |
root |
1.163 |
{ |
2075 |
|
|
ncur *= elem; |
2076 |
root |
1.232 |
ncur = (ncur + elem + (MALLOC_ROUND - 1) + sizeof (void *) * 4) & ~(MALLOC_ROUND - 1); |
2077 |
root |
1.163 |
ncur = ncur - sizeof (void *) * 4; |
2078 |
|
|
ncur /= elem; |
2079 |
|
|
} |
2080 |
|
|
|
2081 |
|
|
return ncur; |
2082 |
|
|
} |
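/* Illustrative example (not part of libev): array_nextsize (sizeof (int), 0, 3)
 * starts with ncur = 1, doubles it to 2 and then 4, stopping once cnt (3) no
 * longer exceeds ncur; 4 ints are far below MALLOC_ROUND, so no further
 * rounding is applied and 4 is returned. */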
2083 |
|
|
|
2084 |
root |
1.500 |
ecb_noinline ecb_cold |
2085 |
root |
1.480 |
static void * |
2086 |
root |
1.163 |
array_realloc (int elem, void *base, int *cur, int cnt) |
2087 |
|
|
{ |
2088 |
|
|
*cur = array_nextsize (elem, *cur, cnt); |
2089 |
|
|
return ev_realloc (base, elem * *cur); |
2090 |
|
|
} |
2091 |
root |
1.29 |
|
2092 |
root |
1.495 |
#define array_needsize_noinit(base,offset,count) |
2093 |
root |
1.490 |
|
2094 |
root |
1.495 |
#define array_needsize_zerofill(base,offset,count) \ |
2095 |
|
|
memset ((void *)(base + offset), 0, sizeof (*(base)) * (count)) |
2096 |
root |
1.265 |
|
2097 |
root |
1.74 |
#define array_needsize(type,base,cur,cnt,init) \ |
2098 |
root |
1.500 |
if (ecb_expect_false ((cnt) > (cur))) \ |
2099 |
root |
1.69 |
{ \ |
2100 |
root |
1.480 |
ecb_unused int ocur_ = (cur); \ |
2101 |
root |
1.163 |
(base) = (type *)array_realloc \ |
2102 |
|
|
(sizeof (type), (base), &(cur), (cnt)); \ |
2103 |
root |
1.495 |
init ((base), ocur_, ((cur) - ocur_)); \ |
2104 |
root |
1.1 |
} |
2105 |
|
|
|
2106 |
root |
1.163 |
#if 0 |
2107 |
root |
1.74 |
#define array_slim(type,stem) \ |
2108 |
root |
1.67 |
if (stem ## max < array_roundsize (stem ## cnt >> 2)) \ |
2109 |
|
|
{ \ |
2110 |
|
|
stem ## max = array_roundsize (stem ## cnt >> 1); \ |
2111 |
root |
1.74 |
base = (type *)ev_realloc (base, sizeof (type) * (stem ## max));\ |
2112 |
root |
1.67 |
fprintf (stderr, "slimmed down " # stem " to %d\n", stem ## max);/*D*/\ |
2113 |
|
|
} |
2114 |
root |
1.163 |
#endif |
2115 |
root |
1.67 |
|
2116 |
root |
1.65 |
#define array_free(stem, idx) \ |
2117 |
root |
1.280 |
ev_free (stem ## s idx); stem ## cnt idx = stem ## max idx = 0; stem ## s idx = 0 |
2118 |
root |
1.65 |
|
2119 |
root |
1.8 |
/*****************************************************************************/ |
2120 |
|
|
|
2121 |
root |
1.288 |
/* dummy callback for pending events */ |
2122 |
root |
1.500 |
ecb_noinline |
2123 |
root |
1.480 |
static void |
2124 |
root |
1.288 |
pendingcb (EV_P_ ev_prepare *w, int revents) |
2125 |
|
|
{ |
2126 |
|
|
} |
2127 |
|
|
|
2128 |
root |
1.500 |
ecb_noinline |
2129 |
root |
1.480 |
void |
2130 |
root |
1.486 |
ev_feed_event (EV_P_ void *w, int revents) EV_NOEXCEPT |
2131 |
root |
1.1 |
{ |
2132 |
root |
1.78 |
W w_ = (W)w; |
2133 |
root |
1.171 |
int pri = ABSPRI (w_); |
2134 |
root |
1.78 |
|
2135 |
root |
1.500 |
if (ecb_expect_false (w_->pending)) |
2136 |
root |
1.171 |
pendings [pri][w_->pending - 1].events |= revents; |
2137 |
|
|
else |
2138 |
root |
1.32 |
{ |
2139 |
root |
1.171 |
w_->pending = ++pendingcnt [pri]; |
2140 |
root |
1.490 |
array_needsize (ANPENDING, pendings [pri], pendingmax [pri], w_->pending, array_needsize_noinit); |
2141 |
root |
1.171 |
pendings [pri][w_->pending - 1].w = w_; |
2142 |
|
|
pendings [pri][w_->pending - 1].events = revents; |
2143 |
root |
1.32 |
} |
2144 |
root |
1.425 |
|
2145 |
|
|
pendingpri = NUMPRI - 1; |
2146 |
root |
1.1 |
} |
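/* Illustrative sketch (not part of libev, compiled out): ev_feed_event is
 * public API and can queue an artificial event for any watcher, e.g. to
 * report EV_CUSTOM on an ev_io from user code. example_feed is hypothetical. */
#if 0
static void
example_feed (EV_P_ ev_io *w)
{
  ev_feed_event (EV_A_ w, EV_CUSTOM); /* queued as if the backend saw it */
}
#endif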
2147 |
|
|
|
2148 |
root |
1.284 |
inline_speed void |
2149 |
|
|
feed_reverse (EV_P_ W w) |
2150 |
|
|
{ |
2151 |
root |
1.490 |
array_needsize (W, rfeeds, rfeedmax, rfeedcnt + 1, array_needsize_noinit); |
2152 |
root |
1.284 |
rfeeds [rfeedcnt++] = w; |
2153 |
|
|
} |
2154 |
|
|
|
2155 |
|
|
inline_size void |
2156 |
|
|
feed_reverse_done (EV_P_ int revents) |
2157 |
|
|
{ |
2158 |
|
|
do |
2159 |
|
|
ev_feed_event (EV_A_ rfeeds [--rfeedcnt], revents); |
2160 |
|
|
while (rfeedcnt); |
2161 |
|
|
} |
2162 |
|
|
|
2163 |
|
|
inline_speed void |
2164 |
root |
1.51 |
queue_events (EV_P_ W *events, int eventcnt, int type) |
2165 |
root |
1.27 |
{ |
2166 |
|
|
int i; |
2167 |
|
|
|
2168 |
|
|
for (i = 0; i < eventcnt; ++i) |
2169 |
root |
1.78 |
ev_feed_event (EV_A_ events [i], type); |
2170 |
root |
1.27 |
} |
2171 |
|
|
|
2172 |
root |
1.141 |
/*****************************************************************************/ |
2173 |
|
|
|
2174 |
root |
1.284 |
inline_speed void |
2175 |
root |
1.337 |
fd_event_nocheck (EV_P_ int fd, int revents) |
2176 |
root |
1.1 |
{ |
2177 |
|
|
ANFD *anfd = anfds + fd; |
2178 |
root |
1.136 |
ev_io *w; |
2179 |
root |
1.1 |
|
2180 |
root |
1.136 |
for (w = (ev_io *)anfd->head; w; w = (ev_io *)((WL)w)->next) |
2181 |
root |
1.1 |
{ |
2182 |
root |
1.79 |
int ev = w->events & revents; |
2183 |
root |
1.1 |
|
2184 |
|
|
if (ev) |
2185 |
root |
1.78 |
ev_feed_event (EV_A_ (W)w, ev); |
2186 |
root |
1.1 |
} |
2187 |
|
|
} |
2188 |
|
|
|
2189 |
root |
1.298 |
/* do not submit kernel events for fds that have reify set */ |
2190 |
|
|
/* because that means they changed while we were polling for new events */ |
2191 |
|
|
inline_speed void |
2192 |
|
|
fd_event (EV_P_ int fd, int revents) |
2193 |
|
|
{ |
2194 |
|
|
ANFD *anfd = anfds + fd; |
2195 |
|
|
|
2196 |
root |
1.500 |
if (ecb_expect_true (!anfd->reify)) |
2197 |
root |
1.337 |
fd_event_nocheck (EV_A_ fd, revents); |
2198 |
root |
1.298 |
} |
2199 |
|
|
|
2200 |
root |
1.79 |
void |
2201 |
root |
1.486 |
ev_feed_fd_event (EV_P_ int fd, int revents) EV_NOEXCEPT |
2202 |
root |
1.79 |
{ |
2203 |
root |
1.168 |
if (fd >= 0 && fd < anfdmax) |
2204 |
root |
1.337 |
fd_event_nocheck (EV_A_ fd, revents); |
2205 |
root |
1.79 |
} |
2206 |
|
|
|
2207 |
root |
1.288 |
/* make sure the external fd watch events are in-sync */ |
2208 |
|
|
/* with the kernel/libev internal state */ |
2209 |
root |
1.284 |
inline_size void |
2210 |
root |
1.51 |
fd_reify (EV_P) |
2211 |
root |
1.9 |
{ |
2212 |
|
|
int i; |
2213 |
|
|
|
2214 |
root |
1.371 |
#if EV_SELECT_IS_WINSOCKET || EV_USE_IOCP |
2215 |
|
|
for (i = 0; i < fdchangecnt; ++i) |
2216 |
|
|
{ |
2217 |
|
|
int fd = fdchanges [i]; |
2218 |
|
|
ANFD *anfd = anfds + fd; |
2219 |
|
|
|
2220 |
root |
1.374 |
if (anfd->reify & EV__IOFDSET && anfd->head) |
2221 |
root |
1.371 |
{ |
2222 |
|
|
SOCKET handle = EV_FD_TO_WIN32_HANDLE (fd); |
2223 |
|
|
|
2224 |
|
|
if (handle != anfd->handle) |
2225 |
|
|
{ |
2226 |
|
|
unsigned long arg; |
2227 |
|
|
|
2228 |
|
|
assert (("libev: only socket fds supported in this configuration", ioctlsocket (handle, FIONREAD, &arg) == 0)); |
2229 |
|
|
|
2230 |
|
|
/* handle changed, but fd didn't - we need to do it in two steps */ |
2231 |
|
|
backend_modify (EV_A_ fd, anfd->events, 0); |
2232 |
|
|
anfd->events = 0; |
2233 |
|
|
anfd->handle = handle; |
2234 |
|
|
} |
2235 |
|
|
} |
2236 |
|
|
} |
2237 |
|
|
#endif |
2238 |
|
|
|
2239 |
root |
1.27 |
for (i = 0; i < fdchangecnt; ++i) |
2240 |
|
|
{ |
2241 |
|
|
int fd = fdchanges [i]; |
2242 |
|
|
ANFD *anfd = anfds + fd; |
2243 |
root |
1.136 |
ev_io *w; |
2244 |
root |
1.27 |
|
2245 |
root |
1.350 |
unsigned char o_events = anfd->events; |
2246 |
|
|
unsigned char o_reify = anfd->reify; |
2247 |
root |
1.27 |
|
2248 |
root |
1.497 |
anfd->reify = 0; |
2249 |
root |
1.27 |
|
2250 |
root |
1.500 |
/*if (ecb_expect_true (o_reify & EV_ANFD_REIFY)) probably a deoptimisation */ |
2251 |
root |
1.350 |
{ |
2252 |
|
|
anfd->events = 0; |
2253 |
root |
1.184 |
|
2254 |
root |
1.350 |
for (w = (ev_io *)anfd->head; w; w = (ev_io *)((WL)w)->next) |
2255 |
|
|
anfd->events |= (unsigned char)w->events; |
2256 |
root |
1.27 |
|
2257 |
root |
1.351 |
if (o_events != anfd->events) |
2258 |
root |
1.350 |
o_reify = EV__IOFDSET; /* actually |= */ |
2259 |
|
|
} |
2260 |
|
|
|
2261 |
|
|
if (o_reify & EV__IOFDSET) |
2262 |
|
|
backend_modify (EV_A_ fd, o_events, anfd->events); |
2263 |
root |
1.27 |
} |
2264 |
|
|
|
2265 |
|
|
fdchangecnt = 0; |
2266 |
|
|
} |
2267 |
|
|
|
2268 |
root |
1.288 |
/* something about the given fd changed */ |
2269 |
root |
1.480 |
inline_size |
2270 |
|
|
void |
2271 |
root |
1.183 |
fd_change (EV_P_ int fd, int flags) |
2272 |
root |
1.27 |
{ |
2273 |
root |
1.183 |
unsigned char reify = anfds [fd].reify; |
2274 |
root |
1.184 |
anfds [fd].reify |= flags; |
2275 |
root |
1.27 |
|
2276 |
root |
1.500 |
if (ecb_expect_true (!reify)) |
2277 |
root |
1.183 |
{ |
2278 |
|
|
++fdchangecnt; |
2279 |
root |
1.490 |
array_needsize (int, fdchanges, fdchangemax, fdchangecnt, array_needsize_noinit); |
2280 |
root |
1.183 |
fdchanges [fdchangecnt - 1] = fd; |
2281 |
|
|
} |
2282 |
root |
1.9 |
} |
2283 |
|
|
|
2284 |
root |
1.288 |
/* the given fd is invalid/unusable, so make sure it doesn't hurt us anymore */ |
2285 |
root |
1.480 |
inline_speed ecb_cold void |
2286 |
root |
1.51 |
fd_kill (EV_P_ int fd) |
2287 |
root |
1.41 |
{ |
2288 |
root |
1.136 |
ev_io *w; |
2289 |
root |
1.41 |
|
2290 |
root |
1.136 |
while ((w = (ev_io *)anfds [fd].head)) |
2291 |
root |
1.41 |
{ |
2292 |
root |
1.51 |
ev_io_stop (EV_A_ w); |
2293 |
root |
1.78 |
ev_feed_event (EV_A_ (W)w, EV_ERROR | EV_READ | EV_WRITE); |
2294 |
root |
1.41 |
} |
2295 |
|
|
} |
2296 |
|
|
|
2297 |
root |
1.336 |
/* check whether the given fd is actually valid, for error recovery */ |
2298 |
root |
1.480 |
inline_size ecb_cold int |
2299 |
root |
1.71 |
fd_valid (int fd) |
2300 |
|
|
{ |
2301 |
root |
1.103 |
#ifdef _WIN32 |
2302 |
root |
1.322 |
return EV_FD_TO_WIN32_HANDLE (fd) != -1; |
2303 |
root |
1.71 |
#else |
2304 |
|
|
return fcntl (fd, F_GETFD) != -1; |
2305 |
|
|
#endif |
2306 |
|
|
} |
2307 |
|
|
|
2308 |
root |
1.19 |
/* called on EBADF to verify fds */ |
2309 |
root |
1.500 |
ecb_noinline ecb_cold |
2310 |
root |
1.480 |
static void |
2311 |
root |
1.51 |
fd_ebadf (EV_P) |
2312 |
root |
1.19 |
{ |
2313 |
|
|
int fd; |
2314 |
|
|
|
2315 |
|
|
for (fd = 0; fd < anfdmax; ++fd) |
2316 |
root |
1.27 |
if (anfds [fd].events) |
2317 |
root |
1.254 |
if (!fd_valid (fd) && errno == EBADF) |
2318 |
root |
1.51 |
fd_kill (EV_A_ fd); |
2319 |
root |
1.41 |
} |
2320 |
|
|
|
2321 |
|
|
/* called on ENOMEM in select/poll to kill some fds and retry */ |
2322 |
root |
1.500 |
ecb_noinline ecb_cold |
2323 |
root |
1.480 |
static void |
2324 |
root |
1.51 |
fd_enomem (EV_P) |
2325 |
root |
1.41 |
{ |
2326 |
root |
1.62 |
int fd; |
2327 |
root |
1.41 |
|
2328 |
root |
1.62 |
for (fd = anfdmax; fd--; ) |
2329 |
root |
1.41 |
if (anfds [fd].events) |
2330 |
|
|
{ |
2331 |
root |
1.51 |
fd_kill (EV_A_ fd); |
2332 |
root |
1.307 |
break; |
2333 |
root |
1.41 |
} |
2334 |
root |
1.19 |
} |
2335 |
|
|
|
2336 |
root |
1.130 |
/* usually called after fork if backend needs to re-arm all fds from scratch */ |
2337 |
root |
1.500 |
ecb_noinline |
2338 |
root |
1.480 |
static void |
2339 |
root |
1.56 |
fd_rearm_all (EV_P) |
2340 |
|
|
{ |
2341 |
|
|
int fd; |
2342 |
|
|
|
2343 |
|
|
for (fd = 0; fd < anfdmax; ++fd) |
2344 |
|
|
if (anfds [fd].events) |
2345 |
|
|
{ |
2346 |
|
|
anfds [fd].events = 0; |
2347 |
root |
1.268 |
anfds [fd].emask = 0; |
2348 |
root |
1.298 |
fd_change (EV_A_ fd, EV__IOFDSET | EV_ANFD_REIFY); |
2349 |
root |
1.56 |
} |
2350 |
|
|
} |
2351 |
|
|
|
2352 |
root |
1.336 |
/* used to prepare libev internal fd's */ |
2353 |
|
|
/* this is not fork-safe */ |
2354 |
|
|
inline_speed void |
2355 |
|
|
fd_intern (int fd) |
2356 |
|
|
{ |
2357 |
|
|
#ifdef _WIN32 |
2358 |
|
|
unsigned long arg = 1; |
2359 |
|
|
ioctlsocket (EV_FD_TO_WIN32_HANDLE (fd), FIONBIO, &arg); |
2360 |
|
|
#else |
2361 |
|
|
fcntl (fd, F_SETFD, FD_CLOEXEC); |
2362 |
|
|
fcntl (fd, F_SETFL, O_NONBLOCK); |
2363 |
|
|
#endif |
2364 |
|
|
} |
2365 |
|
|
|
2366 |
root |
1.8 |
/*****************************************************************************/ |
2367 |
|
|
|
2368 |
root |
1.235 |
/* |
2369 |
sf-exg |
1.345 |
* the heap functions want a real array index. array index 0 is guaranteed to not |
2370 |
root |
1.241 |
* be in-use at any time. the first heap entry is at array [HEAP0]. DHEAP gives |
2371 |
|
|
* the branching factor of the d-tree. |
2372 |
|
|
*/ |
2373 |
|
|
|
2374 |
|
|
/* |
2375 |
root |
1.235 |
* at the moment we allow libev the luxury of two heaps, |
2376 |
|
|
* a small-code-size 2-heap one and a ~1.5kb larger 4-heap |
2377 |
|
|
* which is more cache-efficient. |
2378 |
|
|
* the difference is about 5% with 50000+ watchers. |
2379 |
|
|
*/ |
2380 |
root |
1.241 |
#if EV_USE_4HEAP |
2381 |
root |
1.235 |
|
2382 |
root |
1.237 |
#define DHEAP 4 |
2383 |
|
|
#define HEAP0 (DHEAP - 1) /* index of first element in heap */ |
2384 |
root |
1.247 |
#define HPARENT(k) ((((k) - HEAP0 - 1) / DHEAP) + HEAP0) |
2385 |
root |
1.248 |
#define UPHEAP_DONE(p,k) ((p) == (k)) |
2386 |
root |
1.235 |
|
2387 |
|
|
/* away from the root */ |
2388 |
root |
1.284 |
inline_speed void |
2389 |
root |
1.241 |
downheap (ANHE *heap, int N, int k) |
2390 |
root |
1.235 |
{ |
2391 |
root |
1.241 |
ANHE he = heap [k]; |
2392 |
|
|
ANHE *E = heap + N + HEAP0; |
2393 |
root |
1.235 |
|
2394 |
|
|
for (;;) |
2395 |
|
|
{ |
2396 |
|
|
ev_tstamp minat; |
2397 |
root |
1.241 |
ANHE *minpos; |
2398 |
root |
1.248 |
ANHE *pos = heap + DHEAP * (k - HEAP0) + HEAP0 + 1; |
2399 |
root |
1.235 |
|
2400 |
root |
1.248 |
/* find minimum child */ |
2401 |
root |
1.500 |
if (ecb_expect_true (pos + DHEAP - 1 < E)) |
2402 |
root |
1.235 |
{ |
2403 |
root |
1.245 |
/* fast path */ (minpos = pos + 0), (minat = ANHE_at (*minpos)); |
2404 |
|
|
if ( ANHE_at (pos [1]) < minat) (minpos = pos + 1), (minat = ANHE_at (*minpos)); |
2405 |
|
|
if ( ANHE_at (pos [2]) < minat) (minpos = pos + 2), (minat = ANHE_at (*minpos)); |
2406 |
|
|
if ( ANHE_at (pos [3]) < minat) (minpos = pos + 3), (minat = ANHE_at (*minpos)); |
2407 |
root |
1.235 |
} |
2408 |
root |
1.240 |
else if (pos < E) |
2409 |
root |
1.235 |
{ |
2410 |
root |
1.241 |
/* slow path */ (minpos = pos + 0), (minat = ANHE_at (*minpos)); |
2411 |
|
|
if (pos + 1 < E && ANHE_at (pos [1]) < minat) (minpos = pos + 1), (minat = ANHE_at (*minpos)); |
2412 |
|
|
if (pos + 2 < E && ANHE_at (pos [2]) < minat) (minpos = pos + 2), (minat = ANHE_at (*minpos)); |
2413 |
|
|
if (pos + 3 < E && ANHE_at (pos [3]) < minat) (minpos = pos + 3), (minat = ANHE_at (*minpos)); |
2414 |
root |
1.235 |
} |
2415 |
root |
1.240 |
else |
2416 |
|
|
break; |
2417 |
root |
1.235 |
|
2418 |
root |
1.241 |
if (ANHE_at (he) <= minat) |
2419 |
root |
1.235 |
break; |
2420 |
|
|
|
2421 |
root |
1.247 |
heap [k] = *minpos; |
2422 |
root |
1.241 |
ev_active (ANHE_w (*minpos)) = k; |
2423 |
root |
1.235 |
|
2424 |
|
|
k = minpos - heap; |
2425 |
|
|
} |
2426 |
|
|
|
2427 |
root |
1.247 |
heap [k] = he; |
2428 |
root |
1.241 |
ev_active (ANHE_w (he)) = k; |
2429 |
root |
1.235 |
} |
2430 |
|
|
|
2431 |
root |
1.248 |
#else /* 4HEAP */ |
2432 |
root |
1.235 |
|
2433 |
|
|
#define HEAP0 1 |
2434 |
root |
1.247 |
#define HPARENT(k) ((k) >> 1) |
2435 |
root |
1.248 |
#define UPHEAP_DONE(p,k) (!(p)) |
2436 |
root |
1.235 |
|
2437 |
root |
1.248 |
/* away from the root */ |
2438 |
root |
1.284 |
inline_speed void |
2439 |
root |
1.248 |
downheap (ANHE *heap, int N, int k) |
2440 |
root |
1.1 |
{ |
2441 |
root |
1.241 |
ANHE he = heap [k]; |
2442 |
root |
1.1 |
|
2443 |
root |
1.228 |
for (;;) |
2444 |
root |
1.1 |
{ |
2445 |
root |
1.248 |
int c = k << 1; |
2446 |
root |
1.179 |
|
2447 |
root |
1.309 |
if (c >= N + HEAP0) |
2448 |
root |
1.179 |
break; |
2449 |
|
|
|
2450 |
root |
1.248 |
c += c + 1 < N + HEAP0 && ANHE_at (heap [c]) > ANHE_at (heap [c + 1]) |
2451 |
|
|
? 1 : 0; |
2452 |
|
|
|
2453 |
|
|
if (ANHE_at (he) <= ANHE_at (heap [c])) |
2454 |
|
|
break; |
2455 |
|
|
|
2456 |
|
|
heap [k] = heap [c]; |
2457 |
root |
1.241 |
ev_active (ANHE_w (heap [k])) = k; |
2458 |
root |
1.248 |
|
2459 |
|
|
k = c; |
2460 |
root |
1.1 |
} |
2461 |
|
|
|
2462 |
root |
1.243 |
heap [k] = he; |
2463 |
root |
1.248 |
ev_active (ANHE_w (he)) = k; |
2464 |
root |
1.1 |
} |
2465 |
root |
1.248 |
#endif |
2466 |
root |
1.1 |
|
2467 |
root |
1.248 |
/* towards the root */ |
2468 |
root |
1.284 |
inline_speed void |
2469 |
root |
1.248 |
upheap (ANHE *heap, int k) |
2470 |
root |
1.1 |
{ |
2471 |
root |
1.241 |
ANHE he = heap [k]; |
2472 |
root |
1.1 |
|
2473 |
root |
1.179 |
for (;;) |
2474 |
root |
1.1 |
{ |
2475 |
root |
1.248 |
int p = HPARENT (k); |
2476 |
root |
1.179 |
|
2477 |
root |
1.248 |
if (UPHEAP_DONE (p, k) || ANHE_at (heap [p]) <= ANHE_at (he)) |
2478 |
root |
1.179 |
break; |
2479 |
root |
1.1 |
|
2480 |
root |
1.248 |
heap [k] = heap [p]; |
2481 |
root |
1.241 |
ev_active (ANHE_w (heap [k])) = k; |
2482 |
root |
1.248 |
k = p; |
2483 |
root |
1.1 |
} |
2484 |
|
|
|
2485 |
root |
1.241 |
heap [k] = he; |
2486 |
|
|
ev_active (ANHE_w (he)) = k; |
2487 |
root |
1.1 |
} |
2488 |
|
|
|
2489 |
root |
1.288 |
/* move an element suitably so it is in a correct place */ |
2490 |
root |
1.284 |
inline_size void |
2491 |
root |
1.241 |
adjustheap (ANHE *heap, int N, int k) |
2492 |
root |
1.84 |
{ |
2493 |
root |
1.310 |
if (k > HEAP0 && ANHE_at (heap [k]) <= ANHE_at (heap [HPARENT (k)])) |
2494 |
root |
1.247 |
upheap (heap, k); |
2495 |
|
|
else |
2496 |
|
|
downheap (heap, N, k); |
2497 |
root |
1.84 |
} |
2498 |
|
|
|
2499 |
root |
1.248 |
/* rebuild the heap: this function is used only once and executed rarely */ |
2500 |
root |
1.284 |
inline_size void |
2501 |
root |
1.248 |
reheap (ANHE *heap, int N) |
2502 |
|
|
{ |
2503 |
|
|
int i; |
2504 |
root |
1.251 |
|
2505 |
root |
1.248 |
/* we don't use floyds algorithm, upheap is simpler and is more cache-efficient */ |
2506 |
|
|
/* also, this is easy to implement and correct for both 2-heaps and 4-heaps */ |
2507 |
|
|
for (i = 0; i < N; ++i) |
2508 |
|
|
upheap (heap, i + HEAP0); |
2509 |
|
|
} |
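/* Illustrative example (not part of libev): in the 2-heap variant above
 * (HEAP0 == 1, used when EV_USE_4HEAP is disabled), the element at index k
 * has its parent at k >> 1 and its children at 2k and 2k + 1, so index 3 has
 * parent 1 and children 6 and 7; upheap and downheap simply walk these links. */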
2510 |
|
|
|
2511 |
root |
1.8 |
/*****************************************************************************/ |
2512 |
|
|
|
2513 |
root |
1.288 |
/* associate signal watchers to a signal */ |
2514 |
root |
1.7 |
typedef struct |
2515 |
|
|
{ |
2516 |
root |
1.307 |
EV_ATOMIC_T pending; |
2517 |
root |
1.306 |
#if EV_MULTIPLICITY |
2518 |
|
|
EV_P; |
2519 |
|
|
#endif |
2520 |
root |
1.68 |
WL head; |
2521 |
root |
1.7 |
} ANSIG; |
2522 |
|
|
|
2523 |
root |
1.306 |
static ANSIG signals [EV_NSIG - 1]; |
2524 |
root |
1.7 |
|
2525 |
root |
1.207 |
/*****************************************************************************/ |
2526 |
|
|
|
2527 |
root |
1.336 |
#if EV_SIGNAL_ENABLE || EV_ASYNC_ENABLE |
2528 |
root |
1.207 |
|
2529 |
root |
1.500 |
ecb_noinline ecb_cold |
2530 |
root |
1.480 |
static void |
2531 |
root |
1.207 |
evpipe_init (EV_P) |
2532 |
|
|
{ |
2533 |
root |
1.288 |
if (!ev_is_active (&pipe_w)) |
2534 |
root |
1.207 |
{ |
2535 |
root |
1.448 |
int fds [2]; |
2536 |
|
|
|
2537 |
root |
1.336 |
# if EV_USE_EVENTFD |
2538 |
root |
1.448 |
fds [0] = -1; |
2539 |
|
|
fds [1] = eventfd (0, EFD_NONBLOCK | EFD_CLOEXEC); |
2540 |
|
|
if (fds [1] < 0 && errno == EINVAL) |
2541 |
|
|
fds [1] = eventfd (0, 0); |
2542 |
|
|
|
2543 |
|
|
if (fds [1] < 0) |
2544 |
|
|
# endif |
2545 |
|
|
{ |
2546 |
|
|
while (pipe (fds)) |
2547 |
|
|
ev_syserr ("(libev) error creating signal/async pipe"); |
2548 |
|
|
|
2549 |
|
|
fd_intern (fds [0]); |
2550 |
root |
1.220 |
} |
2551 |
root |
1.448 |
|
2552 |
|
|
evpipe [0] = fds [0]; |
2553 |
|
|
|
2554 |
|
|
if (evpipe [1] < 0) |
2555 |
|
|
evpipe [1] = fds [1]; /* first call, set write fd */ |
2556 |
root |
1.220 |
else |
2557 |
|
|
{ |
2558 |
root |
1.448 |
/* on subsequent calls, do not change evpipe [1] */ |
2559 |
|
|
/* so that evpipe_write can always rely on its value. */ |
2560 |
|
|
/* this branch does not do anything sensible on windows, */ |
2561 |
|
|
/* so must not be executed on windows */ |
2562 |
root |
1.207 |
|
2563 |
root |
1.448 |
dup2 (fds [1], evpipe [1]); |
2564 |
|
|
close (fds [1]); |
2565 |
root |
1.220 |
} |
2566 |
root |
1.207 |
|
2567 |
root |
1.455 |
fd_intern (evpipe [1]); |
2568 |
|
|
|
2569 |
root |
1.448 |
ev_io_set (&pipe_w, evpipe [0] < 0 ? evpipe [1] : evpipe [0], EV_READ); |
2570 |
root |
1.288 |
ev_io_start (EV_A_ &pipe_w); |
2571 |
root |
1.210 |
ev_unref (EV_A); /* watcher should not keep loop alive */ |
2572 |
root |
1.207 |
} |
2573 |
|
|
} |
2574 |
|
|
|
2575 |
root |
1.380 |
inline_speed void |
2576 |
root |
1.214 |
evpipe_write (EV_P_ EV_ATOMIC_T *flag) |
2577 |
root |
1.207 |
{ |
2578 |
root |
1.424 |
ECB_MEMORY_FENCE; /* push out the write before this function was called, acquire flag */ |
2579 |
|
|
|
2580 |
root |
1.500 |
if (ecb_expect_true (*flag)) |
2581 |
root |
1.387 |
return; |
2582 |
root |
1.383 |
|
2583 |
|
|
*flag = 1; |
2584 |
root |
1.384 |
ECB_MEMORY_FENCE_RELEASE; /* make sure flag is visible before the wakeup */ |
2585 |
root |
1.383 |
|
2586 |
|
|
pipe_write_skipped = 1; |
2587 |
root |
1.378 |
|
2588 |
root |
1.384 |
ECB_MEMORY_FENCE; /* make sure pipe_write_skipped is visible before we check pipe_write_wanted */ |
2589 |
root |
1.214 |
|
2590 |
root |
1.383 |
if (pipe_write_wanted) |
2591 |
|
|
{ |
2592 |
|
|
int old_errno; |
2593 |
root |
1.378 |
|
2594 |
root |
1.436 |
pipe_write_skipped = 0; |
2595 |
|
|
ECB_MEMORY_FENCE_RELEASE; |
2596 |
root |
1.220 |
|
2597 |
root |
1.383 |
old_errno = errno; /* save errno because write will clobber it */ |
2598 |
root |
1.380 |
|
2599 |
root |
1.220 |
#if EV_USE_EVENTFD |
2600 |
root |
1.448 |
if (evpipe [0] < 0) |
2601 |
root |
1.383 |
{ |
2602 |
|
|
uint64_t counter = 1; |
2603 |
root |
1.448 |
write (evpipe [1], &counter, sizeof (uint64_t)); |
2604 |
root |
1.383 |
} |
2605 |
|
|
else |
2606 |
root |
1.220 |
#endif |
2607 |
root |
1.383 |
{ |
2608 |
root |
1.427 |
#ifdef _WIN32 |
2609 |
|
|
WSABUF buf; |
2610 |
|
|
DWORD sent; |
2611 |
root |
1.485 |
buf.buf = (char *)&buf; |
2612 |
root |
1.427 |
buf.len = 1; |
2613 |
|
|
WSASend (EV_FD_TO_WIN32_HANDLE (evpipe [1]), &buf, 1, &sent, 0, 0, 0); |
2614 |
|
|
#else |
2615 |
root |
1.383 |
write (evpipe [1], &(evpipe [1]), 1); |
2616 |
root |
1.427 |
#endif |
2617 |
root |
1.383 |
} |
2618 |
root |
1.214 |
|
2619 |
root |
1.383 |
errno = old_errno; |
2620 |
root |
1.207 |
} |
2621 |
|
|
} |
2622 |
|
|
|
2623 |
root |
1.288 |
/* called whenever the libev signal pipe */ |
2624 |
|
|
/* got some events (signal, async) */ |
2625 |
root |
1.207 |
static void |
2626 |
|
|
pipecb (EV_P_ ev_io *iow, int revents) |
2627 |
|
|
{ |
2628 |
root |
1.307 |
int i; |
2629 |
|
|
|
2630 |
root |
1.378 |
if (revents & EV_READ) |
2631 |
|
|
{ |
2632 |
root |
1.220 |
#if EV_USE_EVENTFD |
2633 |
root |
1.448 |
if (evpipe [0] < 0) |
2634 |
root |
1.378 |
{ |
2635 |
|
|
uint64_t counter; |
2636 |
root |
1.448 |
read (evpipe [1], &counter, sizeof (uint64_t)); |
2637 |
root |
1.378 |
} |
2638 |
|
|
else |
2639 |
root |
1.220 |
#endif |
2640 |
root |
1.378 |
{ |
2641 |
root |
1.427 |
char dummy[4]; |
2642 |
|
|
#ifdef _WIN32 |
2643 |
|
|
WSABUF buf; |
2644 |
|
|
DWORD recvd; |
2645 |
root |
1.432 |
DWORD flags = 0; |
2646 |
root |
1.427 |
buf.buf = dummy; |
2647 |
|
|
buf.len = sizeof (dummy); |
2648 |
root |
1.432 |
WSARecv (EV_FD_TO_WIN32_HANDLE (evpipe [0]), &buf, 1, &recvd, &flags, 0, 0); |
2649 |
root |
1.427 |
#else |
2650 |
|
|
read (evpipe [0], &dummy, sizeof (dummy)); |
2651 |
|
|
#endif |
2652 |
root |
1.378 |
} |
2653 |
root |
1.220 |
} |
2654 |
root |
1.207 |
|
2655 |
root |
1.378 |
pipe_write_skipped = 0; |
2656 |
|
|
|
2657 |
root |
1.424 |
ECB_MEMORY_FENCE; /* push out skipped, acquire flags */ |
2658 |
|
|
|
2659 |
root |
1.369 |
#if EV_SIGNAL_ENABLE |
2660 |
root |
1.307 |
if (sig_pending) |
2661 |
root |
1.372 |
{ |
2662 |
root |
1.307 |
sig_pending = 0; |
2663 |
root |
1.207 |
|
2664 |
root |
1.436 |
ECB_MEMORY_FENCE; |
2665 |
root |
1.424 |
|
2666 |
root |
1.307 |
for (i = EV_NSIG - 1; i--; ) |
2667 |
root |
1.500 |
if (ecb_expect_false (signals [i].pending)) |
2668 |
root |
1.307 |
ev_feed_signal_event (EV_A_ i + 1); |
2669 |
root |
1.207 |
} |
2670 |
root |
1.369 |
#endif |
2671 |
root |
1.207 |
|
2672 |
root |
1.209 |
#if EV_ASYNC_ENABLE |
2673 |
root |
1.307 |
if (async_pending) |
2674 |
root |
1.207 |
{ |
2675 |
root |
1.307 |
async_pending = 0; |
2676 |
root |
1.207 |
|
2677 |
root |
1.436 |
ECB_MEMORY_FENCE; |
2678 |
root |
1.424 |
|
2679 |
root |
1.207 |
for (i = asynccnt; i--; ) |
2680 |
|
|
if (asyncs [i]->sent) |
2681 |
|
|
{ |
2682 |
|
|
asyncs [i]->sent = 0; |
2683 |
root |
1.436 |
ECB_MEMORY_FENCE_RELEASE; |
2684 |
root |
1.207 |
ev_feed_event (EV_A_ asyncs [i], EV_ASYNC); |
2685 |
|
|
} |
2686 |
|
|
} |
2687 |
root |
1.209 |
#endif |
2688 |
root |
1.207 |
} |
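/* illustrative sketch, not part of this file: the async branch above is what |
 * backs the public ev_async API. a typical cross-thread wakeup looks like the |
 * following; the names my_async and my_cb are made up for the example. |
 * |
 *   static ev_async my_async; |
 * |
 *   static void |
 *   my_cb (EV_P_ ev_async *w, int revents) |
 *   { |
 *     // runs in the loop thread after ev_async_send |
 *   } |
 * |
 *   // loop thread:  ev_async_init (&my_async, my_cb); ev_async_start (loop, &my_async); |
 *   // other thread: ev_async_send (loop, &my_async); // thread- and signal-safe |
 */ |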
2689 |
|
|
|
2690 |
|
|
/*****************************************************************************/ |
2691 |
|
|
|
2692 |
root |
1.366 |
void |
2693 |
root |
1.486 |
ev_feed_signal (int signum) EV_NOEXCEPT |
2694 |
root |
1.7 |
{ |
2695 |
root |
1.207 |
#if EV_MULTIPLICITY |
2696 |
root |
1.453 |
EV_P; |
2697 |
root |
1.449 |
ECB_MEMORY_FENCE_ACQUIRE; |
2698 |
root |
1.453 |
EV_A = signals [signum - 1].loop; |
2699 |
root |
1.366 |
|
2700 |
|
|
if (!EV_A) |
2701 |
|
|
return; |
2702 |
root |
1.207 |
#endif |
2703 |
|
|
|
2704 |
root |
1.366 |
signals [signum - 1].pending = 1; |
2705 |
|
|
evpipe_write (EV_A_ &sig_pending); |
2706 |
|
|
} |
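/* illustrative sketch, not part of this file: ev_feed_signal may be called from |
 * any context, e.g. from a signal handler that libev did not install itself: |
 * |
 *   static void |
 *   my_handler (int signum) |
 *   { |
 *     ev_feed_signal (signum); // only flags it; actual delivery happens in the loop |
 *   } |
 */ |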
2707 |
|
|
|
2708 |
|
|
static void |
2709 |
|
|
ev_sighandler (int signum) |
2710 |
|
|
{ |
2711 |
root |
1.322 |
#ifdef _WIN32 |
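/* handlers installed with signal () are reset to SIG_DFL on delivery here, so re-arm */ |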
2712 |
root |
1.218 |
signal (signum, ev_sighandler); |
2713 |
root |
1.67 |
#endif |
2714 |
|
|
|
2715 |
root |
1.366 |
ev_feed_signal (signum); |
2716 |
root |
1.7 |
} |
2717 |
|
|
|
2718 |
root |
1.500 |
ecb_noinline |
2719 |
root |
1.480 |
void |
2720 |
root |
1.486 |
ev_feed_signal_event (EV_P_ int signum) EV_NOEXCEPT |
2721 |
root |
1.79 |
{ |
2722 |
root |
1.80 |
WL w; |
2723 |
|
|
|
2724 |
root |
1.500 |
if (ecb_expect_false (signum <= 0 || signum >= EV_NSIG)) |
2725 |
root |
1.307 |
return; |
2726 |
|
|
|
2727 |
|
|
--signum; |
2728 |
|
|
|
2729 |
root |
1.79 |
#if EV_MULTIPLICITY |
2730 |
root |
1.307 |
/* it is permissible to try to feed a signal to the wrong loop */ |
2731 |
|
|
/* or, more usefully, to feed a signal nobody is waiting for */ |
2732 |
root |
1.79 |
|
2733 |
root |
1.500 |
if (ecb_expect_false (signals [signum].loop != EV_A)) |
2734 |
root |
1.306 |
return; |
2735 |
root |
1.307 |
#endif |
2736 |
root |
1.306 |
|
2737 |
root |
1.307 |
signals [signum].pending = 0; |
2738 |
root |
1.438 |
ECB_MEMORY_FENCE_RELEASE; |
2739 |
root |
1.79 |
|
2740 |
|
|
for (w = signals [signum].head; w; w = w->next) |
2741 |
|
|
ev_feed_event (EV_A_ (W)w, EV_SIGNAL); |
2742 |
|
|
} |
2743 |
|
|
|
2744 |
root |
1.303 |
#if EV_USE_SIGNALFD |
2745 |
|
|
static void |
2746 |
|
|
sigfdcb (EV_P_ ev_io *iow, int revents) |
2747 |
|
|
{ |
2748 |
root |
1.306 |
struct signalfd_siginfo si[2], *sip; /* these structs are big */ |
2749 |
root |
1.303 |
|
2750 |
|
|
for (;;) |
2751 |
|
|
{ |
2752 |
|
|
ssize_t res = read (sigfd, si, sizeof (si)); |
2753 |
|
|
|
2754 |
|
|
/* not ISO-C, as res might be -1, but works with SuS */ |
2755 |
|
|
for (sip = si; (char *)sip < (char *)si + res; ++sip) |
2756 |
|
|
ev_feed_signal_event (EV_A_ sip->ssi_signo); |
2757 |
|
|
|
2758 |
|
|
if (res < (ssize_t)sizeof (si)) |
2759 |
|
|
break; |
2760 |
|
|
} |
2761 |
|
|
} |
2762 |
|
|
#endif |
2763 |
|
|
|
2764 |
root |
1.336 |
#endif |
2765 |
|
|
|
2766 |
root |
1.8 |
/*****************************************************************************/ |
2767 |
|
|
|
2768 |
root |
1.336 |
#if EV_CHILD_ENABLE |
2769 |
root |
1.182 |
static WL childs [EV_PID_HASHSIZE]; |
2770 |
root |
1.71 |
|
2771 |
root |
1.136 |
static ev_signal childev; |
2772 |
root |
1.59 |
|
2773 |
root |
1.206 |
#ifndef WIFCONTINUED |
2774 |
|
|
# define WIFCONTINUED(status) 0 |
2775 |
|
|
#endif |
2776 |
|
|
|
2777 |
root |
1.288 |
/* handle a single child status event */ |
2778 |
root |
1.284 |
inline_speed void |
2779 |
root |
1.216 |
child_reap (EV_P_ int chain, int pid, int status) |
2780 |
root |
1.47 |
{ |
2781 |
root |
1.136 |
ev_child *w; |
2782 |
root |
1.206 |
int traced = WIFSTOPPED (status) || WIFCONTINUED (status); |
2783 |
root |
1.47 |
|
2784 |
root |
1.338 |
for (w = (ev_child *)childs [chain & ((EV_PID_HASHSIZE) - 1)]; w; w = (ev_child *)((WL)w)->next) |
2785 |
root |
1.206 |
{ |
2786 |
|
|
if ((w->pid == pid || !w->pid) |
2787 |
|
|
&& (!traced || (w->flags & 1))) |
2788 |
|
|
{ |
2789 |
root |
1.216 |
ev_set_priority (w, EV_MAXPRI); /* need to do it *now*, this *must* be the same prio as the signal watcher itself */ |
2790 |
root |
1.206 |
w->rpid = pid; |
2791 |
|
|
w->rstatus = status; |
2792 |
|
|
ev_feed_event (EV_A_ (W)w, EV_CHILD); |
2793 |
|
|
} |
2794 |
|
|
} |
2795 |
root |
1.47 |
} |
2796 |
|
|
|
2797 |
root |
1.142 |
#ifndef WCONTINUED |
2798 |
|
|
# define WCONTINUED 0 |
2799 |
|
|
#endif |
2800 |
|
|
|
2801 |
root |
1.288 |
/* called on sigchld etc., calls waitpid */ |
2802 |
root |
1.47 |
static void |
2803 |
root |
1.136 |
childcb (EV_P_ ev_signal *sw, int revents) |
2804 |
root |
1.22 |
{ |
2805 |
|
|
int pid, status; |
2806 |
|
|
|
2807 |
root |
1.142 |
/* some systems define WCONTINUED but then fail to support it (linux 2.4) */ |
2808 |
|
|
if (0 >= (pid = waitpid (-1, &status, WNOHANG | WUNTRACED | WCONTINUED))) |
2809 |
|
|
if (!WCONTINUED |
2810 |
|
|
|| errno != EINVAL |
2811 |
|
|
|| 0 >= (pid = waitpid (-1, &status, WNOHANG | WUNTRACED))) |
2812 |
|
|
return; |
2813 |
|
|
|
2814 |
root |
1.216 |
/* make sure we are called again until all children have been reaped */ |
2815 |
root |
1.142 |
/* we need to do it this way so that the callback gets called before we continue */ |
2816 |
|
|
ev_feed_event (EV_A_ (W)sw, EV_SIGNAL); |
2817 |
root |
1.47 |
|
2818 |
root |
1.216 |
child_reap (EV_A_ pid, pid, status); |
2819 |
root |
1.338 |
if ((EV_PID_HASHSIZE) > 1) |
2820 |
root |
1.216 |
child_reap (EV_A_ 0, pid, status); /* this might trigger a watcher twice, but feed_event catches that */ |
2821 |
root |
1.22 |
} |
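/* illustrative sketch, not part of this file: watching a forked child with the |
 * machinery above; child_cb and cw are made-up names. |
 * |
 *   static void |
 *   child_cb (EV_P_ ev_child *w, int revents) |
 *   { |
 *     ev_child_stop (EV_A_ w); |
 *     printf ("pid %d exited with status %x\n", w->rpid, w->rstatus); |
 *   } |
 * |
 *   ev_child cw; |
 *   ev_child_init (&cw, child_cb, pid, 0); // trace == 0: report termination only |
 *   ev_child_start (EV_DEFAULT_ &cw);      // child watchers require the default loop |
 */ |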
2822 |
|
|
|
2823 |
root |
1.45 |
#endif |
2824 |
|
|
|
2825 |
root |
1.22 |
/*****************************************************************************/ |
2826 |
|
|
|
2827 |
root |
1.357 |
#if EV_USE_IOCP |
2828 |
|
|
# include "ev_iocp.c" |
2829 |
|
|
#endif |
2830 |
root |
1.118 |
#if EV_USE_PORT |
2831 |
|
|
# include "ev_port.c" |
2832 |
|
|
#endif |
2833 |
root |
1.44 |
#if EV_USE_KQUEUE |
2834 |
|
|
# include "ev_kqueue.c" |
2835 |
|
|
#endif |
2836 |
root |
1.493 |
#if EV_USE_EPOLL |
2837 |
|
|
# include "ev_epoll.c" |
2838 |
|
|
#endif |
2839 |
root |
1.490 |
#if EV_USE_LINUXAIO |
2840 |
|
|
# include "ev_linuxaio.c" |
2841 |
|
|
#endif |
2842 |
root |
1.501 |
#if EV_USE_IOURING |
2843 |
|
|
# include "ev_iouring.c" |
2844 |
|
|
#endif |
2845 |
root |
1.59 |
#if EV_USE_POLL |
2846 |
root |
1.41 |
# include "ev_poll.c" |
2847 |
|
|
#endif |
2848 |
root |
1.29 |
#if EV_USE_SELECT |
2849 |
root |
1.1 |
# include "ev_select.c" |
2850 |
|
|
#endif |
2851 |
|
|
|
2852 |
root |
1.480 |
ecb_cold int |
2853 |
root |
1.486 |
ev_version_major (void) EV_NOEXCEPT |
2854 |
root |
1.24 |
{ |
2855 |
|
|
return EV_VERSION_MAJOR; |
2856 |
|
|
} |
2857 |
|
|
|
2858 |
root |
1.480 |
ecb_cold int |
2859 |
root |
1.486 |
ev_version_minor (void) EV_NOEXCEPT |
2860 |
root |
1.24 |
{ |
2861 |
|
|
return EV_VERSION_MINOR; |
2862 |
|
|
} |
2863 |
|
|
|
2864 |
root |
1.49 |
/* return true if we are running with elevated privileges and should ignore env variables */ |
2865 |
root |
1.480 |
inline_size ecb_cold int |
2866 |
root |
1.51 |
enable_secure (void) |
2867 |
root |
1.41 |
{ |
2868 |
root |
1.103 |
#ifdef _WIN32 |
2869 |
root |
1.49 |
return 0; |
2870 |
|
|
#else |
2871 |
root |
1.41 |
return getuid () != geteuid () |
2872 |
|
|
|| getgid () != getegid (); |
2873 |
root |
1.49 |
#endif |
2874 |
root |
1.41 |
} |
2875 |
|
|
|
2876 |
root |
1.480 |
ecb_cold |
2877 |
|
|
unsigned int |
2878 |
root |
1.486 |
ev_supported_backends (void) EV_NOEXCEPT |
2879 |
root |
1.129 |
{ |
2880 |
root |
1.130 |
unsigned int flags = 0; |
2881 |
root |
1.129 |
|
2882 |
root |
1.490 |
if (EV_USE_PORT ) flags |= EVBACKEND_PORT; |
2883 |
|
|
if (EV_USE_KQUEUE ) flags |= EVBACKEND_KQUEUE; |
2884 |
|
|
if (EV_USE_EPOLL ) flags |= EVBACKEND_EPOLL; |
2885 |
|
|
if (EV_USE_LINUXAIO) flags |= EVBACKEND_LINUXAIO; |
2886 |
root |
1.501 |
if (EV_USE_IOURING ) flags |= EVBACKEND_IOURING; |
2887 |
root |
1.490 |
if (EV_USE_POLL ) flags |= EVBACKEND_POLL; |
2888 |
|
|
if (EV_USE_SELECT ) flags |= EVBACKEND_SELECT; |
2889 |
root |
1.129 |
|
2890 |
|
|
return flags; |
2891 |
|
|
} |
2892 |
|
|
|
2893 |
root |
1.480 |
ecb_cold |
2894 |
|
|
unsigned int |
2895 |
root |
1.486 |
ev_recommended_backends (void) EV_NOEXCEPT |
2896 |
root |
1.1 |
{ |
2897 |
root |
1.131 |
unsigned int flags = ev_supported_backends (); |
2898 |
root |
1.129 |
|
2899 |
|
|
#ifndef __NetBSD__ |
2900 |
|
|
/* kqueue is borked on everything but netbsd apparently */ |
2901 |
|
|
/* it usually doesn't work correctly on anything but sockets and pipes */ |
2902 |
|
|
flags &= ~EVBACKEND_KQUEUE; |
2903 |
|
|
#endif |
2904 |
|
|
#ifdef __APPLE__ |
2905 |
root |
1.278 |
/* only select works correctly on that "unix-certified" platform */ |
2906 |
|
|
flags &= ~EVBACKEND_KQUEUE; /* horribly broken, even for sockets */ |
2907 |
|
|
flags &= ~EVBACKEND_POLL; /* poll is based on kqueue from 10.5 onwards */ |
2908 |
root |
1.129 |
#endif |
2909 |
root |
1.342 |
#ifdef __FreeBSD__ |
2910 |
|
|
flags &= ~EVBACKEND_POLL; /* poll return value is unusable (http://forums.freebsd.org/archive/index.php/t-10270.html) */ |
2911 |
|
|
#endif |
2912 |
root |
1.129 |
|
2913 |
root |
1.491 |
/* TODO: linuxaio is very experimental */ |
2914 |
root |
1.494 |
#if !EV_RECOMMEND_LINUXAIO |
2915 |
root |
1.491 |
flags &= ~EVBACKEND_LINUXAIO; |
2916 |
root |
1.494 |
#endif |
2917 |
root |
1.501 |
/* TODO: iouring is super experimental */ |
2918 |
|
|
#if !EV_RECOMMEND_IOURING |
2919 |
|
|
flags &= ~EVBACKEND_IOURING; |
2920 |
|
|
#endif |
2921 |
root |
1.491 |
|
2922 |
root |
1.129 |
return flags; |
2923 |
root |
1.51 |
} |
2924 |
|
|
|
2925 |
root |
1.480 |
ecb_cold |
2926 |
|
|
unsigned int |
2927 |
root |
1.486 |
ev_embeddable_backends (void) EV_NOEXCEPT |
2928 |
root |
1.134 |
{ |
2929 |
root |
1.196 |
int flags = EVBACKEND_EPOLL | EVBACKEND_KQUEUE | EVBACKEND_PORT; |
2930 |
|
|
|
2931 |
root |
1.192 |
/* epoll embeddability broken on all linux versions up to at least 2.6.23 */ |
2932 |
root |
1.355 |
if (ev_linux_version () < 0x020620) /* disable it on linux < 2.6.32 */ |
2933 |
|
|
flags &= ~EVBACKEND_EPOLL; |
2934 |
root |
1.196 |
|
2935 |
root |
1.502 |
/* EVBACKEND_LINUXAIO is theoretically embeddable, but suffers from a performance overhead */ |
2936 |
|
|
|
2937 |
|
|
/* EVBACKEND_IOURING is practically embeddable, but the current implementation is not |
2938 |
|
|
* because our backend_fd is the epoll fd we need as fallback. |
2939 |
|
|
* if the kernel ever is fixed, this might change... |
2940 |
|
|
*/ |
2941 |
|
|
|
2942 |
root |
1.196 |
return flags; |
2943 |
root |
1.134 |
} |
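/* illustrative sketch, not part of this file: the usual consumer of the above is |
 * ev_embed - run a kqueue loop just for its fd handling inside another loop. |
 * loop_hi, loop_lo and embed are made-up names. |
 * |
 *   struct ev_loop *loop_hi = ev_default_loop (0); |
 *   struct ev_loop *loop_lo = 0; |
 *   ev_embed embed; |
 * |
 *   if (ev_supported_backends () & ~ev_recommended_backends () & EVBACKEND_KQUEUE) |
 *     { |
 *       loop_lo = ev_loop_new (EVBACKEND_KQUEUE); |
 *       ev_embed_init (&embed, 0, loop_lo); |
 *       ev_embed_start (loop_hi, &embed); |
 *     } |
 */ |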
2944 |
|
|
|
2945 |
|
|
unsigned int |
2946 |
root |
1.486 |
ev_backend (EV_P) EV_NOEXCEPT |
2947 |
root |
1.130 |
{ |
2948 |
|
|
return backend; |
2949 |
|
|
} |
2950 |
|
|
|
2951 |
root |
1.338 |
#if EV_FEATURE_API |
2952 |
root |
1.162 |
unsigned int |
2953 |
root |
1.486 |
ev_iteration (EV_P) EV_NOEXCEPT |
2954 |
root |
1.162 |
{ |
2955 |
|
|
return loop_count; |
2956 |
|
|
} |
2957 |
|
|
|
2958 |
root |
1.294 |
unsigned int |
2959 |
root |
1.486 |
ev_depth (EV_P) EV_NOEXCEPT |
2960 |
root |
1.294 |
{ |
2961 |
|
|
return loop_depth; |
2962 |
|
|
} |
2963 |
|
|
|
2964 |
root |
1.193 |
void |
2965 |
root |
1.486 |
ev_set_io_collect_interval (EV_P_ ev_tstamp interval) EV_NOEXCEPT |
2966 |
root |
1.193 |
{ |
2967 |
|
|
io_blocktime = interval; |
2968 |
|
|
} |
2969 |
|
|
|
2970 |
|
|
void |
2971 |
root |
1.486 |
ev_set_timeout_collect_interval (EV_P_ ev_tstamp interval) EV_NOEXCEPT |
2972 |
root |
1.193 |
{ |
2973 |
|
|
timeout_blocktime = interval; |
2974 |
|
|
} |
2975 |
|
|
|
2976 |
root |
1.297 |
void |
2977 |
root |
1.486 |
ev_set_userdata (EV_P_ void *data) EV_NOEXCEPT |
2978 |
root |
1.297 |
{ |
2979 |
|
|
userdata = data; |
2980 |
|
|
} |
2981 |
|
|
|
2982 |
|
|
void * |
2983 |
root |
1.486 |
ev_userdata (EV_P) EV_NOEXCEPT |
2984 |
root |
1.297 |
{ |
2985 |
|
|
return userdata; |
2986 |
|
|
} |
2987 |
|
|
|
2988 |
root |
1.379 |
void |
2989 |
root |
1.486 |
ev_set_invoke_pending_cb (EV_P_ ev_loop_callback invoke_pending_cb) EV_NOEXCEPT |
2990 |
root |
1.297 |
{ |
2991 |
|
|
invoke_cb = invoke_pending_cb; |
2992 |
|
|
} |
2993 |
|
|
|
2994 |
root |
1.379 |
void |
2995 |
root |
1.486 |
ev_set_loop_release_cb (EV_P_ void (*release)(EV_P) EV_NOEXCEPT, void (*acquire)(EV_P) EV_NOEXCEPT) EV_NOEXCEPT |
2996 |
root |
1.297 |
{ |
2997 |
root |
1.298 |
release_cb = release; |
2998 |
|
|
acquire_cb = acquire; |
2999 |
root |
1.297 |
} |
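/* illustrative sketch, not part of this file: the release/acquire callbacks are |
 * typically used to drop a mutex while the loop blocks in the backend, so other |
 * threads can submit work; my_lock, l_release and l_acquire are made-up names. |
 * |
 *   static pthread_mutex_t my_lock = PTHREAD_MUTEX_INITIALIZER; |
 * |
 *   static void l_release (EV_P) EV_NOEXCEPT { pthread_mutex_unlock (&my_lock); } |
 *   static void l_acquire (EV_P) EV_NOEXCEPT { pthread_mutex_lock   (&my_lock); } |
 * |
 *   // in the thread that runs the loop, with my_lock already held: |
 *   ev_set_loop_release_cb (loop, l_release, l_acquire); |
 */ |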
3000 |
|
|
#endif |
3001 |
|
|
|
3002 |
root |
1.288 |
/* initialise a loop structure, must be zero-initialised */ |
3003 |
root |
1.500 |
ecb_noinline ecb_cold |
3004 |
root |
1.480 |
static void |
3005 |
root |
1.486 |
loop_init (EV_P_ unsigned int flags) EV_NOEXCEPT |
3006 |
root |
1.51 |
{ |
3007 |
root |
1.130 |
if (!backend) |
3008 |
root |
1.23 |
{ |
3009 |
root |
1.366 |
origflags = flags; |
3010 |
|
|
|
3011 |
root |
1.279 |
#if EV_USE_REALTIME |
3012 |
|
|
if (!have_realtime) |
3013 |
|
|
{ |
3014 |
|
|
struct timespec ts; |
3015 |
|
|
|
3016 |
|
|
if (!clock_gettime (CLOCK_REALTIME, &ts)) |
3017 |
|
|
have_realtime = 1; |
3018 |
|
|
} |
3019 |
|
|
#endif |
3020 |
|
|
|
3021 |
root |
1.29 |
#if EV_USE_MONOTONIC |
3022 |
root |
1.279 |
if (!have_monotonic) |
3023 |
|
|
{ |
3024 |
|
|
struct timespec ts; |
3025 |
|
|
|
3026 |
|
|
if (!clock_gettime (CLOCK_MONOTONIC, &ts)) |
3027 |
|
|
have_monotonic = 1; |
3028 |
|
|
} |
3029 |
root |
1.1 |
#endif |
3030 |
|
|
|
3031 |
root |
1.306 |
/* pid check not overridable via env */ |
3032 |
|
|
#ifndef _WIN32 |
3033 |
|
|
if (flags & EVFLAG_FORKCHECK) |
3034 |
|
|
curpid = getpid (); |
3035 |
|
|
#endif |
3036 |
|
|
|
3037 |
|
|
if (!(flags & EVFLAG_NOENV) |
3038 |
|
|
&& !enable_secure () |
3039 |
|
|
&& getenv ("LIBEV_FLAGS")) |
3040 |
|
|
flags = atoi (getenv ("LIBEV_FLAGS")); |
3041 |
|
|
|
3042 |
root |
1.378 |
ev_rt_now = ev_time (); |
3043 |
|
|
mn_now = get_clock (); |
3044 |
|
|
now_floor = mn_now; |
3045 |
|
|
rtmn_diff = ev_rt_now - mn_now; |
3046 |
root |
1.338 |
#if EV_FEATURE_API |
3047 |
root |
1.378 |
invoke_cb = ev_invoke_pending; |
3048 |
root |
1.297 |
#endif |
3049 |
root |
1.1 |
|
3050 |
root |
1.378 |
io_blocktime = 0.; |
3051 |
|
|
timeout_blocktime = 0.; |
3052 |
|
|
backend = 0; |
3053 |
|
|
backend_fd = -1; |
3054 |
|
|
sig_pending = 0; |
3055 |
root |
1.307 |
#if EV_ASYNC_ENABLE |
3056 |
root |
1.378 |
async_pending = 0; |
3057 |
root |
1.307 |
#endif |
3058 |
root |
1.378 |
pipe_write_skipped = 0; |
3059 |
|
|
pipe_write_wanted = 0; |
3060 |
root |
1.448 |
evpipe [0] = -1; |
3061 |
|
|
evpipe [1] = -1; |
3062 |
root |
1.209 |
#if EV_USE_INOTIFY |
3063 |
root |
1.378 |
fs_fd = flags & EVFLAG_NOINOTIFY ? -1 : -2; |
3064 |
root |
1.209 |
#endif |
3065 |
root |
1.303 |
#if EV_USE_SIGNALFD |
3066 |
root |
1.378 |
sigfd = flags & EVFLAG_SIGNALFD ? -2 : -1; |
3067 |
root |
1.303 |
#endif |
3068 |
root |
1.193 |
|
3069 |
root |
1.366 |
if (!(flags & EVBACKEND_MASK)) |
3070 |
root |
1.129 |
flags |= ev_recommended_backends (); |
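      /* probe the enabled backends in order of preference; the first one whose |
         flag is set and whose init succeeds becomes the loop's backend */ |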
3071 |
root |
1.41 |
|
3072 |
root |
1.357 |
#if EV_USE_IOCP |
3073 |
root |
1.490 |
if (!backend && (flags & EVBACKEND_IOCP )) backend = iocp_init (EV_A_ flags); |
3074 |
root |
1.357 |
#endif |
3075 |
root |
1.118 |
#if EV_USE_PORT |
3076 |
root |
1.490 |
if (!backend && (flags & EVBACKEND_PORT )) backend = port_init (EV_A_ flags); |
3077 |
root |
1.118 |
#endif |
3078 |
root |
1.44 |
#if EV_USE_KQUEUE |
3079 |
root |
1.490 |
if (!backend && (flags & EVBACKEND_KQUEUE )) backend = kqueue_init (EV_A_ flags); |
3080 |
|
|
#endif |
3081 |
root |
1.501 |
#if EV_USE_IOURING |
3082 |
|
|
if (!backend && (flags & EVBACKEND_IOURING )) backend = iouring_init (EV_A_ flags); |
3083 |
|
|
#endif |
3084 |
root |
1.490 |
#if EV_USE_LINUXAIO |
3085 |
|
|
if (!backend && (flags & EVBACKEND_LINUXAIO)) backend = linuxaio_init (EV_A_ flags); |
3086 |
root |
1.44 |
#endif |
3087 |
root |
1.29 |
#if EV_USE_EPOLL |
3088 |
root |
1.490 |
if (!backend && (flags & EVBACKEND_EPOLL )) backend = epoll_init (EV_A_ flags); |
3089 |
root |
1.41 |
#endif |
3090 |
root |
1.59 |
#if EV_USE_POLL |
3091 |
root |
1.490 |
if (!backend && (flags & EVBACKEND_POLL )) backend = poll_init (EV_A_ flags); |
3092 |
root |
1.1 |
#endif |
3093 |
root |
1.29 |
#if EV_USE_SELECT |
3094 |
root |
1.490 |
if (!backend && (flags & EVBACKEND_SELECT )) backend = select_init (EV_A_ flags); |
3095 |
root |
1.1 |
#endif |
3096 |
root |
1.70 |
|
3097 |
root |
1.288 |
ev_prepare_init (&pending_w, pendingcb); |
3098 |
|
|
|
3099 |
root |
1.336 |
#if EV_SIGNAL_ENABLE || EV_ASYNC_ENABLE |
3100 |
root |
1.288 |
ev_init (&pipe_w, pipecb); |
3101 |
|
|
ev_set_priority (&pipe_w, EV_MAXPRI); |
3102 |
root |
1.336 |
#endif |
3103 |
root |
1.56 |
} |
3104 |
|
|
} |
3105 |
|
|
|
3106 |
root |
1.288 |
/* free up a loop structure */ |
3107 |
root |
1.480 |
ecb_cold |
3108 |
|
|
void |
3109 |
root |
1.422 |
ev_loop_destroy (EV_P) |
3110 |
root |
1.56 |
{ |
3111 |
root |
1.65 |
int i; |
3112 |
|
|
|
3113 |
root |
1.364 |
#if EV_MULTIPLICITY |
3114 |
root |
1.363 |
/* mimic free (0) */ |
3115 |
|
|
if (!EV_A) |
3116 |
|
|
return; |
3117 |
root |
1.364 |
#endif |
3118 |
root |
1.363 |
|
3119 |
root |
1.361 |
#if EV_CLEANUP_ENABLE |
3120 |
|
|
/* queue cleanup watchers (and execute them) */ |
3121 |
root |
1.500 |
if (ecb_expect_false (cleanupcnt)) |
3122 |
root |
1.361 |
{ |
3123 |
|
|
queue_events (EV_A_ (W *)cleanups, cleanupcnt, EV_CLEANUP); |
3124 |
|
|
EV_INVOKE_PENDING; |
3125 |
|
|
} |
3126 |
|
|
#endif |
3127 |
|
|
|
3128 |
root |
1.359 |
#if EV_CHILD_ENABLE |
3129 |
root |
1.433 |
if (ev_is_default_loop (EV_A) && ev_is_active (&childev)) |
3130 |
root |
1.359 |
{ |
3131 |
|
|
ev_ref (EV_A); /* child watcher */ |
3132 |
|
|
ev_signal_stop (EV_A_ &childev); |
3133 |
|
|
} |
3134 |
|
|
#endif |
3135 |
|
|
|
3136 |
root |
1.288 |
if (ev_is_active (&pipe_w)) |
3137 |
root |
1.207 |
{ |
3138 |
root |
1.303 |
/*ev_ref (EV_A);*/ |
3139 |
|
|
/*ev_io_stop (EV_A_ &pipe_w);*/ |
3140 |
root |
1.207 |
|
3141 |
root |
1.448 |
if (evpipe [0] >= 0) EV_WIN32_CLOSE_FD (evpipe [0]); |
3142 |
|
|
if (evpipe [1] >= 0) EV_WIN32_CLOSE_FD (evpipe [1]); |
3143 |
root |
1.207 |
} |
3144 |
|
|
|
3145 |
root |
1.303 |
#if EV_USE_SIGNALFD |
3146 |
|
|
if (ev_is_active (&sigfd_w)) |
3147 |
root |
1.317 |
close (sigfd); |
3148 |
root |
1.303 |
#endif |
3149 |
|
|
|
3150 |
root |
1.152 |
#if EV_USE_INOTIFY |
3151 |
|
|
if (fs_fd >= 0) |
3152 |
|
|
close (fs_fd); |
3153 |
|
|
#endif |
3154 |
|
|
|
3155 |
|
|
if (backend_fd >= 0) |
3156 |
|
|
close (backend_fd); |
3157 |
|
|
|
3158 |
root |
1.357 |
#if EV_USE_IOCP |
3159 |
root |
1.490 |
if (backend == EVBACKEND_IOCP ) iocp_destroy (EV_A); |
3160 |
root |
1.357 |
#endif |
3161 |
root |
1.118 |
#if EV_USE_PORT |
3162 |
root |
1.490 |
if (backend == EVBACKEND_PORT ) port_destroy (EV_A); |
3163 |
root |
1.118 |
#endif |
3164 |
root |
1.56 |
#if EV_USE_KQUEUE |
3165 |
root |
1.490 |
if (backend == EVBACKEND_KQUEUE ) kqueue_destroy (EV_A); |
3166 |
|
|
#endif |
3167 |
root |
1.501 |
#if EV_USE_IOURING |
3168 |
|
|
if (backend == EVBACKEND_IOURING ) iouring_destroy (EV_A); |
3169 |
|
|
#endif |
3170 |
root |
1.490 |
#if EV_USE_LINUXAIO |
3171 |
|
|
if (backend == EVBACKEND_LINUXAIO) linuxaio_destroy (EV_A); |
3172 |
root |
1.56 |
#endif |
3173 |
|
|
#if EV_USE_EPOLL |
3174 |
root |
1.490 |
if (backend == EVBACKEND_EPOLL ) epoll_destroy (EV_A); |
3175 |
root |
1.56 |
#endif |
3176 |
root |
1.59 |
#if EV_USE_POLL |
3177 |
root |
1.490 |
if (backend == EVBACKEND_POLL ) poll_destroy (EV_A); |
3178 |
root |
1.56 |
#endif |
3179 |
|
|
#if EV_USE_SELECT |
3180 |
root |
1.490 |
if (backend == EVBACKEND_SELECT ) select_destroy (EV_A); |
3181 |
root |
1.56 |
#endif |
3182 |
root |
1.1 |
|
3183 |
root |
1.65 |
for (i = NUMPRI; i--; ) |
3184 |
root |
1.164 |
{ |
3185 |
|
|
array_free (pending, [i]); |
3186 |
|
|
#if EV_IDLE_ENABLE |
3187 |
|
|
array_free (idle, [i]); |
3188 |
|
|
#endif |
3189 |
|
|
} |
3190 |
root |
1.65 |
|
3191 |
root |
1.305 |
ev_free (anfds); anfds = 0; anfdmax = 0; |
3192 |
root |
1.186 |
|
3193 |
root |
1.71 |
/* have to use the microsoft-never-gets-it-right macro */ |
3194 |
root |
1.284 |
array_free (rfeed, EMPTY); |
3195 |
root |
1.164 |
array_free (fdchange, EMPTY); |
3196 |
|
|
array_free (timer, EMPTY); |
3197 |
root |
1.140 |
#if EV_PERIODIC_ENABLE |
3198 |
root |
1.164 |
array_free (periodic, EMPTY); |
3199 |
root |
1.93 |
#endif |
3200 |
root |
1.187 |
#if EV_FORK_ENABLE |
3201 |
|
|
array_free (fork, EMPTY); |
3202 |
|
|
#endif |
3203 |
root |
1.360 |
#if EV_CLEANUP_ENABLE |
3204 |
|
|
array_free (cleanup, EMPTY); |
3205 |
|
|
#endif |
3206 |
root |
1.164 |
array_free (prepare, EMPTY); |
3207 |
|
|
array_free (check, EMPTY); |
3208 |
root |
1.209 |
#if EV_ASYNC_ENABLE |
3209 |
|
|
array_free (async, EMPTY); |
3210 |
|
|
#endif |
3211 |
root |
1.65 |
|
3212 |
root |
1.130 |
backend = 0; |
3213 |
root |
1.359 |
|
3214 |
|
|
#if EV_MULTIPLICITY |
3215 |
|
|
if (ev_is_default_loop (EV_A)) |
3216 |
|
|
#endif |
3217 |
|
|
ev_default_loop_ptr = 0; |
3218 |
|
|
#if EV_MULTIPLICITY |
3219 |
|
|
else |
3220 |
|
|
ev_free (EV_A); |
3221 |
|
|
#endif |
3222 |
root |
1.56 |
} |
3223 |
root |
1.22 |
|
3224 |
root |
1.226 |
#if EV_USE_INOTIFY |
3225 |
root |
1.284 |
inline_size void infy_fork (EV_P); |
3226 |
root |
1.226 |
#endif |
3227 |
root |
1.154 |
|
3228 |
root |
1.284 |
inline_size void |
3229 |
root |
1.56 |
loop_fork (EV_P) |
3230 |
|
|
{ |
3231 |
root |
1.118 |
#if EV_USE_PORT |
3232 |
root |
1.490 |
if (backend == EVBACKEND_PORT ) port_fork (EV_A); |
3233 |
root |
1.56 |
#endif |
3234 |
|
|
#if EV_USE_KQUEUE |
3235 |
root |
1.490 |
if (backend == EVBACKEND_KQUEUE ) kqueue_fork (EV_A); |
3236 |
|
|
#endif |
3237 |
root |
1.501 |
#if EV_USE_IOURING |
3238 |
|
|
if (backend == EVBACKEND_IOURING ) iouring_fork (EV_A); |
3239 |
|
|
#endif |
3240 |
root |
1.490 |
#if EV_USE_LINUXAIO |
3241 |
|
|
if (backend == EVBACKEND_LINUXAIO) linuxaio_fork (EV_A); |
3242 |
root |
1.45 |
#endif |
3243 |
root |
1.118 |
#if EV_USE_EPOLL |
3244 |
root |
1.490 |
if (backend == EVBACKEND_EPOLL ) epoll_fork (EV_A); |
3245 |
root |
1.118 |
#endif |
3246 |
root |
1.154 |
#if EV_USE_INOTIFY |
3247 |
|
|
infy_fork (EV_A); |
3248 |
|
|
#endif |
3249 |
root |
1.70 |
|
3250 |
root |
1.448 |
#if EV_SIGNAL_ENABLE || EV_ASYNC_ENABLE |
3251 |
root |
1.478 |
if (ev_is_active (&pipe_w) && postfork != 2) |
3252 |
root |
1.70 |
{ |
3253 |
root |
1.378 |
/* pipe_write_wanted must be false now, so modifying fd vars should be safe */ |
3254 |
root |
1.70 |
|
3255 |
|
|
ev_ref (EV_A); |
3256 |
root |
1.288 |
ev_io_stop (EV_A_ &pipe_w); |
3257 |
root |
1.220 |
|
3258 |
|
|
if (evpipe [0] >= 0) |
3259 |
root |
1.448 |
EV_WIN32_CLOSE_FD (evpipe [0]); |
3260 |
root |
1.207 |
|
3261 |
|
|
evpipe_init (EV_A); |
3262 |
root |
1.443 |
/* iterate over everything, in case we missed something before */ |
3263 |
|
|
ev_feed_event (EV_A_ &pipe_w, EV_CUSTOM); |
3264 |
root |
1.448 |
} |
3265 |
root |
1.337 |
#endif |
3266 |
root |
1.70 |
|
3267 |
|
|
postfork = 0; |
3268 |
root |
1.1 |
} |
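/* illustrative sketch, not part of this file: user code triggers the fork |
 * handling above by calling ev_loop_fork after fork () in the process that |
 * keeps using the loop; the re-initialisation itself happens lazily on the |
 * next loop iteration. |
 * |
 *   pid_t pid = fork (); |
 * |
 *   if (pid == 0) |
 *     { |
 *       ev_loop_fork (EV_DEFAULT); // child: schedule backend/evpipe re-creation |
 *       ev_run (EV_DEFAULT_ 0); |
 *     } |
 */ |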
3269 |
|
|
|
|