/*
 * libev event processing core, watcher management
 *
 * Copyright (c) 2007,2008,2009,2010 Marc Alexander Lehmann <libev@schmorp.de>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without modifica-
 * tion, are permitted provided that the following conditions are met:
 *
 *   1.  Redistributions of source code must retain the above copyright notice,
 *       this list of conditions and the following disclaimer.
 *
 *   2.  Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in the
 *       documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
 * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MER-
 * CHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO
 * EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPE-
 * CIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
 * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTH-
 * ERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
 * OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Alternatively, the contents of this file may be used under the terms of
 * the GNU General Public License ("GPL") version 2 or any later version,
 * in which case the provisions of the GPL are applicable instead of
 * the above. If you wish to allow the use of your version of this file
 * only under the terms of the GPL and not to allow others to use your
 * version of this file under the BSD license, indicate your decision
 * by deleting the provisions above and replace them with the notice
 * and other provisions required by the GPL. If you do not delete the
 * provisions above, a recipient may use your version of this file under
 * either the BSD or the GPL.
 */

#ifdef __cplusplus
extern "C" {
#endif

/* this big block deduces configuration from config.h */
#ifndef EV_STANDALONE
# ifdef EV_CONFIG_H
#  include EV_CONFIG_H
# else
#  include "config.h"
# endif

# if HAVE_CLOCK_SYSCALL
#  ifndef EV_USE_CLOCK_SYSCALL
#   define EV_USE_CLOCK_SYSCALL 1
#   ifndef EV_USE_REALTIME
#    define EV_USE_REALTIME  0
#   endif
#   ifndef EV_USE_MONOTONIC
#    define EV_USE_MONOTONIC 1
#   endif
#  endif
# elif !defined(EV_USE_CLOCK_SYSCALL)
#  define EV_USE_CLOCK_SYSCALL 0
# endif

# if HAVE_CLOCK_GETTIME
#  ifndef EV_USE_MONOTONIC
#   define EV_USE_MONOTONIC 1
#  endif
#  ifndef EV_USE_REALTIME
#   define EV_USE_REALTIME  0
#  endif
# else
#  ifndef EV_USE_MONOTONIC
#   define EV_USE_MONOTONIC 0
#  endif
#  ifndef EV_USE_REALTIME
#   define EV_USE_REALTIME  0
#  endif
# endif

# ifndef EV_USE_NANOSLEEP
#  if HAVE_NANOSLEEP
#   define EV_USE_NANOSLEEP 1
#  else
#   define EV_USE_NANOSLEEP 0
#  endif
# endif

# ifndef EV_USE_SELECT
#  if HAVE_SELECT && HAVE_SYS_SELECT_H
#   define EV_USE_SELECT 1
#  else
#   define EV_USE_SELECT 0
#  endif
# endif

# ifndef EV_USE_POLL
#  if HAVE_POLL && HAVE_POLL_H
#   define EV_USE_POLL 1
#  else
#   define EV_USE_POLL 0
#  endif
# endif

# ifndef EV_USE_EPOLL
#  if HAVE_EPOLL_CTL && HAVE_SYS_EPOLL_H
#   define EV_USE_EPOLL 1
#  else
#   define EV_USE_EPOLL 0
#  endif
# endif

# ifndef EV_USE_KQUEUE
#  if HAVE_KQUEUE && HAVE_SYS_EVENT_H
#   define EV_USE_KQUEUE 1
#  else
#   define EV_USE_KQUEUE 0
#  endif
# endif

# ifndef EV_USE_PORT
#  if HAVE_PORT_H && HAVE_PORT_CREATE
#   define EV_USE_PORT 1
#  else
#   define EV_USE_PORT 0
#  endif
# endif

# ifndef EV_USE_INOTIFY
#  if HAVE_INOTIFY_INIT && HAVE_SYS_INOTIFY_H
#   define EV_USE_INOTIFY 1
#  else
#   define EV_USE_INOTIFY 0
#  endif
# endif

# ifndef EV_USE_SIGNALFD
#  if HAVE_SIGNALFD && HAVE_SYS_SIGNALFD_H
#   define EV_USE_SIGNALFD 1
#  else
#   define EV_USE_SIGNALFD 0
#  endif
# endif

# ifndef EV_USE_EVENTFD
#  if HAVE_EVENTFD
#   define EV_USE_EVENTFD 1
#  else
#   define EV_USE_EVENTFD 0
#  endif
# endif

#endif
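
/* For illustration: when this file is embedded rather than configured via
 * autoconf, the block above is skipped by defining EV_STANDALONE, and the
 * EV_USE_* knobs are then set by hand before inclusion. A minimal sketch of
 * such a wrapper (the file name is hypothetical):
 *
 *   // my_ev_embed.c
 *   #define EV_STANDALONE 1      // do not read config.h
 *   #define EV_USE_EPOLL  1      // force a specific backend on linux
 *   #define EV_USE_SELECT 0
 *   #include "ev.c"
 */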

#include <math.h>
#include <stdlib.h>
#include <string.h>
#include <fcntl.h>
#include <stddef.h>

#include <stdio.h>

#include <assert.h>
#include <errno.h>
#include <sys/types.h>
#include <time.h>
#include <limits.h>

#include <signal.h>

#ifdef EV_H
# include EV_H
#else
# include "ev.h"
#endif

#ifndef _WIN32
# include <sys/time.h>
# include <sys/wait.h>
# include <unistd.h>
#else
# include <io.h>
# define WIN32_LEAN_AND_MEAN
# include <windows.h>
# ifndef EV_SELECT_IS_WINSOCKET
#  define EV_SELECT_IS_WINSOCKET 1
# endif
#endif

/* this block tries to deduce configuration from header-defined symbols and defaults */

/* try to deduce the maximum number of signals on this platform */
#if defined (EV_NSIG)
/* use what's provided */
#elif defined (NSIG)
# define EV_NSIG (NSIG)
#elif defined(_NSIG)
# define EV_NSIG (_NSIG)
#elif defined (SIGMAX)
# define EV_NSIG (SIGMAX+1)
#elif defined (SIG_MAX)
# define EV_NSIG (SIG_MAX+1)
#elif defined (_SIG_MAX)
# define EV_NSIG (_SIG_MAX+1)
#elif defined (MAXSIG)
# define EV_NSIG (MAXSIG+1)
#elif defined (MAX_SIG)
# define EV_NSIG (MAX_SIG+1)
#elif defined (SIGARRAYSIZE)
# define EV_NSIG SIGARRAYSIZE /* Assume ary[SIGARRAYSIZE] */
#elif defined (_sys_nsig)
# define EV_NSIG (_sys_nsig) /* Solaris 2.5 */
#else
# error "unable to find value for NSIG, please report"
/* to make it compile regardless, just remove the above line */
# define EV_NSIG 65
#endif

#ifndef EV_USE_CLOCK_SYSCALL
# if __linux && __GLIBC__ >= 2
#  define EV_USE_CLOCK_SYSCALL 1
# else
#  define EV_USE_CLOCK_SYSCALL 0
# endif
#endif

#ifndef EV_USE_MONOTONIC
# if defined (_POSIX_MONOTONIC_CLOCK) && _POSIX_MONOTONIC_CLOCK >= 0
#  define EV_USE_MONOTONIC 1
# else
#  define EV_USE_MONOTONIC 0
# endif
#endif

#ifndef EV_USE_REALTIME
# define EV_USE_REALTIME !EV_USE_CLOCK_SYSCALL
#endif

#ifndef EV_USE_NANOSLEEP
# if _POSIX_C_SOURCE >= 199309L
#  define EV_USE_NANOSLEEP 1
# else
#  define EV_USE_NANOSLEEP 0
# endif
#endif

#ifndef EV_USE_SELECT
# define EV_USE_SELECT 1
#endif

#ifndef EV_USE_POLL
# ifdef _WIN32
#  define EV_USE_POLL 0
# else
#  define EV_USE_POLL 1
# endif
#endif

#ifndef EV_USE_EPOLL
# if __linux && (__GLIBC__ > 2 || (__GLIBC__ == 2 && __GLIBC_MINOR__ >= 4))
#  define EV_USE_EPOLL 1
# else
#  define EV_USE_EPOLL 0
# endif
#endif

#ifndef EV_USE_KQUEUE
# define EV_USE_KQUEUE 0
#endif

#ifndef EV_USE_PORT
# define EV_USE_PORT 0
#endif

#ifndef EV_USE_INOTIFY
# if __linux && (__GLIBC__ > 2 || (__GLIBC__ == 2 && __GLIBC_MINOR__ >= 4))
#  define EV_USE_INOTIFY 1
# else
#  define EV_USE_INOTIFY 0
# endif
#endif

#ifndef EV_PID_HASHSIZE
# if EV_MINIMAL
#  define EV_PID_HASHSIZE 1
# else
#  define EV_PID_HASHSIZE 16
# endif
#endif

#ifndef EV_INOTIFY_HASHSIZE
# if EV_MINIMAL
#  define EV_INOTIFY_HASHSIZE 1
# else
#  define EV_INOTIFY_HASHSIZE 16
# endif
#endif

#ifndef EV_USE_EVENTFD
# if __linux && (__GLIBC__ > 2 || (__GLIBC__ == 2 && __GLIBC_MINOR__ >= 7))
#  define EV_USE_EVENTFD 1
# else
#  define EV_USE_EVENTFD 0
# endif
#endif

#ifndef EV_USE_SIGNALFD
# if __linux && (__GLIBC__ > 2 || (__GLIBC__ == 2 && __GLIBC_MINOR__ >= 7))
#  define EV_USE_SIGNALFD 1
# else
#  define EV_USE_SIGNALFD 0
# endif
#endif

#if 0 /* debugging */
# define EV_VERIFY 3
# define EV_USE_4HEAP 1
# define EV_HEAP_CACHE_AT 1
#endif

#ifndef EV_VERIFY
# define EV_VERIFY !EV_MINIMAL
#endif

#ifndef EV_USE_4HEAP
# define EV_USE_4HEAP !EV_MINIMAL
#endif

#ifndef EV_HEAP_CACHE_AT
# define EV_HEAP_CACHE_AT !EV_MINIMAL
#endif

/* on linux, we can use a (slow) syscall to avoid a dependency on pthread, */
/* which makes programs even slower. might work on other unices, too. */
#if EV_USE_CLOCK_SYSCALL
# include <syscall.h>
# ifdef SYS_clock_gettime
#  define clock_gettime(id, ts) syscall (SYS_clock_gettime, (id), (ts))
#  undef EV_USE_MONOTONIC
#  define EV_USE_MONOTONIC 1
# else
#  undef EV_USE_CLOCK_SYSCALL
#  define EV_USE_CLOCK_SYSCALL 0
# endif
#endif

/* this block fixes any misconfiguration where we know we run into trouble otherwise */

#ifdef _AIX
/* AIX has a completely broken poll.h header */
# undef EV_USE_POLL
# define EV_USE_POLL 0
#endif

#ifndef CLOCK_MONOTONIC
# undef EV_USE_MONOTONIC
# define EV_USE_MONOTONIC 0
#endif

#ifndef CLOCK_REALTIME
# undef EV_USE_REALTIME
# define EV_USE_REALTIME 0
#endif

#if !EV_STAT_ENABLE
# undef EV_USE_INOTIFY
# define EV_USE_INOTIFY 0
#endif

#if !EV_USE_NANOSLEEP
# ifndef _WIN32
#  include <sys/select.h>
# endif
#endif

#if EV_USE_INOTIFY
# include <sys/utsname.h>
# include <sys/statfs.h>
# include <sys/inotify.h>
/* some very old inotify.h headers don't have IN_DONT_FOLLOW */
# ifndef IN_DONT_FOLLOW
#  undef EV_USE_INOTIFY
#  define EV_USE_INOTIFY 0
# endif
#endif

#if EV_SELECT_IS_WINSOCKET
# include <winsock.h>
#endif

#if EV_USE_EVENTFD
/* our minimum requirement is glibc 2.7 which has the stub, but not the header */
# include <stdint.h>
# ifndef EFD_NONBLOCK
#  define EFD_NONBLOCK O_NONBLOCK
# endif
# ifndef EFD_CLOEXEC
#  ifdef O_CLOEXEC
#   define EFD_CLOEXEC O_CLOEXEC
#  else
#   define EFD_CLOEXEC 02000000
#  endif
# endif
# ifdef __cplusplus
extern "C" {
# endif
int (eventfd) (unsigned int initval, int flags);
# ifdef __cplusplus
}
# endif
#endif

#if EV_USE_SIGNALFD
/* our minimum requirement is glibc 2.7 which has the stub, but not the header */
# include <stdint.h>
# ifndef SFD_NONBLOCK
#  define SFD_NONBLOCK O_NONBLOCK
# endif
# ifndef SFD_CLOEXEC
#  ifdef O_CLOEXEC
#   define SFD_CLOEXEC O_CLOEXEC
#  else
#   define SFD_CLOEXEC 02000000
#  endif
# endif
# ifdef __cplusplus
extern "C" {
# endif
int signalfd (int fd, const sigset_t *mask, int flags);

struct signalfd_siginfo
{
  uint32_t ssi_signo;
  char pad[128 - sizeof (uint32_t)];
};
# ifdef __cplusplus
}
# endif
#endif


/**/

#if EV_VERIFY >= 3
# define EV_FREQUENT_CHECK ev_loop_verify (EV_A)
#else
# define EV_FREQUENT_CHECK do { } while (0)
#endif

/*
 * This is used to avoid floating point rounding problems.
 * It is added to ev_rt_now when scheduling periodics
 * to ensure progress, time-wise, even when rounding
 * errors are against us.
 * This value is good at least till the year 4000.
 * Better solutions welcome.
 */
#define TIME_EPSILON  0.0001220703125 /* 1/8192 */

#define MIN_TIMEJUMP  1. /* minimum timejump that gets detected (if monotonic clock available) */
#define MAX_BLOCKTIME 59.743 /* never wait longer than this time (to detect time jumps) */

#if __GNUC__ >= 4
# define expect(expr,value)         __builtin_expect ((expr),(value))
# define noinline                   __attribute__ ((noinline))
#else
# define expect(expr,value)         (expr)
# define noinline
# if __STDC_VERSION__ < 199901L && __GNUC__ < 2
#  define inline
# endif
#endif

#define expect_false(expr) expect ((expr) != 0, 0)
#define expect_true(expr)  expect ((expr) != 0, 1)
#define inline_size        static inline

#if EV_MINIMAL
# define inline_speed      static noinline
#else
# define inline_speed      static inline
#endif

#define NUMPRI (EV_MAXPRI - EV_MINPRI + 1)

#if EV_MINPRI == EV_MAXPRI
# define ABSPRI(w) (((W)w), 0)
#else
# define ABSPRI(w) (((W)w)->priority - EV_MINPRI)
#endif

#define EMPTY       /* required for microsoft's broken pseudo-c compiler */
#define EMPTY2(a,b) /* used to suppress some warnings */

typedef ev_watcher *W;
typedef ev_watcher_list *WL;
typedef ev_watcher_time *WT;

#define ev_active(w) ((W)(w))->active
#define ev_at(w) ((WT)(w))->at

#if EV_USE_REALTIME
/* sig_atomic_t is used to avoid per-thread variables or locking but still */
/* giving it a reasonably high chance of working on typical architectures */
static EV_ATOMIC_T have_realtime; /* did clock_gettime (CLOCK_REALTIME) work? */
#endif

#if EV_USE_MONOTONIC
static EV_ATOMIC_T have_monotonic; /* did clock_gettime (CLOCK_MONOTONIC) work? */
#endif

#ifndef EV_FD_TO_WIN32_HANDLE
# define EV_FD_TO_WIN32_HANDLE(fd) _get_osfhandle (fd)
#endif
#ifndef EV_WIN32_HANDLE_TO_FD
# define EV_WIN32_HANDLE_TO_FD(handle) _open_osfhandle (handle, 0)
#endif
#ifndef EV_WIN32_CLOSE_FD
# define EV_WIN32_CLOSE_FD(fd) close (fd)
#endif

#ifdef _WIN32
# include "ev_win32.c"
#endif

/*****************************************************************************/

static void (*syserr_cb)(const char *msg);

void
ev_set_syserr_cb (void (*cb)(const char *msg))
{
  syserr_cb = cb;
}

static void noinline
ev_syserr (const char *msg)
{
  if (!msg)
    msg = "(libev) system error";

  if (syserr_cb)
    syserr_cb (msg);
  else
    {
#if EV_AVOID_STDIO
      write (STDERR_FILENO, msg, strlen (msg));
      write (STDERR_FILENO, ": ", 2);
      msg = strerror (errno);
      write (STDERR_FILENO, msg, strlen (msg));
      write (STDERR_FILENO, "\n", 1);
#else
      perror (msg);
#endif
      abort ();
    }
}

static void *
ev_realloc_emul (void *ptr, long size)
{
  /* some systems, notably openbsd and darwin, fail to properly
   * implement realloc (x, 0) (as required by both ansi c-98 and
   * the single unix specification), so work around them here.
   */

  if (size)
    return realloc (ptr, size);

  free (ptr);
  return 0;
}

static void *(*alloc)(void *ptr, long size) = ev_realloc_emul;

void
ev_set_allocator (void *(*cb)(void *ptr, long size))
{
  alloc = cb;
}

inline_speed void *
ev_realloc (void *ptr, long size)
{
  ptr = alloc (ptr, size);

  if (!ptr && size)
    {
#if EV_AVOID_STDIO
      write (STDERR_FILENO, "libev: memory allocation failed, aborting.",
             sizeof ("libev: memory allocation failed, aborting.") - 1);
#else
      fprintf (stderr, "libev: cannot allocate %ld bytes, aborting.", size);
#endif
      abort ();
    }

  return ptr;
}

#define ev_malloc(size) ev_realloc (0, (size))
#define ev_free(ptr)    ev_realloc ((ptr), 0)
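
/* For illustration: all allocations above can be routed through a custom
 * function with ev_set_allocator (); the callback must behave like realloc,
 * including freeing on a zero size. A minimal sketch (the retry policy and
 * sleep interval are just assumptions, and <unistd.h> is needed for sleep):
 *
 *   static void *
 *   persistent_realloc (void *ptr, long size)
 *   {
 *     for (;;)
 *       {
 *         void *newptr = realloc (ptr, size);
 *
 *         if (newptr || !size)   // success, or a free request
 *           return newptr;
 *
 *         sleep (60);            // wait and hope memory frees up
 *       }
 *   }
 *
 *   ev_set_allocator (persistent_realloc);
 */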

/*****************************************************************************/

/* set in reify when reification needed */
#define EV_ANFD_REIFY 1

/* file descriptor info structure */
typedef struct
{
  WL head;
  unsigned char events; /* the events watched for */
  unsigned char reify;  /* flag set when this ANFD needs reification (EV_ANFD_REIFY, EV__IOFDSET) */
  unsigned char emask;  /* the epoll backend stores the actual kernel mask in here */
  unsigned char unused;
#if EV_USE_EPOLL
  unsigned int egen;    /* generation counter to counter epoll bugs */
#endif
#if EV_SELECT_IS_WINSOCKET
  SOCKET handle;
#endif
} ANFD;

/* stores the pending event set for a given watcher */
typedef struct
{
  W w;
  int events; /* the pending event set for the given watcher */
} ANPENDING;

#if EV_USE_INOTIFY
/* hash table entry per inotify-id */
typedef struct
{
  WL head;
} ANFS;
#endif

/* Heap Entry */
#if EV_HEAP_CACHE_AT
  /* a heap element */
  typedef struct {
    ev_tstamp at;
    WT w;
  } ANHE;

  #define ANHE_w(he)        (he).w     /* access watcher, read-write */
  #define ANHE_at(he)       (he).at    /* access cached at, read-only */
  #define ANHE_at_cache(he) (he).at = (he).w->at /* update at from watcher */
#else
  /* a heap element */
  typedef WT ANHE;

  #define ANHE_w(he)        (he)
  #define ANHE_at(he)       (he)->at
  #define ANHE_at_cache(he)
#endif

#if EV_MULTIPLICITY

  struct ev_loop
  {
    ev_tstamp ev_rt_now;
    #define ev_rt_now ((loop)->ev_rt_now)
    #define VAR(name,decl) decl;
      #include "ev_vars.h"
    #undef VAR
  };
  #include "ev_wrap.h"

  static struct ev_loop default_loop_struct;
  struct ev_loop *ev_default_loop_ptr;

#else

  ev_tstamp ev_rt_now;
  #define VAR(name,decl) static decl;
    #include "ev_vars.h"
  #undef VAR

  static int ev_default_loop_ptr;

#endif

#if EV_MINIMAL < 2
# define EV_RELEASE_CB if (expect_false (release_cb)) release_cb (EV_A)
# define EV_ACQUIRE_CB if (expect_false (acquire_cb)) acquire_cb (EV_A)
# define EV_INVOKE_PENDING invoke_cb (EV_A)
#else
# define EV_RELEASE_CB (void)0
# define EV_ACQUIRE_CB (void)0
# define EV_INVOKE_PENDING ev_invoke_pending (EV_A)
#endif

#define EVUNLOOP_RECURSE 0x80

/*****************************************************************************/

#ifndef EV_HAVE_EV_TIME
ev_tstamp
ev_time (void)
{
#if EV_USE_REALTIME
  if (expect_true (have_realtime))
    {
      struct timespec ts;
      clock_gettime (CLOCK_REALTIME, &ts);
      return ts.tv_sec + ts.tv_nsec * 1e-9;
    }
#endif

  struct timeval tv;
  gettimeofday (&tv, 0);
  return tv.tv_sec + tv.tv_usec * 1e-6;
}
#endif

inline_size ev_tstamp
get_clock (void)
{
#if EV_USE_MONOTONIC
  if (expect_true (have_monotonic))
    {
      struct timespec ts;
      clock_gettime (CLOCK_MONOTONIC, &ts);
      return ts.tv_sec + ts.tv_nsec * 1e-9;
    }
#endif

  return ev_time ();
}

#if EV_MULTIPLICITY
ev_tstamp
ev_now (EV_P)
{
  return ev_rt_now;
}
#endif

void
ev_sleep (ev_tstamp delay)
{
  if (delay > 0.)
    {
#if EV_USE_NANOSLEEP
      struct timespec ts;

      ts.tv_sec  = (time_t)delay;
      ts.tv_nsec = (long)((delay - (ev_tstamp)(ts.tv_sec)) * 1e9);

      nanosleep (&ts, 0);
#elif defined(_WIN32)
      Sleep ((unsigned long)(delay * 1e3));
#else
      struct timeval tv;

      tv.tv_sec  = (time_t)delay;
      tv.tv_usec = (long)((delay - (ev_tstamp)(tv.tv_sec)) * 1e6);

      /* here we rely on sys/time.h + sys/types.h + unistd.h providing select */
      /* something not guaranteed by newer posix versions, but guaranteed */
      /* by older ones */
      select (0, 0, 0, 0, &tv);
#endif
    }
}

/*****************************************************************************/

#define MALLOC_ROUND 4096 /* prefer to allocate in chunks of this size, must be 2**n and >> 4 longs */

/* find a suitable new size for the given array, */
/* hopefully by rounding to a nice-to-malloc size */
inline_size int
array_nextsize (int elem, int cur, int cnt)
{
  int ncur = cur + 1;

  do
    ncur <<= 1;
  while (cnt > ncur);

  /* if size is large, round to MALLOC_ROUND - 4 * longs to accommodate malloc overhead */
  if (elem * ncur > MALLOC_ROUND - sizeof (void *) * 4)
    {
      ncur *= elem;
      ncur = (ncur + elem + (MALLOC_ROUND - 1) + sizeof (void *) * 4) & ~(MALLOC_ROUND - 1);
      ncur = ncur - sizeof (void *) * 4;
      ncur /= elem;
    }

  return ncur;
}

static noinline void *
array_realloc (int elem, void *base, int *cur, int cnt)
{
  *cur = array_nextsize (elem, *cur, cnt);
  return ev_realloc (base, elem * *cur);
}

#define array_init_zero(base,count)	\
  memset ((void *)(base), 0, sizeof (*(base)) * (count))

#define array_needsize(type,base,cur,cnt,init)			\
  if (expect_false ((cnt) > (cur)))				\
    {								\
      int ocur_ = (cur);					\
      (base) = (type *)array_realloc				\
         (sizeof (type), (base), &(cur), (cnt));		\
      init ((base) + (ocur_), (cur) - ocur_);			\
    }

#if 0
#define array_slim(type,stem)					\
  if (stem ## max < array_roundsize (stem ## cnt >> 2))		\
    {								\
      stem ## max = array_roundsize (stem ## cnt >> 1);		\
      base = (type *)ev_realloc (base, sizeof (type) * (stem ## max));\
      fprintf (stderr, "slimmed down " # stem " to %d\n", stem ## max);/*D*/\
    }
#endif

#define array_free(stem, idx) \
  ev_free (stem ## s idx); stem ## cnt idx = stem ## max idx = 0; stem ## s idx = 0
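
/* For illustration, how array_nextsize () above behaves in the large case,
 * assuming 16-byte elements and 8-byte pointers: growing from 1024 to at
 * least 1025 elements first doubles 1025 to 2050; since 2050 * 16 bytes
 * exceeds MALLOC_ROUND - 4 pointers, the byte size is then rounded so the
 * allocation plus the assumed malloc overhead fills whole 4kB chunks,
 * giving 2302 elements: 2302 * 16 + 32 == 36864 == 9 * 4096. */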

/*****************************************************************************/

/* dummy callback for pending events */
static void noinline
pendingcb (EV_P_ ev_prepare *w, int revents)
{
}

void noinline
ev_feed_event (EV_P_ void *w, int revents)
{
  W w_ = (W)w;
  int pri = ABSPRI (w_);

  if (expect_false (w_->pending))
    pendings [pri][w_->pending - 1].events |= revents;
  else
    {
      w_->pending = ++pendingcnt [pri];
      array_needsize (ANPENDING, pendings [pri], pendingmax [pri], w_->pending, EMPTY2);
      pendings [pri][w_->pending - 1].w      = w_;
      pendings [pri][w_->pending - 1].events = revents;
    }
}

inline_speed void
feed_reverse (EV_P_ W w)
{
  array_needsize (W, rfeeds, rfeedmax, rfeedcnt + 1, EMPTY2);
  rfeeds [rfeedcnt++] = w;
}

inline_size void
feed_reverse_done (EV_P_ int revents)
{
  do
    ev_feed_event (EV_A_ rfeeds [--rfeedcnt], revents);
  while (rfeedcnt);
}

inline_speed void
queue_events (EV_P_ W *events, int eventcnt, int type)
{
  int i;

  for (i = 0; i < eventcnt; ++i)
    ev_feed_event (EV_A_ events [i], type);
}

/*****************************************************************************/

inline_speed void
fd_event_nc (EV_P_ int fd, int revents)
{
  ANFD *anfd = anfds + fd;
  ev_io *w;

  for (w = (ev_io *)anfd->head; w; w = (ev_io *)((WL)w)->next)
    {
      int ev = w->events & revents;

      if (ev)
        ev_feed_event (EV_A_ (W)w, ev);
    }
}

/* do not submit kernel events for fds that have reify set */
/* because that means they changed while we were polling for new events */
inline_speed void
fd_event (EV_P_ int fd, int revents)
{
  ANFD *anfd = anfds + fd;

  if (expect_true (!anfd->reify))
    fd_event_nc (EV_A_ fd, revents);
}

void
ev_feed_fd_event (EV_P_ int fd, int revents)
{
  if (fd >= 0 && fd < anfdmax)
    fd_event_nc (EV_A_ fd, revents);
}

/* make sure the external fd watch events are in-sync */
/* with the kernel/libev internal state */
inline_size void
fd_reify (EV_P)
{
  int i;

  for (i = 0; i < fdchangecnt; ++i)
    {
      int fd = fdchanges [i];
      ANFD *anfd = anfds + fd;
      ev_io *w;

      unsigned char events = 0;

      for (w = (ev_io *)anfd->head; w; w = (ev_io *)((WL)w)->next)
        events |= (unsigned char)w->events;

#if EV_SELECT_IS_WINSOCKET
      if (events)
        {
          unsigned long arg;
          anfd->handle = EV_FD_TO_WIN32_HANDLE (fd);
          assert (("libev: only socket fds supported in this configuration", ioctlsocket (anfd->handle, FIONREAD, &arg) == 0));
        }
#endif

      {
        unsigned char o_events = anfd->events;
        unsigned char o_reify  = anfd->reify;

        anfd->reify  = 0;
        anfd->events = events;

        if (o_events != events || o_reify & EV__IOFDSET)
          backend_modify (EV_A_ fd, o_events, events);
      }
    }

  fdchangecnt = 0;
}
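
/* For illustration, the path that ends in the backend_modify call above
 * (a simplified sketch of the watcher API defined later in this file, not
 * extra API):
 *
 *   ev_io_start (loop, w);   // links w into anfds [fd].head and calls
 *                            // fd_change () so the fd lands in fdchanges []
 *   ...
 *   // before polling, the event loop calls fd_reify (), which ORs the
 *   // event masks of all watchers on each changed fd and informs the
 *   // backend only when that set differs from what the kernel knows.
 */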

/* something about the given fd changed */
inline_size void
fd_change (EV_P_ int fd, int flags)
{
  unsigned char reify = anfds [fd].reify;
  anfds [fd].reify |= flags;

  if (expect_true (!reify))
    {
      ++fdchangecnt;
      array_needsize (int, fdchanges, fdchangemax, fdchangecnt, EMPTY2);
      fdchanges [fdchangecnt - 1] = fd;
    }
}

/* the given fd is invalid/unusable, so make sure it doesn't hurt us anymore */
inline_speed void
fd_kill (EV_P_ int fd)
{
  ev_io *w;

  while ((w = (ev_io *)anfds [fd].head))
    {
      ev_io_stop (EV_A_ w);
      ev_feed_event (EV_A_ (W)w, EV_ERROR | EV_READ | EV_WRITE);
    }
}

/* check whether the given fd is actually valid, for error recovery */
inline_size int
fd_valid (int fd)
{
#ifdef _WIN32
  return EV_FD_TO_WIN32_HANDLE (fd) != -1;
#else
  return fcntl (fd, F_GETFD) != -1;
#endif
}

/* called on EBADF to verify fds */
static void noinline
fd_ebadf (EV_P)
{
  int fd;

  for (fd = 0; fd < anfdmax; ++fd)
    if (anfds [fd].events)
      if (!fd_valid (fd) && errno == EBADF)
        fd_kill (EV_A_ fd);
}

/* called on ENOMEM in select/poll to kill some fds and retry */
static void noinline
fd_enomem (EV_P)
{
  int fd;

  for (fd = anfdmax; fd--; )
    if (anfds [fd].events)
      {
        fd_kill (EV_A_ fd);
        break;
      }
}

/* usually called after fork if backend needs to re-arm all fds from scratch */
static void noinline
fd_rearm_all (EV_P)
{
  int fd;

  for (fd = 0; fd < anfdmax; ++fd)
    if (anfds [fd].events)
      {
        anfds [fd].events = 0;
        anfds [fd].emask  = 0;
        fd_change (EV_A_ fd, EV__IOFDSET | EV_ANFD_REIFY);
      }
}

/*****************************************************************************/
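
/* For illustration, a worked example of the index arithmetic used below:
 * with DHEAP == 4 the root sits at HEAP0 == 3, its children occupy
 * indices 4..7 and the children of index 4 occupy 8..11, so
 * HPARENT (8) == ((8 - 3 - 1) / 4) + 3 == 4. At the root, HPARENT (3)
 * evaluates to 3 itself, which is exactly what UPHEAP_DONE tests for;
 * the 2-heap variant instead stops when the parent index reaches 0. */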

/*
 * the heap functions want a real array index. array index 0 is guaranteed to not
 * be in-use at any time. the first heap entry is at array [HEAP0]. DHEAP gives
 * the branching factor of the d-tree.
 */

/*
 * at the moment we allow libev the luxury of two heaps,
 * a small-code-size 2-heap one and a ~1.5kb larger 4-heap
 * which is more cache-efficient.
 * the difference is about 5% with 50000+ watchers.
 */
#if EV_USE_4HEAP

#define DHEAP 4
#define HEAP0 (DHEAP - 1) /* index of first element in heap */
#define HPARENT(k) ((((k) - HEAP0 - 1) / DHEAP) + HEAP0)
#define UPHEAP_DONE(p,k) ((p) == (k))

/* away from the root */
inline_speed void
downheap (ANHE *heap, int N, int k)
{
  ANHE he = heap [k];
  ANHE *E = heap + N + HEAP0;

  for (;;)
    {
      ev_tstamp minat;
      ANHE *minpos;
      ANHE *pos = heap + DHEAP * (k - HEAP0) + HEAP0 + 1;

      /* find minimum child */
      if (expect_true (pos + DHEAP - 1 < E))
        {
          /* fast path */                               (minpos = pos + 0), (minat = ANHE_at (*minpos));
          if (               ANHE_at (pos [1]) < minat) (minpos = pos + 1), (minat = ANHE_at (*minpos));
          if (               ANHE_at (pos [2]) < minat) (minpos = pos + 2), (minat = ANHE_at (*minpos));
          if (               ANHE_at (pos [3]) < minat) (minpos = pos + 3), (minat = ANHE_at (*minpos));
        }
      else if (pos < E)
        {
          /* slow path */                               (minpos = pos + 0), (minat = ANHE_at (*minpos));
          if (pos + 1 < E && ANHE_at (pos [1]) < minat) (minpos = pos + 1), (minat = ANHE_at (*minpos));
          if (pos + 2 < E && ANHE_at (pos [2]) < minat) (minpos = pos + 2), (minat = ANHE_at (*minpos));
          if (pos + 3 < E && ANHE_at (pos [3]) < minat) (minpos = pos + 3), (minat = ANHE_at (*minpos));
        }
      else
        break;

      if (ANHE_at (he) <= minat)
        break;

      heap [k] = *minpos;
      ev_active (ANHE_w (*minpos)) = k;

      k = minpos - heap;
    }

  heap [k] = he;
  ev_active (ANHE_w (he)) = k;
}

#else /* 4HEAP */

#define HEAP0 1
#define HPARENT(k) ((k) >> 1)
#define UPHEAP_DONE(p,k) (!(p))

/* away from the root */
inline_speed void
downheap (ANHE *heap, int N, int k)
{
  ANHE he = heap [k];

  for (;;)
    {
      int c = k << 1;

      if (c >= N + HEAP0)
        break;

      c += c + 1 < N + HEAP0 && ANHE_at (heap [c]) > ANHE_at (heap [c + 1])
           ? 1 : 0;

      if (ANHE_at (he) <= ANHE_at (heap [c]))
        break;

      heap [k] = heap [c];
      ev_active (ANHE_w (heap [k])) = k;

      k = c;
    }

  heap [k] = he;
  ev_active (ANHE_w (he)) = k;
}
#endif

/* towards the root */
inline_speed void
upheap (ANHE *heap, int k)
{
  ANHE he = heap [k];

  for (;;)
    {
      int p = HPARENT (k);

      if (UPHEAP_DONE (p, k) || ANHE_at (heap [p]) <= ANHE_at (he))
        break;

      heap [k] = heap [p];
      ev_active (ANHE_w (heap [k])) = k;
      k = p;
    }

  heap [k] = he;
  ev_active (ANHE_w (he)) = k;
}

/* move an element suitably so it is in a correct place */
inline_size void
adjustheap (ANHE *heap, int N, int k)
{
  if (k > HEAP0 && ANHE_at (heap [k]) <= ANHE_at (heap [HPARENT (k)]))
    upheap (heap, k);
  else
    downheap (heap, N, k);
}

/* rebuild the heap: this function is used only once and executed rarely */
inline_size void
reheap (ANHE *heap, int N)
{
  int i;

  /* we don't use floyds algorithm, upheap is simpler and is more cache-efficient */
  /* also, this is easy to implement and correct for both 2-heaps and 4-heaps */
  for (i = 0; i < N; ++i)
    upheap (heap, i + HEAP0);
}

/*****************************************************************************/

/* associate signal watchers to a signal */
typedef struct
{
  EV_ATOMIC_T pending;
#if EV_MULTIPLICITY
  EV_P;
#endif
  WL head;
} ANSIG;

static ANSIG signals [EV_NSIG - 1];

/*****************************************************************************/
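
/* For illustration: the pipe/eventfd machinery below is the usual
 * self-pipe pattern -- asynchronous contexts (signal handlers, or
 * ev_async_send () from other threads) only set an atomic flag and write
 * one byte or counter increment, and pipecb () later does the real
 * dispatch inside the loop. A hedged sketch of the public side, assuming
 * a watcher set up elsewhere with ev_async_init ()/ev_async_start ():
 *
 *   // from any thread:
 *   ev_async_send (loop, &wakeup);   // wakes the loop via this pipe
 */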
1196 |
|
|
|
1197 |
root |
1.288 |
/* used to prepare libev internal fd's */ |
1198 |
|
|
/* this is not fork-safe */ |
1199 |
root |
1.284 |
inline_speed void |
1200 |
root |
1.207 |
fd_intern (int fd) |
1201 |
|
|
{ |
1202 |
|
|
#ifdef _WIN32 |
1203 |
root |
1.254 |
unsigned long arg = 1; |
1204 |
root |
1.322 |
ioctlsocket (EV_FD_TO_WIN32_HANDLE (fd), FIONBIO, &arg); |
1205 |
root |
1.207 |
#else |
1206 |
|
|
fcntl (fd, F_SETFD, FD_CLOEXEC); |
1207 |
|
|
fcntl (fd, F_SETFL, O_NONBLOCK); |
1208 |
|
|
#endif |
1209 |
|
|
} |
1210 |
|
|
|
1211 |
|
|
static void noinline |
1212 |
|
|
evpipe_init (EV_P) |
1213 |
|
|
{ |
1214 |
root |
1.288 |
if (!ev_is_active (&pipe_w)) |
1215 |
root |
1.207 |
{ |
1216 |
root |
1.220 |
#if EV_USE_EVENTFD |
1217 |
root |
1.303 |
evfd = eventfd (0, EFD_NONBLOCK | EFD_CLOEXEC); |
1218 |
|
|
if (evfd < 0 && errno == EINVAL) |
1219 |
|
|
evfd = eventfd (0, 0); |
1220 |
|
|
|
1221 |
|
|
if (evfd >= 0) |
1222 |
root |
1.220 |
{ |
1223 |
|
|
evpipe [0] = -1; |
1224 |
root |
1.303 |
fd_intern (evfd); /* doing it twice doesn't hurt */ |
1225 |
root |
1.288 |
ev_io_set (&pipe_w, evfd, EV_READ); |
1226 |
root |
1.220 |
} |
1227 |
|
|
else |
1228 |
|
|
#endif |
1229 |
|
|
{ |
1230 |
|
|
while (pipe (evpipe)) |
1231 |
root |
1.269 |
ev_syserr ("(libev) error creating signal/async pipe"); |
1232 |
root |
1.207 |
|
1233 |
root |
1.220 |
fd_intern (evpipe [0]); |
1234 |
|
|
fd_intern (evpipe [1]); |
1235 |
root |
1.288 |
ev_io_set (&pipe_w, evpipe [0], EV_READ); |
1236 |
root |
1.220 |
} |
1237 |
root |
1.207 |
|
1238 |
root |
1.288 |
ev_io_start (EV_A_ &pipe_w); |
1239 |
root |
1.210 |
ev_unref (EV_A); /* watcher should not keep loop alive */ |
1240 |
root |
1.207 |
} |
1241 |
|
|
} |
1242 |
|
|
|
1243 |
root |
1.284 |
inline_size void |
1244 |
root |
1.214 |
evpipe_write (EV_P_ EV_ATOMIC_T *flag) |
1245 |
root |
1.207 |
{ |
1246 |
root |
1.214 |
if (!*flag) |
1247 |
root |
1.207 |
{ |
1248 |
ayin |
1.215 |
int old_errno = errno; /* save errno because write might clobber it */ |
1249 |
root |
1.214 |
|
1250 |
|
|
*flag = 1; |
1251 |
root |
1.220 |
|
1252 |
|
|
#if EV_USE_EVENTFD |
1253 |
|
|
if (evfd >= 0) |
1254 |
|
|
{ |
1255 |
|
|
uint64_t counter = 1; |
1256 |
|
|
write (evfd, &counter, sizeof (uint64_t)); |
1257 |
|
|
} |
1258 |
|
|
else |
1259 |
|
|
#endif |
1260 |
|
|
write (evpipe [1], &old_errno, 1); |
1261 |
root |
1.214 |
|
1262 |
root |
1.207 |
errno = old_errno; |
1263 |
|
|
} |
1264 |
|
|
} |
1265 |
|
|
|
1266 |
root |
1.288 |
/* called whenever the libev signal pipe */ |
1267 |
|
|
/* got some events (signal, async) */ |
1268 |
root |
1.207 |
static void |
1269 |
|
|
pipecb (EV_P_ ev_io *iow, int revents) |
1270 |
|
|
{ |
1271 |
root |
1.307 |
int i; |
1272 |
|
|
|
1273 |
root |
1.220 |
#if EV_USE_EVENTFD |
1274 |
|
|
if (evfd >= 0) |
1275 |
|
|
{ |
1276 |
root |
1.232 |
uint64_t counter; |
1277 |
root |
1.220 |
read (evfd, &counter, sizeof (uint64_t)); |
1278 |
|
|
} |
1279 |
|
|
else |
1280 |
|
|
#endif |
1281 |
|
|
{ |
1282 |
|
|
char dummy; |
1283 |
|
|
read (evpipe [0], &dummy, 1); |
1284 |
|
|
} |
1285 |
root |
1.207 |
|
1286 |
root |
1.307 |
if (sig_pending) |
1287 |
root |
1.207 |
{ |
1288 |
root |
1.307 |
sig_pending = 0; |
1289 |
root |
1.207 |
|
1290 |
root |
1.307 |
for (i = EV_NSIG - 1; i--; ) |
1291 |
|
|
if (expect_false (signals [i].pending)) |
1292 |
|
|
ev_feed_signal_event (EV_A_ i + 1); |
1293 |
root |
1.207 |
} |
1294 |
|
|
|
1295 |
root |
1.209 |
#if EV_ASYNC_ENABLE |
1296 |
root |
1.307 |
if (async_pending) |
1297 |
root |
1.207 |
{ |
1298 |
root |
1.307 |
async_pending = 0; |
1299 |
root |
1.207 |
|
1300 |
|
|
for (i = asynccnt; i--; ) |
1301 |
|
|
if (asyncs [i]->sent) |
1302 |
|
|
{ |
1303 |
|
|
asyncs [i]->sent = 0; |
1304 |
|
|
ev_feed_event (EV_A_ asyncs [i], EV_ASYNC); |
1305 |
|
|
} |
1306 |
|
|
} |
1307 |
root |
1.209 |
#endif |
1308 |
root |
1.207 |
} |
1309 |
|
|
|
1310 |
|
|
/*****************************************************************************/ |
1311 |
|
|
|
1312 |
root |
1.7 |
static void |
1313 |
root |
1.218 |
ev_sighandler (int signum) |
1314 |
root |
1.7 |
{ |
1315 |
root |
1.207 |
#if EV_MULTIPLICITY |
1316 |
root |
1.306 |
EV_P = signals [signum - 1].loop; |
1317 |
root |
1.207 |
#endif |
1318 |
|
|
|
1319 |
root |
1.322 |
#ifdef _WIN32 |
1320 |
root |
1.218 |
signal (signum, ev_sighandler); |
1321 |
root |
1.67 |
#endif |
1322 |
|
|
|
1323 |
root |
1.307 |
signals [signum - 1].pending = 1; |
1324 |
|
|
evpipe_write (EV_A_ &sig_pending); |
1325 |
root |
1.7 |
} |
1326 |
|
|
|
1327 |
root |
1.140 |
void noinline |
1328 |
root |
1.79 |
ev_feed_signal_event (EV_P_ int signum) |
1329 |
|
|
{ |
1330 |
root |
1.80 |
WL w; |
1331 |
|
|
|
1332 |
root |
1.307 |
if (expect_false (signum <= 0 || signum > EV_NSIG)) |
1333 |
|
|
return; |
1334 |
|
|
|
1335 |
|
|
--signum; |
1336 |
|
|
|
1337 |
root |
1.79 |
#if EV_MULTIPLICITY |
1338 |
root |
1.307 |
/* it is permissible to try to feed a signal to the wrong loop */ |
1339 |
|
|
/* or, likely more useful, feeding a signal nobody is waiting for */ |
1340 |
root |
1.79 |
|
1341 |
root |
1.307 |
if (expect_false (signals [signum].loop != EV_A)) |
1342 |
root |
1.306 |
return; |
1343 |
root |
1.307 |
#endif |
1344 |
root |
1.306 |
|
1345 |
root |
1.307 |
signals [signum].pending = 0; |
1346 |
root |
1.79 |
|
1347 |
|
|
for (w = signals [signum].head; w; w = w->next) |
1348 |
|
|
ev_feed_event (EV_A_ (W)w, EV_SIGNAL); |
1349 |
|
|
} |
1350 |
|
|
|
1351 |
root |
1.303 |
#if EV_USE_SIGNALFD |
1352 |
|
|
static void |
1353 |
|
|
sigfdcb (EV_P_ ev_io *iow, int revents) |
1354 |
|
|
{ |
1355 |
root |
1.306 |
struct signalfd_siginfo si[2], *sip; /* these structs are big */ |
1356 |
root |
1.303 |
|
1357 |
|
|
for (;;) |
1358 |
|
|
{ |
1359 |
|
|
ssize_t res = read (sigfd, si, sizeof (si)); |
1360 |
|
|
|
1361 |
|
|
/* not ISO-C, as res might be -1, but works with SuS */ |
1362 |
|
|
for (sip = si; (char *)sip < (char *)si + res; ++sip) |
1363 |
|
|
ev_feed_signal_event (EV_A_ sip->ssi_signo); |
1364 |
|
|
|
1365 |
|
|
if (res < (ssize_t)sizeof (si)) |
1366 |
|
|
break; |
1367 |
|
|
} |
1368 |
|
|
} |
1369 |
|
|
#endif |
1370 |
|
|
|
1371 |
root |
1.8 |
/*****************************************************************************/ |
1372 |
|
|
|
1373 |
root |
1.182 |
static WL childs [EV_PID_HASHSIZE]; |
1374 |
root |
1.71 |
|
1375 |
root |
1.103 |
#ifndef _WIN32 |
1376 |
root |
1.45 |
|
1377 |
root |
1.136 |
static ev_signal childev; |
1378 |
root |
1.59 |
|
1379 |
root |
1.206 |
#ifndef WIFCONTINUED |
1380 |
|
|
# define WIFCONTINUED(status) 0 |
1381 |
|
|
#endif |
1382 |
|
|
|
1383 |
root |
1.288 |
/* handle a single child status event */ |
1384 |
root |
1.284 |
inline_speed void |
1385 |
root |
1.216 |
child_reap (EV_P_ int chain, int pid, int status) |
1386 |
root |
1.47 |
{ |
1387 |
root |
1.136 |
ev_child *w; |
1388 |
root |
1.206 |
int traced = WIFSTOPPED (status) || WIFCONTINUED (status); |
1389 |
root |
1.47 |
|
1390 |
root |
1.149 |
for (w = (ev_child *)childs [chain & (EV_PID_HASHSIZE - 1)]; w; w = (ev_child *)((WL)w)->next) |
1391 |
root |
1.206 |
{ |
1392 |
|
|
if ((w->pid == pid || !w->pid) |
1393 |
|
|
&& (!traced || (w->flags & 1))) |
1394 |
|
|
{ |
1395 |
root |
1.216 |
ev_set_priority (w, EV_MAXPRI); /* need to do it *now*, this *must* be the same prio as the signal watcher itself */ |
1396 |
root |
1.206 |
w->rpid = pid; |
1397 |
|
|
w->rstatus = status; |
1398 |
|
|
ev_feed_event (EV_A_ (W)w, EV_CHILD); |
1399 |
|
|
} |
1400 |
|
|
} |
1401 |
root |
1.47 |
} |
1402 |
|
|
|
1403 |
root |
1.142 |
#ifndef WCONTINUED |
1404 |
|
|
# define WCONTINUED 0 |
1405 |
|
|
#endif |
1406 |
|
|
|
1407 |
root |
1.288 |
/* called on sigchld etc., calls waitpid */ |
1408 |
root |
1.47 |
static void |
1409 |
root |
1.136 |
childcb (EV_P_ ev_signal *sw, int revents) |
1410 |
root |
1.22 |
{ |
1411 |
|
|
int pid, status; |
1412 |
|
|
|
1413 |
root |
1.142 |
/* some systems define WCONTINUED but then fail to support it (linux 2.4) */ |
1414 |
|
|
if (0 >= (pid = waitpid (-1, &status, WNOHANG | WUNTRACED | WCONTINUED))) |
1415 |
|
|
if (!WCONTINUED |
1416 |
|
|
|| errno != EINVAL |
1417 |
|
|
|| 0 >= (pid = waitpid (-1, &status, WNOHANG | WUNTRACED))) |
1418 |
|
|
return; |
1419 |
|
|
|
1420 |
root |
1.216 |
/* make sure we are called again until all children have been reaped */ |
1421 |
root |
1.142 |
/* we need to do it this way so that the callback gets called before we continue */ |
1422 |
|
|
ev_feed_event (EV_A_ (W)sw, EV_SIGNAL); |
1423 |
root |
1.47 |
|
1424 |
root |
1.216 |
child_reap (EV_A_ pid, pid, status); |
1425 |
root |
1.149 |
if (EV_PID_HASHSIZE > 1) |
1426 |
root |
1.216 |
child_reap (EV_A_ 0, pid, status); /* this might trigger a watcher twice, but feed_event catches that */ |
1427 |
root |
1.22 |
} |
1428 |
|
|
|
1429 |
root |
1.45 |
#endif |
1430 |
|
|
|
1431 |
root |
1.22 |
/*****************************************************************************/ |
1432 |
|
|
|
1433 |
root |
1.118 |
#if EV_USE_PORT |
1434 |
|
|
# include "ev_port.c" |
1435 |
|
|
#endif |
1436 |
root |
1.44 |
#if EV_USE_KQUEUE |
1437 |
|
|
# include "ev_kqueue.c" |
1438 |
|
|
#endif |
1439 |
root |
1.29 |
#if EV_USE_EPOLL |
1440 |
root |
1.1 |
# include "ev_epoll.c" |
1441 |
|
|
#endif |
1442 |
root |
1.59 |
#if EV_USE_POLL |
1443 |
root |
1.41 |
# include "ev_poll.c" |
1444 |
|
|
#endif |
1445 |
root |
1.29 |
#if EV_USE_SELECT |
1446 |
root |
1.1 |
# include "ev_select.c" |
1447 |
|
|
#endif |
1448 |
|
|
|
1449 |
root |
1.24 |
int |
1450 |
|
|
ev_version_major (void) |
1451 |
|
|
{ |
1452 |
|
|
return EV_VERSION_MAJOR; |
1453 |
|
|
} |
1454 |
|
|
|
1455 |
|
|
int |
1456 |
|
|
ev_version_minor (void) |
1457 |
|
|
{ |
1458 |
|
|
return EV_VERSION_MINOR; |
1459 |
|
|
} |

/* return true if we are running with elevated privileges and should ignore env variables */
int inline_size
enable_secure (void)
{
#ifdef _WIN32
  return 0;
#else
  return getuid () != geteuid ()
      || getgid () != getegid ();
#endif
}

unsigned int
ev_supported_backends (void)
{
  unsigned int flags = 0;

  if (EV_USE_PORT  ) flags |= EVBACKEND_PORT;
  if (EV_USE_KQUEUE) flags |= EVBACKEND_KQUEUE;
  if (EV_USE_EPOLL ) flags |= EVBACKEND_EPOLL;
  if (EV_USE_POLL  ) flags |= EVBACKEND_POLL;
  if (EV_USE_SELECT) flags |= EVBACKEND_SELECT;

  return flags;
}

unsigned int
ev_recommended_backends (void)
{
  unsigned int flags = ev_supported_backends ();

#ifndef __NetBSD__
  /* kqueue is borked on everything but netbsd apparently */
  /* it usually doesn't work correctly on anything but sockets and pipes */
  flags &= ~EVBACKEND_KQUEUE;
#endif
#ifdef __APPLE__
  /* only select works correctly on that "unix-certified" platform */
  flags &= ~EVBACKEND_KQUEUE; /* horribly broken, even for sockets */
  flags &= ~EVBACKEND_POLL;   /* poll is based on kqueue from 10.5 onwards */
#endif

  return flags;
}
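
/* Illustrative usage sketch (annotation, not part of libev): the backend
 * bits above can be passed in the flags of ev_default_loop/ev_loop_new to
 * force a specific backend; with no backend bits set, the recommended set
 * above is used. The epoll-preference policy here is only an example. */
#if 0
#include <ev.h>

static struct ev_loop *
start_default_loop (void)
{
  unsigned int flags = EVFLAG_AUTO;

  /* example policy: insist on epoll when the library was built with it */
  if (ev_supported_backends () & EVBACKEND_EPOLL)
    flags |= EVBACKEND_EPOLL;

  return ev_default_loop (flags); /* 0 if the backend could not be set up */
}
#endif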

unsigned int
ev_embeddable_backends (void)
{
  int flags = EVBACKEND_EPOLL | EVBACKEND_KQUEUE | EVBACKEND_PORT;

  /* epoll embeddability broken on all linux versions up to at least 2.6.23 */
  /* please fix it and tell me how to detect the fix */
  flags &= ~EVBACKEND_EPOLL;

  return flags;
}

unsigned int
ev_backend (EV_P)
{
  return backend;
}

#if EV_MINIMAL < 2
unsigned int
ev_loop_count (EV_P)
{
  return loop_count;
}

unsigned int
ev_loop_depth (EV_P)
{
  return loop_depth;
}

void
ev_set_io_collect_interval (EV_P_ ev_tstamp interval)
{
  io_blocktime = interval;
}

void
ev_set_timeout_collect_interval (EV_P_ ev_tstamp interval)
{
  timeout_blocktime = interval;
}

void
ev_set_userdata (EV_P_ void *data)
{
  userdata = data;
}

void *
ev_userdata (EV_P)
{
  return userdata;
}
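
/* Illustrative usage sketch (annotation, not part of libev): the userdata
 * slot above holds one opaque pointer per loop, typically used to reach
 * application state from watcher callbacks; struct app_state here is a
 * hypothetical example type. */
#if 0
#include <ev.h>

struct app_state
{
  int connections;
};

static void
conn_cb (EV_P_ ev_io *w, int revents)
{
  struct app_state *state = (struct app_state *)ev_userdata (EV_A);

  ++state->connections;
}

/* setup: ev_set_userdata (loop, &state); */
#endif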

void ev_set_invoke_pending_cb (EV_P_ void (*invoke_pending_cb)(EV_P))
{
  invoke_cb = invoke_pending_cb;
}

void ev_set_loop_release_cb (EV_P_ void (*release)(EV_P), void (*acquire)(EV_P))
{
  release_cb = release;
  acquire_cb = acquire;
}
#endif
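
/* Illustrative usage sketch (annotation, not part of libev): the two hooks
 * above exist so a threaded embedder can drop a mutex while the loop
 * blocks in the kernel and re-acquire it afterwards; loop_mutex is a
 * hypothetical application lock. */
#if 0
#include <pthread.h>
#include <ev.h>

static pthread_mutex_t loop_mutex = PTHREAD_MUTEX_INITIALIZER;

static void
l_release (EV_P)
{
  pthread_mutex_unlock (&loop_mutex);
}

static void
l_acquire (EV_P)
{
  pthread_mutex_lock (&loop_mutex);
}

/* setup: ev_set_loop_release_cb (loop, l_release, l_acquire); */
#endif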

/* initialise a loop structure, must be zero-initialised */
static void noinline
loop_init (EV_P_ unsigned int flags)
{
  if (!backend)
    {
#if EV_USE_REALTIME
      if (!have_realtime)
        {
          struct timespec ts;

          if (!clock_gettime (CLOCK_REALTIME, &ts))
            have_realtime = 1;
        }
#endif

#if EV_USE_MONOTONIC
      if (!have_monotonic)
        {
          struct timespec ts;

          if (!clock_gettime (CLOCK_MONOTONIC, &ts))
            have_monotonic = 1;
        }
#endif

      /* pid check not overridable via env */
#ifndef _WIN32
      if (flags & EVFLAG_FORKCHECK)
        curpid = getpid ();
#endif

      if (!(flags & EVFLAG_NOENV)
          && !enable_secure ()
          && getenv ("LIBEV_FLAGS"))
        flags = atoi (getenv ("LIBEV_FLAGS"));

      ev_rt_now = ev_time ();
      mn_now    = get_clock ();
      now_floor = mn_now;
      rtmn_diff = ev_rt_now - mn_now;
#if EV_MINIMAL < 2
      invoke_cb = ev_invoke_pending;
#endif

      io_blocktime      = 0.;
      timeout_blocktime = 0.;
      backend = 0;
      backend_fd = -1;
      sig_pending = 0;
#if EV_ASYNC_ENABLE
      async_pending = 0;
#endif
#if EV_USE_INOTIFY
      fs_fd = flags & EVFLAG_NOINOTIFY ? -1 : -2;
#endif
#if EV_USE_SIGNALFD
      sigfd = flags & EVFLAG_SIGNALFD ? -2 : -1;
#endif

      if (!(flags & 0x0000ffffU))
        flags |= ev_recommended_backends ();

#if EV_USE_PORT
      if (!backend && (flags & EVBACKEND_PORT  )) backend = port_init   (EV_A_ flags);
#endif
#if EV_USE_KQUEUE
      if (!backend && (flags & EVBACKEND_KQUEUE)) backend = kqueue_init (EV_A_ flags);
#endif
#if EV_USE_EPOLL
      if (!backend && (flags & EVBACKEND_EPOLL )) backend = epoll_init  (EV_A_ flags);
#endif
#if EV_USE_POLL
      if (!backend && (flags & EVBACKEND_POLL  )) backend = poll_init   (EV_A_ flags);
#endif
#if EV_USE_SELECT
      if (!backend && (flags & EVBACKEND_SELECT)) backend = select_init (EV_A_ flags);
#endif

      ev_prepare_init (&pending_w, pendingcb);

      ev_init (&pipe_w, pipecb);
      ev_set_priority (&pipe_w, EV_MAXPRI);
    }
}

/* free up a loop structure */
static void noinline
loop_destroy (EV_P)
{
  int i;

  if (ev_is_active (&pipe_w))
    {
      /*ev_ref (EV_A);*/
      /*ev_io_stop (EV_A_ &pipe_w);*/

#if EV_USE_EVENTFD
      if (evfd >= 0)
        close (evfd);
#endif

      if (evpipe [0] >= 0)
        {
          EV_WIN32_CLOSE_FD (evpipe [0]);
          EV_WIN32_CLOSE_FD (evpipe [1]);
        }
    }

#if EV_USE_SIGNALFD
  if (ev_is_active (&sigfd_w))
    close (sigfd);
#endif

#if EV_USE_INOTIFY
  if (fs_fd >= 0)
    close (fs_fd);
#endif

  if (backend_fd >= 0)
    close (backend_fd);

#if EV_USE_PORT
  if (backend == EVBACKEND_PORT  ) port_destroy   (EV_A);
#endif
#if EV_USE_KQUEUE
  if (backend == EVBACKEND_KQUEUE) kqueue_destroy (EV_A);
#endif
#if EV_USE_EPOLL
  if (backend == EVBACKEND_EPOLL ) epoll_destroy  (EV_A);
#endif
#if EV_USE_POLL
  if (backend == EVBACKEND_POLL  ) poll_destroy   (EV_A);
#endif
#if EV_USE_SELECT
  if (backend == EVBACKEND_SELECT) select_destroy (EV_A);
#endif

  for (i = NUMPRI; i--; )
    {
      array_free (pending, [i]);
#if EV_IDLE_ENABLE
      array_free (idle, [i]);
#endif
    }

  ev_free (anfds); anfds = 0; anfdmax = 0;

  /* have to use the microsoft-never-gets-it-right macro */
  array_free (rfeed, EMPTY);
  array_free (fdchange, EMPTY);
  array_free (timer, EMPTY);
#if EV_PERIODIC_ENABLE
  array_free (periodic, EMPTY);
#endif
#if EV_FORK_ENABLE
  array_free (fork, EMPTY);
#endif
  array_free (prepare, EMPTY);
  array_free (check, EMPTY);
#if EV_ASYNC_ENABLE
  array_free (async, EMPTY);
#endif

  backend = 0;
}

#if EV_USE_INOTIFY
inline_size void infy_fork (EV_P);
#endif

inline_size void
loop_fork (EV_P)
{
#if EV_USE_PORT
  if (backend == EVBACKEND_PORT  ) port_fork   (EV_A);
#endif
#if EV_USE_KQUEUE
  if (backend == EVBACKEND_KQUEUE) kqueue_fork (EV_A);
#endif
#if EV_USE_EPOLL
  if (backend == EVBACKEND_EPOLL ) epoll_fork  (EV_A);
#endif
#if EV_USE_INOTIFY
  infy_fork (EV_A);
#endif

  if (ev_is_active (&pipe_w))
    {
      /* this "locks" the handlers against writing to the pipe */
      /* while we modify the fd vars */
      sig_pending   = 1;
#if EV_ASYNC_ENABLE
      async_pending = 1;
#endif

      ev_ref (EV_A);
      ev_io_stop (EV_A_ &pipe_w);

#if EV_USE_EVENTFD
      if (evfd >= 0)
        close (evfd);
#endif

      if (evpipe [0] >= 0)
        {
          EV_WIN32_CLOSE_FD (evpipe [0]);
          EV_WIN32_CLOSE_FD (evpipe [1]);
        }

      evpipe_init (EV_A);
      /* now iterate over everything, in case we missed something */
      pipecb (EV_A_ &pipe_w, EV_READ);
    }

  postfork = 0;
}

#if EV_MULTIPLICITY

struct ev_loop *
ev_loop_new (unsigned int flags)
{
  EV_P = (struct ev_loop *)ev_malloc (sizeof (struct ev_loop));

  memset (EV_A, 0, sizeof (struct ev_loop));
  loop_init (EV_A_ flags);

  if (ev_backend (EV_A))
    return EV_A;

  return 0;
}

void
ev_loop_destroy (EV_P)
{
  loop_destroy (EV_A);
  ev_free (loop);
}

void
ev_loop_fork (EV_P)
{
  postfork = 1; /* must be in line with ev_default_fork */
}
#endif /* multiplicity */
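
/* Illustrative usage sketch (annotation, not part of libev): loops other
 * than the default one are created with ev_loop_new above and torn down
 * with ev_loop_destroy; ev_loop_new returns 0 if no backend could be
 * initialised. */
#if 0
#include <ev.h>

static void
run_secondary_loop (void)
{
  struct ev_loop *loop = ev_loop_new (EVFLAG_AUTO);

  if (!loop)
    return; /* no usable backend */

  /* ... start watchers on this loop ... */

  ev_loop (loop, 0);
  ev_loop_destroy (loop);
}
#endif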

#if EV_VERIFY
static void noinline
verify_watcher (EV_P_ W w)
{
  assert (("libev: watcher has invalid priority", ABSPRI (w) >= 0 && ABSPRI (w) < NUMPRI));

  if (w->pending)
    assert (("libev: pending watcher not on pending queue", pendings [ABSPRI (w)][w->pending - 1].w == w));
}

static void noinline
verify_heap (EV_P_ ANHE *heap, int N)
{
  int i;

  for (i = HEAP0; i < N + HEAP0; ++i)
    {
      assert (("libev: active index mismatch in heap", ev_active (ANHE_w (heap [i])) == i));
      assert (("libev: heap condition violated", i == HEAP0 || ANHE_at (heap [HPARENT (i)]) <= ANHE_at (heap [i])));
      assert (("libev: heap at cache mismatch", ANHE_at (heap [i]) == ev_at (ANHE_w (heap [i]))));

      verify_watcher (EV_A_ (W)ANHE_w (heap [i]));
    }
}

static void noinline
array_verify (EV_P_ W *ws, int cnt)
{
  while (cnt--)
    {
      assert (("libev: active index mismatch", ev_active (ws [cnt]) == cnt + 1));
      verify_watcher (EV_A_ ws [cnt]);
    }
}
#endif

#if EV_MINIMAL < 2
void
ev_loop_verify (EV_P)
{
#if EV_VERIFY
  int i;
  WL w;

  assert (activecnt >= -1);

  assert (fdchangemax >= fdchangecnt);
  for (i = 0; i < fdchangecnt; ++i)
    assert (("libev: negative fd in fdchanges", fdchanges [i] >= 0));

  assert (anfdmax >= 0);
  for (i = 0; i < anfdmax; ++i)
    for (w = anfds [i].head; w; w = w->next)
      {
        verify_watcher (EV_A_ (W)w);
        assert (("libev: inactive fd watcher on anfd list", ev_active (w) == 1));
        assert (("libev: fd mismatch between watcher and anfd", ((ev_io *)w)->fd == i));
      }

  assert (timermax >= timercnt);
  verify_heap (EV_A_ timers, timercnt);

#if EV_PERIODIC_ENABLE
  assert (periodicmax >= periodiccnt);
  verify_heap (EV_A_ periodics, periodiccnt);
#endif

  for (i = NUMPRI; i--; )
    {
      assert (pendingmax [i] >= pendingcnt [i]);
#if EV_IDLE_ENABLE
      assert (idleall >= 0);
      assert (idlemax [i] >= idlecnt [i]);
      array_verify (EV_A_ (W *)idles [i], idlecnt [i]);
#endif
    }

#if EV_FORK_ENABLE
  assert (forkmax >= forkcnt);
  array_verify (EV_A_ (W *)forks, forkcnt);
#endif

#if EV_ASYNC_ENABLE
  assert (asyncmax >= asynccnt);
  array_verify (EV_A_ (W *)asyncs, asynccnt);
#endif

  assert (preparemax >= preparecnt);
  array_verify (EV_A_ (W *)prepares, preparecnt);

  assert (checkmax >= checkcnt);
  array_verify (EV_A_ (W *)checks, checkcnt);

# if 0
  for (w = (ev_child *)childs [chain & (EV_PID_HASHSIZE - 1)]; w; w = (ev_child *)((WL)w)->next)
  for (signum = EV_NSIG; signum--; ) if (signals [signum].pending)
# endif
#endif
}
#endif

#if EV_MULTIPLICITY
struct ev_loop *
ev_default_loop_init (unsigned int flags)
#else
int
ev_default_loop (unsigned int flags)
#endif
{
  if (!ev_default_loop_ptr)
    {
#if EV_MULTIPLICITY
      EV_P = ev_default_loop_ptr = &default_loop_struct;
#else
      ev_default_loop_ptr = 1;
#endif

      loop_init (EV_A_ flags);

      if (ev_backend (EV_A))
        {
#ifndef _WIN32
          ev_signal_init (&childev, childcb, SIGCHLD);
          ev_set_priority (&childev, EV_MAXPRI);
          ev_signal_start (EV_A_ &childev);
          ev_unref (EV_A); /* child watcher should not keep loop alive */
#endif
        }
      else
        ev_default_loop_ptr = 0;
    }

  return ev_default_loop_ptr;
}

void
ev_default_destroy (void)
{
#if EV_MULTIPLICITY
  EV_P = ev_default_loop_ptr;
#endif

  ev_default_loop_ptr = 0;

#ifndef _WIN32
  ev_ref (EV_A); /* child watcher */
  ev_signal_stop (EV_A_ &childev);
#endif

  loop_destroy (EV_A);
}

void
ev_default_fork (void)
{
#if EV_MULTIPLICITY
  EV_P = ev_default_loop_ptr;
#endif

  postfork = 1; /* must be in line with ev_loop_fork */
}
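
/* Illustrative usage sketch (annotation, not part of libev): a process
 * that forks and wants to keep using the default loop in the child calls
 * ev_default_fork above (or ev_loop_fork for dynamically created loops);
 * kernel state is then re-created on the next loop iteration. */
#if 0
#include <unistd.h>
#include <ev.h>

static void
continue_in_child (void)
{
  pid_t pid = fork ();

  if (pid == 0)
    {
      ev_default_fork ();               /* child: mark the default loop */
      ev_loop (ev_default_loop (0), 0); /* then keep running watchers */
    }
}
#endif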

/*****************************************************************************/

void
ev_invoke (EV_P_ void *w, int revents)
{
  EV_CB_INVOKE ((W)w, revents);
}

unsigned int
ev_pending_count (EV_P)
{
  int pri;
  unsigned int count = 0;

  for (pri = NUMPRI; pri--; )
    count += pendingcnt [pri];

  return count;
}

void noinline
ev_invoke_pending (EV_P)
{
  int pri;

  for (pri = NUMPRI; pri--; )
    while (pendingcnt [pri])
      {
        ANPENDING *p = pendings [pri] + --pendingcnt [pri];

        /*assert (("libev: non-pending watcher on pending list", p->w->pending));*/
        /* ^ this is no longer true, as pending_w could be here */

        p->w->pending = 0;
        EV_CB_INVOKE (p->w, p->events);
        EV_FREQUENT_CHECK;
      }
}

#if EV_IDLE_ENABLE
/* make idle watchers pending. this handles the "call-idle */
/* only when higher priorities are idle" logic */
inline_size void
idle_reify (EV_P)
{
  if (expect_false (idleall))
    {
      int pri;

      for (pri = NUMPRI; pri--; )
        {
          if (pendingcnt [pri])
            break;

          if (idlecnt [pri])
            {
              queue_events (EV_A_ (W *)idles [pri], idlecnt [pri], EV_IDLE);
              break;
            }
        }
    }
}
#endif

/* make timers pending */
inline_size void
timers_reify (EV_P)
{
  EV_FREQUENT_CHECK;

  if (timercnt && ANHE_at (timers [HEAP0]) < mn_now)
    {
      do
        {
          ev_timer *w = (ev_timer *)ANHE_w (timers [HEAP0]);

          /*assert (("libev: inactive timer on timer heap detected", ev_is_active (w)));*/

          /* first reschedule or stop timer */
          if (w->repeat)
            {
              ev_at (w) += w->repeat;
              if (ev_at (w) < mn_now)
                ev_at (w) = mn_now;

              assert (("libev: negative ev_timer repeat value found while processing timers", w->repeat > 0.));

              ANHE_at_cache (timers [HEAP0]);
              downheap (timers, timercnt, HEAP0);
            }
          else
            ev_timer_stop (EV_A_ w); /* nonrepeating: stop timer */

          EV_FREQUENT_CHECK;
          feed_reverse (EV_A_ (W)w);
        }
      while (timercnt && ANHE_at (timers [HEAP0]) < mn_now);

      feed_reverse_done (EV_A_ EV_TIMEOUT);
    }
}

#if EV_PERIODIC_ENABLE
/* make periodics pending */
inline_size void
periodics_reify (EV_P)
{
  EV_FREQUENT_CHECK;

  while (periodiccnt && ANHE_at (periodics [HEAP0]) < ev_rt_now)
    {
      int feed_count = 0;

      do
        {
          ev_periodic *w = (ev_periodic *)ANHE_w (periodics [HEAP0]);

          /*assert (("libev: inactive timer on periodic heap detected", ev_is_active (w)));*/

          /* first reschedule or stop timer */
          if (w->reschedule_cb)
            {
              ev_at (w) = w->reschedule_cb (w, ev_rt_now);

              assert (("libev: ev_periodic reschedule callback returned time in the past", ev_at (w) >= ev_rt_now));

              ANHE_at_cache (periodics [HEAP0]);
              downheap (periodics, periodiccnt, HEAP0);
            }
          else if (w->interval)
            {
              ev_at (w) = w->offset + ceil ((ev_rt_now - w->offset) / w->interval) * w->interval;
              /* if next trigger time is not sufficiently in the future, put it there */
              /* this might happen because of floating point inexactness */
              if (ev_at (w) - ev_rt_now < TIME_EPSILON)
                {
                  ev_at (w) += w->interval;

                  /* if interval is unreasonably low we might still have a time in the past */
                  /* so correct this. this will make the periodic very inexact, but the user */
                  /* has effectively asked to get triggered more often than possible */
                  if (ev_at (w) < ev_rt_now)
                    ev_at (w) = ev_rt_now;
                }

              ANHE_at_cache (periodics [HEAP0]);
              downheap (periodics, periodiccnt, HEAP0);
            }
          else
            ev_periodic_stop (EV_A_ w); /* nonrepeating: stop timer */

          EV_FREQUENT_CHECK;
          feed_reverse (EV_A_ (W)w);
        }
      while (periodiccnt && ANHE_at (periodics [HEAP0]) < ev_rt_now);

      feed_reverse_done (EV_A_ EV_PERIODIC);
    }
}

/* simply recalculate all periodics */
/* TODO: maybe ensure that at least one event happens when jumping forward? */
static void noinline
periodics_reschedule (EV_P)
{
  int i;

  /* adjust periodics after time jump */
  for (i = HEAP0; i < periodiccnt + HEAP0; ++i)
    {
      ev_periodic *w = (ev_periodic *)ANHE_w (periodics [i]);

      if (w->reschedule_cb)
        ev_at (w) = w->reschedule_cb (w, ev_rt_now);
      else if (w->interval)
        ev_at (w) = w->offset + ceil ((ev_rt_now - w->offset) / w->interval) * w->interval;

      ANHE_at_cache (periodics [i]);
    }

  reheap (periodics, periodiccnt);
}
#endif
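
/* Worked example (annotation, not original libev text) for the interval
 * case above: with offset = 0., interval = 3600. and ev_rt_now = 5000.,
 * ceil ((5000. - 0.) / 3600.) * 3600. = 2 * 3600. = 7200., so after a
 * clock jump the watcher is re-anchored to the next full multiple of its
 * interval instead of being shifted along with the jump the way a
 * relative ev_timer would be. */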

/* adjust all timers by a given offset */
static void noinline
timers_reschedule (EV_P_ ev_tstamp adjust)
{
  int i;

  for (i = 0; i < timercnt; ++i)
    {
      ANHE *he = timers + i + HEAP0;
      ANHE_w (*he)->at += adjust;
      ANHE_at_cache (*he);
    }
}

/* fetch new monotonic and realtime times from the kernel */
/* also detect if there was a timejump, and act accordingly */
inline_speed void
time_update (EV_P_ ev_tstamp max_block)
{
#if EV_USE_MONOTONIC
  if (expect_true (have_monotonic))
    {
      int i;
      ev_tstamp odiff = rtmn_diff;

      mn_now = get_clock ();

      /* only fetch the realtime clock every 0.5*MIN_TIMEJUMP seconds */
      /* interpolate in the meantime */
      if (expect_true (mn_now - now_floor < MIN_TIMEJUMP * .5))
        {
          ev_rt_now = rtmn_diff + mn_now;
          return;
        }

      now_floor = mn_now;
      ev_rt_now = ev_time ();

      /* loop a few times, before making important decisions.
       * on the choice of "4": one iteration isn't enough,
       * in case we get preempted during the calls to
       * ev_time and get_clock. a second call is almost guaranteed
       * to succeed in that case, though. and looping a few more times
       * doesn't hurt either as we only do this on time-jumps or
       * in the unlikely event of having been preempted here.
       */
      for (i = 4; --i; )
        {
          rtmn_diff = ev_rt_now - mn_now;

          if (expect_true (fabs (odiff - rtmn_diff) < MIN_TIMEJUMP))
            return; /* all is well */

          ev_rt_now = ev_time ();
          mn_now    = get_clock ();
          now_floor = mn_now;
        }

      /* no timer adjustment, as the monotonic clock doesn't jump */
      /* timers_reschedule (EV_A_ rtmn_diff - odiff) */
# if EV_PERIODIC_ENABLE
      periodics_reschedule (EV_A);
# endif
    }
  else
#endif
    {
      ev_rt_now = ev_time ();

      if (expect_false (mn_now > ev_rt_now || ev_rt_now > mn_now + max_block + MIN_TIMEJUMP))
        {
          /* adjust timers. this is easy, as the offset is the same for all of them */
          timers_reschedule (EV_A_ ev_rt_now - mn_now);
#if EV_PERIODIC_ENABLE
          periodics_reschedule (EV_A);
#endif
        }

      mn_now = ev_rt_now;
    }
}

void
ev_loop (EV_P_ int flags)
{
#if EV_MINIMAL < 2
  ++loop_depth;
#endif

  assert (("libev: ev_loop recursion during release detected", loop_done != EVUNLOOP_RECURSE));

  loop_done = EVUNLOOP_CANCEL;

  EV_INVOKE_PENDING; /* in case we recurse, ensure ordering stays nice and clean */

  do
    {
#if EV_VERIFY >= 2
      ev_loop_verify (EV_A);
#endif

#ifndef _WIN32
      if (expect_false (curpid)) /* penalise the forking check even more */
        if (expect_false (getpid () != curpid))
          {
            curpid = getpid ();
            postfork = 1;
          }
#endif

#if EV_FORK_ENABLE
      /* we might have forked, so queue fork handlers */
      if (expect_false (postfork))
        if (forkcnt)
          {
            queue_events (EV_A_ (W *)forks, forkcnt, EV_FORK);
            EV_INVOKE_PENDING;
          }
#endif

      /* queue prepare watchers (and execute them) */
      if (expect_false (preparecnt))
        {
          queue_events (EV_A_ (W *)prepares, preparecnt, EV_PREPARE);
          EV_INVOKE_PENDING;
        }

      if (expect_false (loop_done))
        break;

      /* we might have forked, so reify kernel state if necessary */
      if (expect_false (postfork))
        loop_fork (EV_A);

      /* update fd-related kernel structures */
      fd_reify (EV_A);

      /* calculate blocking time */
      {
        ev_tstamp waittime  = 0.;
        ev_tstamp sleeptime = 0.;

        if (expect_true (!(flags & EVLOOP_NONBLOCK || idleall || !activecnt)))
          {
            /* remember old timestamp for io_blocktime calculation */
            ev_tstamp prev_mn_now = mn_now;

            /* update time to cancel out callback processing overhead */
            time_update (EV_A_ 1e100);

            waittime = MAX_BLOCKTIME;

            if (timercnt)
              {
                ev_tstamp to = ANHE_at (timers [HEAP0]) - mn_now + backend_fudge;
                if (waittime > to) waittime = to;
              }

#if EV_PERIODIC_ENABLE
            if (periodiccnt)
              {
                ev_tstamp to = ANHE_at (periodics [HEAP0]) - ev_rt_now + backend_fudge;
                if (waittime > to) waittime = to;
              }
#endif

            /* don't let timeouts decrease the waittime below timeout_blocktime */
            if (expect_false (waittime < timeout_blocktime))
              waittime = timeout_blocktime;

            /* extra check because io_blocktime is commonly 0 */
            if (expect_false (io_blocktime))
              {
                sleeptime = io_blocktime - (mn_now - prev_mn_now);

                if (sleeptime > waittime - backend_fudge)
                  sleeptime = waittime - backend_fudge;

                if (expect_true (sleeptime > 0.))
                  {
                    ev_sleep (sleeptime);
                    waittime -= sleeptime;
                  }
              }
          }

#if EV_MINIMAL < 2
        ++loop_count;
#endif
        assert ((loop_done = EVUNLOOP_RECURSE, 1)); /* assert for side effect */
        backend_poll (EV_A_ waittime);
        assert ((loop_done = EVUNLOOP_CANCEL, 1)); /* assert for side effect */

        /* update ev_rt_now, do magic */
        time_update (EV_A_ waittime + sleeptime);
      }

      /* queue pending timers and reschedule them */
      timers_reify (EV_A); /* relative timers called last */
#if EV_PERIODIC_ENABLE
      periodics_reify (EV_A); /* absolute timers called first */
#endif

#if EV_IDLE_ENABLE
      /* queue idle watchers unless other events are pending */
      idle_reify (EV_A);
#endif

      /* queue check watchers, to be executed first */
      if (expect_false (checkcnt))
        queue_events (EV_A_ (W *)checks, checkcnt, EV_CHECK);

      EV_INVOKE_PENDING;
    }
  while (expect_true (
    activecnt
    && !loop_done
    && !(flags & (EVLOOP_ONESHOT | EVLOOP_NONBLOCK))
  ));

  if (loop_done == EVUNLOOP_ONE)
    loop_done = EVUNLOOP_CANCEL;

#if EV_MINIMAL < 2
  --loop_depth;
#endif
}

void
ev_unloop (EV_P_ int how)
{
  loop_done = how;
}
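
/* Illustrative usage sketch (annotation, not part of libev): ev_unloop
 * above is how a callback asks ev_loop to return - EVUNLOOP_ONE leaves
 * only the innermost ev_loop call, EVUNLOOP_ALL unwinds all nested calls.
 * Example of a clean shutdown from a signal watcher. */
#if 0
#include <signal.h>
#include <ev.h>

static void
sigint_cb (EV_P_ ev_signal *w, int revents)
{
  ev_unloop (EV_A_ EVUNLOOP_ALL);
}

/* setup:
 *   ev_signal sigw;
 *   ev_signal_init (&sigw, sigint_cb, SIGINT);
 *   ev_signal_start (loop, &sigw);
 */
#endif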

void
ev_ref (EV_P)
{
  ++activecnt;
}

void
ev_unref (EV_P)
{
  --activecnt;
}

void
ev_now_update (EV_P)
{
  time_update (EV_A_ 1e100);
}

void
ev_suspend (EV_P)
{
  ev_now_update (EV_A);
}

void
ev_resume (EV_P)
{
  ev_tstamp mn_prev = mn_now;

  ev_now_update (EV_A);
  timers_reschedule (EV_A_ mn_now - mn_prev);
#if EV_PERIODIC_ENABLE
  /* TODO: really do this? */
  periodics_reschedule (EV_A);
#endif
}

/*****************************************************************************/
/* singly-linked list management, used when the expected list length is short */

inline_size void
wlist_add (WL *head, WL elem)
{
  elem->next = *head;
  *head = elem;
}

inline_size void
wlist_del (WL *head, WL elem)
{
  while (*head)
    {
      if (expect_true (*head == elem))
        {
          *head = elem->next;
          break;
        }

      head = &(*head)->next;
    }
}

/* internal, faster, version of ev_clear_pending */
inline_speed void
clear_pending (EV_P_ W w)
{
  if (w->pending)
    {
      pendings [ABSPRI (w)][w->pending - 1].w = (W)&pending_w;
      w->pending = 0;
    }
}

int
ev_clear_pending (EV_P_ void *w)
{
  W w_ = (W)w;
  int pending = w_->pending;

  if (expect_true (pending))
    {
      ANPENDING *p = pendings [ABSPRI (w_)] + pending - 1;
      p->w = (W)&pending_w;
      w_->pending = 0;
      return p->events;
    }
  else
    return 0;
}

inline_size void
pri_adjust (EV_P_ W w)
{
  int pri = ev_priority (w);
  pri = pri < EV_MINPRI ? EV_MINPRI : pri;
  pri = pri > EV_MAXPRI ? EV_MAXPRI : pri;
  ev_set_priority (w, pri);
}

inline_speed void
ev_start (EV_P_ W w, int active)
{
  pri_adjust (EV_A_ w);
  w->active = active;
  ev_ref (EV_A);
}

inline_size void
ev_stop (EV_P_ W w)
{
  ev_unref (EV_A);
  w->active = 0;
}

/*****************************************************************************/

void noinline
ev_io_start (EV_P_ ev_io *w)
{
  int fd = w->fd;

  if (expect_false (ev_is_active (w)))
    return;

  assert (("libev: ev_io_start called with negative fd", fd >= 0));
  assert (("libev: ev_io_start called with illegal event mask", !(w->events & ~(EV__IOFDSET | EV_READ | EV_WRITE))));

  EV_FREQUENT_CHECK;

  ev_start (EV_A_ (W)w, 1);
  array_needsize (ANFD, anfds, anfdmax, fd + 1, array_init_zero);
  wlist_add (&anfds[fd].head, (WL)w);

  fd_change (EV_A_ fd, w->events & EV__IOFDSET | EV_ANFD_REIFY);
  w->events &= ~EV__IOFDSET;

  EV_FREQUENT_CHECK;
}

void noinline
ev_io_stop (EV_P_ ev_io *w)
{
  clear_pending (EV_A_ (W)w);
  if (expect_false (!ev_is_active (w)))
    return;

  assert (("libev: ev_io_stop called with illegal fd (must stay constant after start!)", w->fd >= 0 && w->fd < anfdmax));

  EV_FREQUENT_CHECK;

  wlist_del (&anfds[w->fd].head, (WL)w);
  ev_stop (EV_A_ (W)w);

  fd_change (EV_A_ w->fd, 1);

  EV_FREQUENT_CHECK;
}
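
/* Illustrative usage sketch (annotation, not part of libev): minimal use
 * of the ev_io start/stop pair above - watch stdin for readability, then
 * stop the watcher and leave the loop from within the callback. */
#if 0
#include <ev.h>

static void
stdin_cb (EV_P_ ev_io *w, int revents)
{
  ev_io_stop (EV_A_ w);           /* one-shot: stop after the first event */
  ev_unloop (EV_A_ EVUNLOOP_ALL);
}

int
main (void)
{
  struct ev_loop *loop = ev_default_loop (0);
  ev_io stdin_watcher;

  ev_io_init (&stdin_watcher, stdin_cb, /* STDIN_FILENO */ 0, EV_READ);
  ev_io_start (loop, &stdin_watcher);

  ev_loop (loop, 0);
  return 0;
}
#endif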

void noinline
ev_timer_start (EV_P_ ev_timer *w)
{
  if (expect_false (ev_is_active (w)))
    return;

  ev_at (w) += mn_now;

  assert (("libev: ev_timer_start called with negative timer repeat value", w->repeat >= 0.));

  EV_FREQUENT_CHECK;

  ++timercnt;
  ev_start (EV_A_ (W)w, timercnt + HEAP0 - 1);
  array_needsize (ANHE, timers, timermax, ev_active (w) + 1, EMPTY2);
  ANHE_w (timers [ev_active (w)]) = (WT)w;
  ANHE_at_cache (timers [ev_active (w)]);
  upheap (timers, ev_active (w));

  EV_FREQUENT_CHECK;

  /*assert (("libev: internal timer heap corruption", timers [ev_active (w)] == (WT)w));*/
}

void noinline
ev_timer_stop (EV_P_ ev_timer *w)
{
  clear_pending (EV_A_ (W)w);
  if (expect_false (!ev_is_active (w)))
    return;

  EV_FREQUENT_CHECK;

  {
    int active = ev_active (w);

    assert (("libev: internal timer heap corruption", ANHE_w (timers [active]) == (WT)w));

    --timercnt;

    if (expect_true (active < timercnt + HEAP0))
      {
        timers [active] = timers [timercnt + HEAP0];
        adjustheap (timers, timercnt, active);
      }
  }

  ev_at (w) -= mn_now;

  ev_stop (EV_A_ (W)w);

  EV_FREQUENT_CHECK;
}

void noinline
ev_timer_again (EV_P_ ev_timer *w)
{
  EV_FREQUENT_CHECK;

  if (ev_is_active (w))
    {
      if (w->repeat)
        {
          ev_at (w) = mn_now + w->repeat;
          ANHE_at_cache (timers [ev_active (w)]);
          adjustheap (timers, timercnt, ev_active (w));
        }
      else
        ev_timer_stop (EV_A_ w);
    }
  else if (w->repeat)
    {
      ev_at (w) = w->repeat;
      ev_timer_start (EV_A_ w);
    }

  EV_FREQUENT_CHECK;
}

ev_tstamp
ev_timer_remaining (EV_P_ ev_timer *w)
{
  return ev_at (w) - (ev_is_active (w) ? mn_now : 0.);
}
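
/* Illustrative usage sketch (annotation, not part of libev): ev_timer_again
 * above is the idiomatic way to implement an inactivity timeout - re-arm
 * the repeat value on every activity instead of stopping and restarting
 * the watcher. The 60-second value is only an example. */
#if 0
#include <ev.h>

static ev_timer timeout_watcher;

static void
timeout_cb (EV_P_ ev_timer *w, int revents)
{
  /* no activity for 60 seconds */
}

static void
on_activity (EV_P)
{
  /* repeat stays at 60., so this simply pushes the deadline back */
  ev_timer_again (EV_A_ &timeout_watcher);
}

/* setup:
 *   ev_timer_init (&timeout_watcher, timeout_cb, 0., 60.);
 *   ev_timer_again (loop, &timeout_watcher);
 */
#endif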

#if EV_PERIODIC_ENABLE
void noinline
ev_periodic_start (EV_P_ ev_periodic *w)
{
  if (expect_false (ev_is_active (w)))
    return;

  if (w->reschedule_cb)
    ev_at (w) = w->reschedule_cb (w, ev_rt_now);
  else if (w->interval)
    {
      assert (("libev: ev_periodic_start called with negative interval value", w->interval >= 0.));
      /* this formula differs from the one in periodic_reify because we do not always round up */
      ev_at (w) = w->offset + ceil ((ev_rt_now - w->offset) / w->interval) * w->interval;
    }
  else
    ev_at (w) = w->offset;

  EV_FREQUENT_CHECK;

  ++periodiccnt;
  ev_start (EV_A_ (W)w, periodiccnt + HEAP0 - 1);
  array_needsize (ANHE, periodics, periodicmax, ev_active (w) + 1, EMPTY2);
  ANHE_w (periodics [ev_active (w)]) = (WT)w;
  ANHE_at_cache (periodics [ev_active (w)]);
  upheap (periodics, ev_active (w));

  EV_FREQUENT_CHECK;

  /*assert (("libev: internal periodic heap corruption", ANHE_w (periodics [ev_active (w)]) == (WT)w));*/
}

void noinline
ev_periodic_stop (EV_P_ ev_periodic *w)
{
  clear_pending (EV_A_ (W)w);
  if (expect_false (!ev_is_active (w)))
    return;

  EV_FREQUENT_CHECK;

  {
    int active = ev_active (w);

    assert (("libev: internal periodic heap corruption", ANHE_w (periodics [active]) == (WT)w));

    --periodiccnt;

    if (expect_true (active < periodiccnt + HEAP0))
      {
        periodics [active] = periodics [periodiccnt + HEAP0];
        adjustheap (periodics, periodiccnt, active);
      }
  }

  ev_stop (EV_A_ (W)w);

  EV_FREQUENT_CHECK;
}

void noinline
ev_periodic_again (EV_P_ ev_periodic *w)
{
  /* TODO: use adjustheap and recalculation */
  ev_periodic_stop (EV_A_ w);
  ev_periodic_start (EV_A_ w);
}
#endif
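
/* Illustrative usage sketch (annotation, not part of libev): an ev_periodic
 * with offset 0. and interval 3600. fires at every full hour of wallclock
 * time and, unlike an ev_timer, follows realtime clock jumps (see
 * periodics_reschedule above). */
#if 0
#include <ev.h>

static void
hourly_cb (EV_P_ ev_periodic *w, int revents)
{
  /* runs at :00 of every hour, UTC */
}

/* setup:
 *   ev_periodic hourly;
 *   ev_periodic_init (&hourly, hourly_cb, 0., 3600., 0);
 *   ev_periodic_start (loop, &hourly);
 */
#endif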

#ifndef SA_RESTART
# define SA_RESTART 0
#endif

void noinline
ev_signal_start (EV_P_ ev_signal *w)
{
  if (expect_false (ev_is_active (w)))
    return;

  assert (("libev: ev_signal_start called with illegal signal number", w->signum > 0 && w->signum < EV_NSIG));

#if EV_MULTIPLICITY
  assert (("libev: a signal must not be attached to two different loops",
           !signals [w->signum - 1].loop || signals [w->signum - 1].loop == loop));

  signals [w->signum - 1].loop = EV_A;
#endif

  EV_FREQUENT_CHECK;

#if EV_USE_SIGNALFD
  if (sigfd == -2)
    {
      sigfd = signalfd (-1, &sigfd_set, SFD_NONBLOCK | SFD_CLOEXEC);
      if (sigfd < 0 && errno == EINVAL)
        sigfd = signalfd (-1, &sigfd_set, 0); /* retry without flags */

      if (sigfd >= 0)
        {
          fd_intern (sigfd); /* doing it twice will not hurt */

          sigemptyset (&sigfd_set);

          ev_io_init (&sigfd_w, sigfdcb, sigfd, EV_READ);
          ev_set_priority (&sigfd_w, EV_MAXPRI);
          ev_io_start (EV_A_ &sigfd_w);
          ev_unref (EV_A); /* signalfd watcher should not keep loop alive */
        }
    }

  if (sigfd >= 0)
    {
      /* TODO: check .head */
      sigaddset (&sigfd_set, w->signum);
      sigprocmask (SIG_BLOCK, &sigfd_set, 0);

      signalfd (sigfd, &sigfd_set, 0);
    }
#endif

  ev_start (EV_A_ (W)w, 1);
  wlist_add (&signals [w->signum - 1].head, (WL)w);

  if (!((WL)w)->next)
# if EV_USE_SIGNALFD
    if (sigfd < 0) /*TODO*/
# endif
      {
# ifdef _WIN32
        evpipe_init (EV_A);

        signal (w->signum, ev_sighandler);
# else
        struct sigaction sa;

        evpipe_init (EV_A);

        sa.sa_handler = ev_sighandler;
        sigfillset (&sa.sa_mask);
        sa.sa_flags = SA_RESTART; /* if restarting works we save one iteration */
        sigaction (w->signum, &sa, 0);

        sigemptyset (&sa.sa_mask);
        sigaddset (&sa.sa_mask, w->signum);
        sigprocmask (SIG_UNBLOCK, &sa.sa_mask, 0);
#endif
      }

  EV_FREQUENT_CHECK;
}

void noinline
ev_signal_stop (EV_P_ ev_signal *w)
{
  clear_pending (EV_A_ (W)w);
  if (expect_false (!ev_is_active (w)))
    return;

  EV_FREQUENT_CHECK;

  wlist_del (&signals [w->signum - 1].head, (WL)w);
  ev_stop (EV_A_ (W)w);

  if (!signals [w->signum - 1].head)
    {
#if EV_MULTIPLICITY
      signals [w->signum - 1].loop = 0; /* unattach from signal */
#endif
#if EV_USE_SIGNALFD
      if (sigfd >= 0)
        {
          sigset_t ss;

          sigemptyset (&ss);
          sigaddset (&ss, w->signum);
          sigdelset (&sigfd_set, w->signum);

          signalfd (sigfd, &sigfd_set, 0);
          sigprocmask (SIG_UNBLOCK, &ss, 0);
        }
      else
#endif
        signal (w->signum, SIG_DFL);
    }

  EV_FREQUENT_CHECK;
}

void
ev_child_start (EV_P_ ev_child *w)
{
#if EV_MULTIPLICITY
  assert (("libev: child watchers are only supported in the default loop", loop == ev_default_loop_ptr));
#endif
  if (expect_false (ev_is_active (w)))
    return;

  EV_FREQUENT_CHECK;

  ev_start (EV_A_ (W)w, 1);
  wlist_add (&childs [w->pid & (EV_PID_HASHSIZE - 1)], (WL)w);

  EV_FREQUENT_CHECK;
}

void
ev_child_stop (EV_P_ ev_child *w)
{
  clear_pending (EV_A_ (W)w);
  if (expect_false (!ev_is_active (w)))
    return;

  EV_FREQUENT_CHECK;

  wlist_del (&childs [w->pid & (EV_PID_HASHSIZE - 1)], (WL)w);
  ev_stop (EV_A_ (W)w);

  EV_FREQUENT_CHECK;
}
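
/* Illustrative usage sketch (annotation, not part of libev): child
 * watchers build on the SIGCHLD machinery above (childcb/child_reap) and
 * are only supported in the default loop. Example of reaping one forked
 * child; spawn_and_watch is a hypothetical helper. */
#if 0
#include <unistd.h>
#include <ev.h>

static void
child_cb (EV_P_ ev_child *w, int revents)
{
  ev_child_stop (EV_A_ w);
  /* w->rpid is the pid that changed status, w->rstatus the wait status */
}

static void
spawn_and_watch (struct ev_loop *loop, ev_child *cw)
{
  pid_t pid = fork ();

  if (pid == 0)
    _exit (0); /* child */

  ev_child_init (cw, child_cb, pid, 0); /* 0: report exits only, not stops */
  ev_child_start (loop, cw);            /* loop must be the default loop */
}
#endif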
2863 |
|
|
|
2864 |
root |
1.140 |
#if EV_STAT_ENABLE |
2865 |
|
|
|
2866 |
|
|
# ifdef _WIN32 |
2867 |
root |
1.146 |
# undef lstat |
2868 |
|
|
# define lstat(a,b) _stati64 (a,b) |
2869 |
root |
1.140 |
# endif |
2870 |
|
|
|
2871 |
root |
1.273 |
#define DEF_STAT_INTERVAL 5.0074891 |
2872 |
|
|
#define NFS_STAT_INTERVAL 30.1074891 /* for filesystems potentially failing inotify */ |
2873 |
|
|
#define MIN_STAT_INTERVAL 0.1074891 |
2874 |
root |
1.143 |
|
2875 |
root |
1.157 |
static void noinline stat_timer_cb (EV_P_ ev_timer *w_, int revents); |
2876 |
root |
1.152 |
|
2877 |
|
|
#if EV_USE_INOTIFY |
2878 |
root |
1.326 |
|
2879 |
|
|
/* the * 2 is to allow for alignment padding, which for some reason is >> 8 */ |
2880 |
|
|
# define EV_INOTIFY_BUFSIZE (sizeof (struct inotify_event) * 2 + NAME_MAX) |
2881 |
root |
1.152 |
|
2882 |
|
|
static void noinline |
2883 |
|
|
infy_add (EV_P_ ev_stat *w) |
2884 |
|
|
{ |
2885 |
|
|
w->wd = inotify_add_watch (fs_fd, w->path, IN_ATTRIB | IN_DELETE_SELF | IN_MOVE_SELF | IN_MODIFY | IN_DONT_FOLLOW | IN_MASK_ADD); |
2886 |
|
|
|
2887 |
root |
1.318 |
if (w->wd >= 0) |
2888 |
root |
1.152 |
{ |
2889 |
root |
1.318 |
struct statfs sfs; |
2890 |
|
|
|
2891 |
|
|
/* now local changes will be tracked by inotify, but remote changes won't */ |
2892 |
|
|
/* unless the filesystem is known to be local, we therefore still poll */ |
2893 |
|
|
/* also do poll on <2.6.25, but with normal frequency */ |
2894 |
|
|
|
2895 |
|
|
if (!fs_2625) |
2896 |
|
|
w->timer.repeat = w->interval ? w->interval : DEF_STAT_INTERVAL; |
2897 |
|
|
else if (!statfs (w->path, &sfs) |
2898 |
|
|
&& (sfs.f_type == 0x1373 /* devfs */ |
2899 |
|
|
|| sfs.f_type == 0xEF53 /* ext2/3 */ |
2900 |
|
|
|| sfs.f_type == 0x3153464a /* jfs */ |
2901 |
|
|
|| sfs.f_type == 0x52654973 /* reiser3 */ |
2902 |
|
|
|| sfs.f_type == 0x01021994 /* tempfs */ |
2903 |
|
|
|| sfs.f_type == 0x58465342 /* xfs */)) |
2904 |
|
|
w->timer.repeat = 0.; /* filesystem is local, kernel new enough */ |
2905 |
|
|
else |
2906 |
|
|
w->timer.repeat = w->interval ? w->interval : NFS_STAT_INTERVAL; /* remote, use reduced frequency */ |
2907 |
|
|
} |
2908 |
|
|
else |
2909 |
|
|
{ |
2910 |
|
|
/* can't use inotify, continue to stat */ |
2911 |
root |
1.273 |
w->timer.repeat = w->interval ? w->interval : DEF_STAT_INTERVAL; |
2912 |
root |
1.152 |
|
2913 |
root |
1.318 |
/* if path is not there, monitor some parent directory for speedup hints */ |
2914 |
root |
1.271 |
/* note that exceeding the hardcoded path limit is not a correctness issue, */ |
2915 |
root |
1.233 |
/* but an efficiency issue only */ |
2916 |
root |
1.153 |
if ((errno == ENOENT || errno == EACCES) && strlen (w->path) < 4096) |
2917 |
root |
1.152 |
{ |
2918 |
root |
1.153 |
char path [4096]; |
2919 |
root |
1.152 |
strcpy (path, w->path); |
2920 |
|
|
|
2921 |
|
|
do |
2922 |
|
|
{ |
2923 |
|
|
int mask = IN_MASK_ADD | IN_DELETE_SELF | IN_MOVE_SELF |
2924 |
|
|
| (errno == EACCES ? IN_ATTRIB : IN_CREATE | IN_MOVED_TO); |
2925 |
|
|
|
2926 |
|
|
char *pend = strrchr (path, '/'); |
2927 |
|
|
|
2928 |
root |
1.275 |
if (!pend || pend == path) |
2929 |
|
|
break; |
2930 |
root |
1.152 |
|
2931 |
|
|
*pend = 0; |
2932 |
root |
1.153 |
w->wd = inotify_add_watch (fs_fd, path, mask); |
2933 |
root |
1.152 |
} |
2934 |
|
|
while (w->wd < 0 && (errno == ENOENT || errno == EACCES)); |
2935 |
|
|
} |
2936 |
|
|
} |
2937 |
root |
1.275 |
|
2938 |
|
|
if (w->wd >= 0) |
2939 |
root |
1.318 |
wlist_add (&fs_hash [w->wd & (EV_INOTIFY_HASHSIZE - 1)].head, (WL)w); |
2940 |
root |
1.152 |
|
2941 |
root |
1.318 |
/* now re-arm timer, if required */ |
2942 |
|
|
if (ev_is_active (&w->timer)) ev_ref (EV_A); |
2943 |
|
|
ev_timer_again (EV_A_ &w->timer); |
2944 |
|
|
if (ev_is_active (&w->timer)) ev_unref (EV_A); |
2945 |
root |
1.152 |
} |
2946 |
|
|
|
2947 |
|
|
static void noinline |
2948 |
|
|
infy_del (EV_P_ ev_stat *w) |
2949 |
|
|
{ |
2950 |
|
|
int slot; |
2951 |
|
|
int wd = w->wd; |
2952 |
|
|
|
2953 |
|
|
if (wd < 0) |
2954 |
|
|
return; |
2955 |
|
|
|
2956 |
|
|
w->wd = -2; |
2957 |
|
|
slot = wd & (EV_INOTIFY_HASHSIZE - 1); |
2958 |
|
|
wlist_del (&fs_hash [slot].head, (WL)w); |
2959 |
|
|
|
2960 |
|
|
/* remove this watcher, if others are watching it, they will rearm */ |
2961 |
|
|
inotify_rm_watch (fs_fd, wd); |
2962 |
|
|
} |

static void noinline
infy_wd (EV_P_ int slot, int wd, struct inotify_event *ev)
{
  if (slot < 0)
    /* overflow, need to check for all hash slots */
    for (slot = 0; slot < EV_INOTIFY_HASHSIZE; ++slot)
      infy_wd (EV_A_ slot, wd, ev);
  else
    {
      WL w_;

      for (w_ = fs_hash [slot & (EV_INOTIFY_HASHSIZE - 1)].head; w_; )
        {
          ev_stat *w = (ev_stat *)w_;
          w_ = w_->next; /* lets us remove this watcher and all before it */

          if (w->wd == wd || wd == -1)
            {
              if (ev->mask & (IN_IGNORED | IN_UNMOUNT | IN_DELETE_SELF))
                {
                  wlist_del (&fs_hash [slot & (EV_INOTIFY_HASHSIZE - 1)].head, (WL)w);
                  w->wd = -1;
                  infy_add (EV_A_ w); /* re-add, no matter what */
                }

              stat_timer_cb (EV_A_ &w->timer, 0);
            }
        }
    }
}

static void
infy_cb (EV_P_ ev_io *w, int revents)
{
  char buf [EV_INOTIFY_BUFSIZE];
  int ofs;
  int len = read (fs_fd, buf, sizeof (buf));

  for (ofs = 0; ofs < len; )
    {
      struct inotify_event *ev = (struct inotify_event *)(buf + ofs);
      infy_wd (EV_A_ ev->wd, ev->wd, ev);
      ofs += sizeof (struct inotify_event) + ev->len;
    }
}

inline_size unsigned int
ev_linux_version (void)
{
  struct utsname buf;
  unsigned int v = 0; /* start from zero so the packed result has defined high bits */
  int i;
  char *p = buf.release;

  if (uname (&buf))
    return 0;

  for (i = 3+1; --i; )
    {
      unsigned int c = 0;

      for (;;)
        {
          if (*p >= '0' && *p <= '9')
            c = c * 10 + *p++ - '0';
          else
            {
              p += *p == '.';
              break;
            }
        }

      v = (v << 8) | c;
    }

  return v;
}
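
/* Worked example (illustrative only): a release string such as "2.6.25-foo"
 * yields the components 2, 6 and 25, packed as
 *   (2 << 16) | (6 << 8) | 25 = 0x020619,
 * which is exactly the constant ev_check_2625 below compares against. */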

inline_size void
ev_check_2625 (EV_P)
{
  /* kernels < 2.6.25 are borked
   * http://www.ussg.indiana.edu/hypermail/linux/kernel/0711.3/1208.html
   */
  if (ev_linux_version () < 0x020619)
    return;

  fs_2625 = 1;
}

inline_size int
infy_newfd (void)
{
#if defined (IN_CLOEXEC) && defined (IN_NONBLOCK)
  int fd = inotify_init1 (IN_CLOEXEC | IN_NONBLOCK);
  if (fd >= 0)
    return fd;
#endif
  return inotify_init ();
}
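
/* Illustrative sketch (not libev code): when inotify_init1 () is not available
 * and the plain inotify_init () path is taken, the descriptor still has to be
 * made close-on-exec and non-blocking afterwards - which is roughly what the
 * fd_intern () calls in infy_init ()/infy_fork () below are for.  Assuming
 * <fcntl.h>, the manual equivalent would be something like this
 * (make_cloexec_nonblock is a hypothetical helper name): */
#if 0
static void
make_cloexec_nonblock (int fd)
{
  fcntl (fd, F_SETFD, FD_CLOEXEC);                           /* don't leak the fd into children */
  fcntl (fd, F_SETFL, fcntl (fd, F_GETFL, 0) | O_NONBLOCK);  /* never block the loop on read */
}
#endif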

inline_size void
infy_init (EV_P)
{
  if (fs_fd != -2)
    return;

  fs_fd = -1;

  ev_check_2625 (EV_A);

  fs_fd = infy_newfd ();

  if (fs_fd >= 0)
    {
      fd_intern (fs_fd);
      ev_io_init (&fs_w, infy_cb, fs_fd, EV_READ);
      ev_set_priority (&fs_w, EV_MAXPRI);
      ev_io_start (EV_A_ &fs_w);
      ev_unref (EV_A);
    }
}

inline_size void
infy_fork (EV_P)
{
  int slot;

  if (fs_fd < 0)
    return;

  ev_ref (EV_A);
  ev_io_stop (EV_A_ &fs_w);
  close (fs_fd);
  fs_fd = infy_newfd ();

  if (fs_fd >= 0)
    {
      fd_intern (fs_fd);
      ev_io_set (&fs_w, fs_fd, EV_READ);
      ev_io_start (EV_A_ &fs_w);
      ev_unref (EV_A);
    }

  for (slot = 0; slot < EV_INOTIFY_HASHSIZE; ++slot)
    {
      WL w_ = fs_hash [slot].head;
      fs_hash [slot].head = 0;

      while (w_)
        {
          ev_stat *w = (ev_stat *)w_;
          w_ = w_->next; /* lets us add this watcher */

          w->wd = -1;

          if (fs_fd >= 0)
            infy_add (EV_A_ w); /* re-add, no matter what */
          else
            {
              w->timer.repeat = w->interval ? w->interval : DEF_STAT_INTERVAL;
              if (ev_is_active (&w->timer)) ev_ref (EV_A);
              ev_timer_again (EV_A_ &w->timer);
              if (ev_is_active (&w->timer)) ev_unref (EV_A);
            }
        }
    }
}

#endif

#ifdef _WIN32
# define EV_LSTAT(p,b) _stati64 (p, b)
#else
# define EV_LSTAT(p,b) lstat (p, b)
#endif

void
ev_stat_stat (EV_P_ ev_stat *w)
{
  if (lstat (w->path, &w->attr) < 0)
    w->attr.st_nlink = 0;
  else if (!w->attr.st_nlink)
    w->attr.st_nlink = 1;
}

static void noinline
stat_timer_cb (EV_P_ ev_timer *w_, int revents)
{
  ev_stat *w = (ev_stat *)(((char *)w_) - offsetof (ev_stat, timer));

  ev_statdata prev = w->attr;
  ev_stat_stat (EV_A_ w);

  /* memcmp doesn't work on netbsd: its struct stat carries padding/spare fields, so compare members explicitly */
  if (
    prev.st_dev != w->attr.st_dev
 || prev.st_ino != w->attr.st_ino
 || prev.st_mode != w->attr.st_mode
 || prev.st_nlink != w->attr.st_nlink
 || prev.st_uid != w->attr.st_uid
 || prev.st_gid != w->attr.st_gid
 || prev.st_rdev != w->attr.st_rdev
 || prev.st_size != w->attr.st_size
 || prev.st_atime != w->attr.st_atime
 || prev.st_mtime != w->attr.st_mtime
 || prev.st_ctime != w->attr.st_ctime
  ) {
      /* we only update w->prev on actual differences */
      /* in case we test more often than invoke the callback, */
      /* to ensure that prev is always different to attr */
      w->prev = prev;

      #if EV_USE_INOTIFY
        if (fs_fd >= 0)
          {
            infy_del (EV_A_ w);
            infy_add (EV_A_ w);
            ev_stat_stat (EV_A_ w); /* avoid race... */
          }
      #endif

      ev_feed_event (EV_A_ w, EV_STAT);
    }
}
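
/* Illustrative note (not libev code): the member-by-member comparison above is
 * deliberate - a struct stat may contain padding and spare fields whose bytes
 * carry no meaning, so a memcmp over the whole struct can report a "change"
 * when nothing user-visible changed.  A minimal sketch of the hazard, with
 * hypothetical names: */
#if 0
struct sample { char c; /* typically followed by padding bytes */ long l; };

static int
same_contents (const struct sample *a, const struct sample *b)
{
  /* reliable: compares only the named members */
  return a->c == b->c && a->l == b->l;
  /* unreliable: memcmp (a, b, sizeof (*a)) also compares the padding bytes */
}
#endif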

void
ev_stat_start (EV_P_ ev_stat *w)
{
  if (expect_false (ev_is_active (w)))
    return;

  ev_stat_stat (EV_A_ w);

  if (w->interval < MIN_STAT_INTERVAL && w->interval)
    w->interval = MIN_STAT_INTERVAL;

  ev_timer_init (&w->timer, stat_timer_cb, 0., w->interval ? w->interval : DEF_STAT_INTERVAL);
  ev_set_priority (&w->timer, ev_priority (w));

#if EV_USE_INOTIFY
  infy_init (EV_A);

  if (fs_fd >= 0)
    infy_add (EV_A_ w);
  else
#endif
    {
      ev_timer_again (EV_A_ &w->timer);
      ev_unref (EV_A);
    }

  ev_start (EV_A_ (W)w, 1);

  EV_FREQUENT_CHECK;
}

void
ev_stat_stop (EV_P_ ev_stat *w)
{
  clear_pending (EV_A_ (W)w);
  if (expect_false (!ev_is_active (w)))
    return;

  EV_FREQUENT_CHECK;

#if EV_USE_INOTIFY
  infy_del (EV_A_ w);
#endif

  if (ev_is_active (&w->timer))
    {
      ev_ref (EV_A);
      ev_timer_stop (EV_A_ &w->timer);
    }

  ev_stop (EV_A_ (W)w);

  EV_FREQUENT_CHECK;
}
#endif
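
/* Illustrative usage sketch (not part of this file): a typical ev_stat user
 * passes a path and a polling interval and inspects w->attr in the callback;
 * as ev_stat_stat () above shows, attr.st_nlink is forced to 0 when the path
 * cannot be stat'ed.  The path and interval below are arbitrary example
 * values. */
#if 0
#include <stdio.h>
#include <ev.h>

static void
passwd_cb (struct ev_loop *loop, ev_stat *w, int revents)
{
  if (!w->attr.st_nlink)
    printf ("%s is gone or inaccessible\n", w->path);
  else if (w->attr.st_mtime != w->prev.st_mtime)
    printf ("%s was modified\n", w->path);
}

int
main (void)
{
  struct ev_loop *loop = ev_default_loop (0);
  ev_stat passwd;

  ev_stat_init (&passwd, passwd_cb, "/etc/passwd", 10.);
  ev_stat_start (loop, &passwd);

  ev_run (loop, 0); /* ev_loop (loop, 0) in libev versions before 4.0 */
  return 0;
}
#endif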

#if EV_IDLE_ENABLE
void
ev_idle_start (EV_P_ ev_idle *w)
{
  if (expect_false (ev_is_active (w)))
    return;

  pri_adjust (EV_A_ (W)w);

  EV_FREQUENT_CHECK;

  {
    int active = ++idlecnt [ABSPRI (w)];

    ++idleall;
    ev_start (EV_A_ (W)w, active);

    array_needsize (ev_idle *, idles [ABSPRI (w)], idlemax [ABSPRI (w)], active, EMPTY2);
    idles [ABSPRI (w)][active - 1] = w;
  }

  EV_FREQUENT_CHECK;
}

void
ev_idle_stop (EV_P_ ev_idle *w)
{
  clear_pending (EV_A_ (W)w);
  if (expect_false (!ev_is_active (w)))
    return;

  EV_FREQUENT_CHECK;

  {
    int active = ev_active (w);

    idles [ABSPRI (w)][active - 1] = idles [ABSPRI (w)][--idlecnt [ABSPRI (w)]];
    ev_active (idles [ABSPRI (w)][active - 1]) = active;

    ev_stop (EV_A_ (W)w);
    --idleall;
  }

  EV_FREQUENT_CHECK;
}
#endif
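
/* Illustrative usage sketch (not part of this file): an idle watcher simply
 * takes a callback and is invoked whenever the loop has nothing else to do at
 * its priority.  The names below are example values. */
#if 0
#include <ev.h>

static void
idle_cb (struct ev_loop *loop, ev_idle *w, int revents)
{
  /* do some low-priority housekeeping, then stop so the loop can block again */
  ev_idle_stop (loop, w);
}

static void
start_idler (struct ev_loop *loop, ev_idle *idler)
{
  ev_idle_init (idler, idle_cb);
  ev_idle_start (loop, idler);
}
#endif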

void
ev_prepare_start (EV_P_ ev_prepare *w)
{
  if (expect_false (ev_is_active (w)))
    return;
