/cvs/libev/ev.c
Revision: 1.385
Committed: Wed Jul 20 01:04:03 2011 UTC (12 years, 9 months ago) by root
Content type: text/plain
Branch: MAIN
Changes since 1.384: +1 -5 lines
Log Message:
temporary fence hack

File Contents

# User Rev Content
1 root 1.17 /*
2 root 1.36 * libev event processing core, watcher management
3     *
4 root 1.366 * Copyright (c) 2007,2008,2009,2010,2011 Marc Alexander Lehmann <libev@schmorp.de>
5 root 1.17 * All rights reserved.
6     *
7 root 1.199 * Redistribution and use in source and binary forms, with or without modifica-
8     * tion, are permitted provided that the following conditions are met:
9 root 1.372 *
10 root 1.199 * 1. Redistributions of source code must retain the above copyright notice,
11     * this list of conditions and the following disclaimer.
12 root 1.372 *
13 root 1.199 * 2. Redistributions in binary form must reproduce the above copyright
14     * notice, this list of conditions and the following disclaimer in the
15     * documentation and/or other materials provided with the distribution.
16 root 1.372 *
17 root 1.199 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
18     * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MER-
19     * CHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
20     * EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPE-
21     * CIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
22     * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
23     * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
24     * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTH-
25     * ERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
26     * OF THE POSSIBILITY OF SUCH DAMAGE.
27 root 1.17 *
28 root 1.199 * Alternatively, the contents of this file may be used under the terms of
29     * the GNU General Public License ("GPL") version 2 or any later version,
30     * in which case the provisions of the GPL are applicable instead of
31     * the above. If you wish to allow the use of your version of this file
32     * only under the terms of the GPL and not to allow others to use your
33     * version of this file under the BSD license, indicate your decision
34     * by deleting the provisions above and replace them with the notice
35     * and other provisions required by the GPL. If you do not delete the
36     * provisions above, a recipient may use your version of this file under
37     * either the BSD or the GPL.
38 root 1.17 */
39 root 1.87
40 root 1.220 /* this big block deduces configuration from config.h */
41 root 1.59 #ifndef EV_STANDALONE
42 root 1.133 # ifdef EV_CONFIG_H
43     # include EV_CONFIG_H
44     # else
45     # include "config.h"
46     # endif
47 root 1.60
48 root 1.373 #if HAVE_FLOOR
49     # ifndef EV_USE_FLOOR
50     # define EV_USE_FLOOR 1
51     # endif
52     #endif
53    
54 root 1.274 # if HAVE_CLOCK_SYSCALL
55     # ifndef EV_USE_CLOCK_SYSCALL
56     # define EV_USE_CLOCK_SYSCALL 1
57     # ifndef EV_USE_REALTIME
58     # define EV_USE_REALTIME 0
59     # endif
60     # ifndef EV_USE_MONOTONIC
61     # define EV_USE_MONOTONIC 1
62     # endif
63     # endif
64 root 1.290 # elif !defined(EV_USE_CLOCK_SYSCALL)
65     # define EV_USE_CLOCK_SYSCALL 0
66 root 1.274 # endif
67    
68 root 1.60 # if HAVE_CLOCK_GETTIME
69 root 1.97 # ifndef EV_USE_MONOTONIC
70     # define EV_USE_MONOTONIC 1
71     # endif
72     # ifndef EV_USE_REALTIME
73 root 1.279 # define EV_USE_REALTIME 0
74 root 1.97 # endif
75 root 1.126 # else
76     # ifndef EV_USE_MONOTONIC
77     # define EV_USE_MONOTONIC 0
78     # endif
79     # ifndef EV_USE_REALTIME
80     # define EV_USE_REALTIME 0
81     # endif
82 root 1.60 # endif
83    
84 root 1.343 # if HAVE_NANOSLEEP
85     # ifndef EV_USE_NANOSLEEP
86     # define EV_USE_NANOSLEEP EV_FEATURE_OS
87     # endif
88     # else
89     # undef EV_USE_NANOSLEEP
90 root 1.193 # define EV_USE_NANOSLEEP 0
91     # endif
92    
93 root 1.343 # if HAVE_SELECT && HAVE_SYS_SELECT_H
94     # ifndef EV_USE_SELECT
95 root 1.339 # define EV_USE_SELECT EV_FEATURE_BACKENDS
96 root 1.127 # endif
97 root 1.343 # else
98     # undef EV_USE_SELECT
99     # define EV_USE_SELECT 0
100 root 1.60 # endif
101    
102 root 1.343 # if HAVE_POLL && HAVE_POLL_H
103     # ifndef EV_USE_POLL
104 root 1.339 # define EV_USE_POLL EV_FEATURE_BACKENDS
105 root 1.127 # endif
106 root 1.343 # else
107     # undef EV_USE_POLL
108     # define EV_USE_POLL 0
109 root 1.60 # endif
110 root 1.127
111 root 1.343 # if HAVE_EPOLL_CTL && HAVE_SYS_EPOLL_H
112     # ifndef EV_USE_EPOLL
113 root 1.339 # define EV_USE_EPOLL EV_FEATURE_BACKENDS
114 root 1.127 # endif
115 root 1.343 # else
116     # undef EV_USE_EPOLL
117     # define EV_USE_EPOLL 0
118 root 1.60 # endif
119 root 1.127
120 root 1.343 # if HAVE_KQUEUE && HAVE_SYS_EVENT_H
121     # ifndef EV_USE_KQUEUE
122 root 1.339 # define EV_USE_KQUEUE EV_FEATURE_BACKENDS
123 root 1.127 # endif
124 root 1.343 # else
125     # undef EV_USE_KQUEUE
126     # define EV_USE_KQUEUE 0
127 root 1.60 # endif
128 root 1.127
129 root 1.343 # if HAVE_PORT_H && HAVE_PORT_CREATE
130     # ifndef EV_USE_PORT
131 root 1.339 # define EV_USE_PORT EV_FEATURE_BACKENDS
132 root 1.127 # endif
133 root 1.343 # else
134     # undef EV_USE_PORT
135     # define EV_USE_PORT 0
136 root 1.118 # endif
137    
138 root 1.343 # if HAVE_INOTIFY_INIT && HAVE_SYS_INOTIFY_H
139     # ifndef EV_USE_INOTIFY
140 root 1.339 # define EV_USE_INOTIFY EV_FEATURE_OS
141 root 1.152 # endif
142 root 1.343 # else
143     # undef EV_USE_INOTIFY
144     # define EV_USE_INOTIFY 0
145 root 1.152 # endif
146    
147 root 1.343 # if HAVE_SIGNALFD && HAVE_SYS_SIGNALFD_H
148     # ifndef EV_USE_SIGNALFD
149 root 1.339 # define EV_USE_SIGNALFD EV_FEATURE_OS
150 root 1.303 # endif
151 root 1.343 # else
152     # undef EV_USE_SIGNALFD
153     # define EV_USE_SIGNALFD 0
154 root 1.303 # endif
155    
156 root 1.343 # if HAVE_EVENTFD
157     # ifndef EV_USE_EVENTFD
158 root 1.339 # define EV_USE_EVENTFD EV_FEATURE_OS
159 root 1.220 # endif
160 root 1.343 # else
161     # undef EV_USE_EVENTFD
162     # define EV_USE_EVENTFD 0
163 root 1.220 # endif
164 root 1.250
165 root 1.29 #endif
166 root 1.17
167 root 1.1 #include <stdlib.h>
168 root 1.319 #include <string.h>
169 root 1.7 #include <fcntl.h>
170 root 1.16 #include <stddef.h>
171 root 1.1
172     #include <stdio.h>
173    
174 root 1.4 #include <assert.h>
175 root 1.1 #include <errno.h>
176 root 1.22 #include <sys/types.h>
177 root 1.71 #include <time.h>
178 root 1.326 #include <limits.h>
179 root 1.71
180 root 1.72 #include <signal.h>
181 root 1.71
182 root 1.152 #ifdef EV_H
183     # include EV_H
184     #else
185     # include "ev.h"
186     #endif
187    
188 root 1.354 EV_CPP(extern "C" {)
189    
190 root 1.103 #ifndef _WIN32
191 root 1.71 # include <sys/time.h>
192 root 1.45 # include <sys/wait.h>
193 root 1.140 # include <unistd.h>
194 root 1.103 #else
195 root 1.256 # include <io.h>
196 root 1.103 # define WIN32_LEAN_AND_MEAN
197     # include <windows.h>
198     # ifndef EV_SELECT_IS_WINSOCKET
199     # define EV_SELECT_IS_WINSOCKET 1
200     # endif
201 root 1.331 # undef EV_AVOID_STDIO
202 root 1.45 #endif
203 root 1.103
204 root 1.344 /* OS X, in its infinite idiocy, actually HARDCODES
205     * a limit of 1024 into their select. Where people have brains,
206     * OS X engineers apparently have a vacuum. Or maybe they were
207     * ordered to have a vacuum, or they do anything for money.
208     * This might help. Or not.
209     */
210     #define _DARWIN_UNLIMITED_SELECT 1
211    
212 root 1.220 /* this block tries to deduce configuration from header-defined symbols and defaults */
213 root 1.40
214 root 1.305 /* try to deduce the maximum number of signals on this platform */
215     #if defined (EV_NSIG)
216     /* use what's provided */
217     #elif defined (NSIG)
218     # define EV_NSIG (NSIG)
219     #elif defined(_NSIG)
220     # define EV_NSIG (_NSIG)
221     #elif defined (SIGMAX)
222     # define EV_NSIG (SIGMAX+1)
223     #elif defined (SIG_MAX)
224     # define EV_NSIG (SIG_MAX+1)
225     #elif defined (_SIG_MAX)
226     # define EV_NSIG (_SIG_MAX+1)
227     #elif defined (MAXSIG)
228     # define EV_NSIG (MAXSIG+1)
229     #elif defined (MAX_SIG)
230     # define EV_NSIG (MAX_SIG+1)
231     #elif defined (SIGARRAYSIZE)
232 root 1.336 # define EV_NSIG (SIGARRAYSIZE) /* Assume ary[SIGARRAYSIZE] */
233 root 1.305 #elif defined (_sys_nsig)
234     # define EV_NSIG (_sys_nsig) /* Solaris 2.5 */
235     #else
236     # error "unable to find value for NSIG, please report"
237 root 1.336 /* to make it compile regardless, just remove the above line, */
238     /* but consider reporting it, too! :) */
239 root 1.306 # define EV_NSIG 65
240 root 1.305 #endif
241    
242 root 1.373 #ifndef EV_USE_FLOOR
243     # define EV_USE_FLOOR 0
244     #endif
245    
246 root 1.274 #ifndef EV_USE_CLOCK_SYSCALL
247     # if __linux && __GLIBC__ >= 2
248 root 1.338 # define EV_USE_CLOCK_SYSCALL EV_FEATURE_OS
249 root 1.274 # else
250     # define EV_USE_CLOCK_SYSCALL 0
251     # endif
252     #endif
253    
254 root 1.29 #ifndef EV_USE_MONOTONIC
255 root 1.253 # if defined (_POSIX_MONOTONIC_CLOCK) && _POSIX_MONOTONIC_CLOCK >= 0
256 root 1.338 # define EV_USE_MONOTONIC EV_FEATURE_OS
257 root 1.253 # else
258     # define EV_USE_MONOTONIC 0
259     # endif
260 root 1.37 #endif
261    
262 root 1.118 #ifndef EV_USE_REALTIME
263 root 1.279 # define EV_USE_REALTIME !EV_USE_CLOCK_SYSCALL
264 root 1.118 #endif
265    
266 root 1.193 #ifndef EV_USE_NANOSLEEP
267 root 1.253 # if _POSIX_C_SOURCE >= 199309L
268 root 1.338 # define EV_USE_NANOSLEEP EV_FEATURE_OS
269 root 1.253 # else
270     # define EV_USE_NANOSLEEP 0
271     # endif
272 root 1.193 #endif
273    
274 root 1.29 #ifndef EV_USE_SELECT
275 root 1.338 # define EV_USE_SELECT EV_FEATURE_BACKENDS
276 root 1.10 #endif
277    
278 root 1.59 #ifndef EV_USE_POLL
279 root 1.104 # ifdef _WIN32
280     # define EV_USE_POLL 0
281     # else
282 root 1.338 # define EV_USE_POLL EV_FEATURE_BACKENDS
283 root 1.104 # endif
284 root 1.41 #endif
285    
286 root 1.29 #ifndef EV_USE_EPOLL
287 root 1.220 # if __linux && (__GLIBC__ > 2 || (__GLIBC__ == 2 && __GLIBC_MINOR__ >= 4))
288 root 1.338 # define EV_USE_EPOLL EV_FEATURE_BACKENDS
289 root 1.220 # else
290     # define EV_USE_EPOLL 0
291     # endif
292 root 1.10 #endif
293    
294 root 1.44 #ifndef EV_USE_KQUEUE
295     # define EV_USE_KQUEUE 0
296     #endif
297    
298 root 1.118 #ifndef EV_USE_PORT
299     # define EV_USE_PORT 0
300 root 1.40 #endif
301    
302 root 1.152 #ifndef EV_USE_INOTIFY
303 root 1.220 # if __linux && (__GLIBC__ > 2 || (__GLIBC__ == 2 && __GLIBC_MINOR__ >= 4))
304 root 1.338 # define EV_USE_INOTIFY EV_FEATURE_OS
305 root 1.220 # else
306     # define EV_USE_INOTIFY 0
307     # endif
308 root 1.152 #endif
309    
310 root 1.149 #ifndef EV_PID_HASHSIZE
311 root 1.338 # define EV_PID_HASHSIZE EV_FEATURE_DATA ? 16 : 1
312 root 1.149 #endif
313    
314 root 1.152 #ifndef EV_INOTIFY_HASHSIZE
315 root 1.338 # define EV_INOTIFY_HASHSIZE EV_FEATURE_DATA ? 16 : 1
316 root 1.152 #endif
317    
318 root 1.220 #ifndef EV_USE_EVENTFD
319     # if __linux && (__GLIBC__ > 2 || (__GLIBC__ == 2 && __GLIBC_MINOR__ >= 7))
320 root 1.338 # define EV_USE_EVENTFD EV_FEATURE_OS
321 root 1.220 # else
322     # define EV_USE_EVENTFD 0
323     # endif
324     #endif
325    
326 root 1.303 #ifndef EV_USE_SIGNALFD
327 root 1.314 # if __linux && (__GLIBC__ > 2 || (__GLIBC__ == 2 && __GLIBC_MINOR__ >= 7))
328 root 1.338 # define EV_USE_SIGNALFD EV_FEATURE_OS
329 root 1.303 # else
330     # define EV_USE_SIGNALFD 0
331     # endif
332     #endif
333    
334 root 1.249 #if 0 /* debugging */
335 root 1.250 # define EV_VERIFY 3
336 root 1.249 # define EV_USE_4HEAP 1
337     # define EV_HEAP_CACHE_AT 1
338     #endif
339    
340 root 1.250 #ifndef EV_VERIFY
341 root 1.338 # define EV_VERIFY (EV_FEATURE_API ? 1 : 0)
342 root 1.250 #endif
343    
344 root 1.243 #ifndef EV_USE_4HEAP
345 root 1.338 # define EV_USE_4HEAP EV_FEATURE_DATA
346 root 1.243 #endif
347    
348     #ifndef EV_HEAP_CACHE_AT
349 root 1.338 # define EV_HEAP_CACHE_AT EV_FEATURE_DATA
350 root 1.243 #endif
351    
352 root 1.291 /* on linux, we can use a (slow) syscall to avoid a dependency on pthread, */
353     /* which makes programs even slower. might work on other unices, too. */
354     #if EV_USE_CLOCK_SYSCALL
355     # include <syscall.h>
356     # ifdef SYS_clock_gettime
357     # define clock_gettime(id, ts) syscall (SYS_clock_gettime, (id), (ts))
358     # undef EV_USE_MONOTONIC
359     # define EV_USE_MONOTONIC 1
360     # else
361     # undef EV_USE_CLOCK_SYSCALL
362     # define EV_USE_CLOCK_SYSCALL 0
363     # endif
364     #endif
365    
366 root 1.220 /* this block fixes any misconfiguration where we know we run into trouble otherwise */
367 root 1.40
368 root 1.325 #ifdef _AIX
369     /* AIX has a completely broken poll.h header */
370     # undef EV_USE_POLL
371     # define EV_USE_POLL 0
372     #endif
373    
374 root 1.40 #ifndef CLOCK_MONOTONIC
375     # undef EV_USE_MONOTONIC
376     # define EV_USE_MONOTONIC 0
377     #endif
378    
379 root 1.31 #ifndef CLOCK_REALTIME
380 root 1.40 # undef EV_USE_REALTIME
381 root 1.31 # define EV_USE_REALTIME 0
382     #endif
383 root 1.40
384 root 1.152 #if !EV_STAT_ENABLE
385 root 1.185 # undef EV_USE_INOTIFY
386 root 1.152 # define EV_USE_INOTIFY 0
387     #endif
388    
389 root 1.193 #if !EV_USE_NANOSLEEP
390 root 1.370 /* hp-ux has it in sys/time.h, which we unconditionally include above */
391     # if !defined(_WIN32) && !defined(__hpux)
392 root 1.193 # include <sys/select.h>
393     # endif
394     #endif
395    
396 root 1.152 #if EV_USE_INOTIFY
397 root 1.273 # include <sys/statfs.h>
398 root 1.152 # include <sys/inotify.h>
399 root 1.263 /* some very old inotify.h headers don't have IN_DONT_FOLLOW */
400     # ifndef IN_DONT_FOLLOW
401     # undef EV_USE_INOTIFY
402     # define EV_USE_INOTIFY 0
403     # endif
404 root 1.152 #endif
405    
406 root 1.185 #if EV_SELECT_IS_WINSOCKET
407     # include <winsock.h>
408     #endif
409    
410 root 1.220 #if EV_USE_EVENTFD
411     /* our minimum requirement is glibc 2.7 which has the stub, but not the header */
412 root 1.221 # include <stdint.h>
413 root 1.303 # ifndef EFD_NONBLOCK
414     # define EFD_NONBLOCK O_NONBLOCK
415     # endif
416     # ifndef EFD_CLOEXEC
417 root 1.311 # ifdef O_CLOEXEC
418     # define EFD_CLOEXEC O_CLOEXEC
419     # else
420     # define EFD_CLOEXEC 02000000
421     # endif
422 root 1.303 # endif
423 root 1.354 EV_CPP(extern "C") int (eventfd) (unsigned int initval, int flags);
424 root 1.220 #endif
425    
426 root 1.303 #if EV_USE_SIGNALFD
427 root 1.314 /* our minimum requirement is glibc 2.7 which has the stub, but not the header */
428     # include <stdint.h>
429     # ifndef SFD_NONBLOCK
430     # define SFD_NONBLOCK O_NONBLOCK
431     # endif
432     # ifndef SFD_CLOEXEC
433     # ifdef O_CLOEXEC
434     # define SFD_CLOEXEC O_CLOEXEC
435     # else
436     # define SFD_CLOEXEC 02000000
437     # endif
438     # endif
439 root 1.354 EV_CPP (extern "C") int signalfd (int fd, const sigset_t *mask, int flags);
440 root 1.314
441     struct signalfd_siginfo
442     {
443     uint32_t ssi_signo;
444     char pad[128 - sizeof (uint32_t)];
445     };
446 root 1.303 #endif
447    
448 root 1.40 /**/
449 root 1.1
450 root 1.250 #if EV_VERIFY >= 3
451 root 1.340 # define EV_FREQUENT_CHECK ev_verify (EV_A)
452 root 1.248 #else
453     # define EV_FREQUENT_CHECK do { } while (0)
454     #endif
455    
456 root 1.176 /*
457 root 1.373 * This is used to work around floating point rounding problems.
458 root 1.177 * This value is good at least till the year 4000.
459 root 1.176 */
460 root 1.373 #define MIN_INTERVAL 0.0001220703125 /* 1/2**13, good till 4000 */
461     /*#define MIN_INTERVAL 0.00000095367431640625 /* 1/2**20, good till 2200 */
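/* A short worked justification of the value above (a sketch, assuming ev_tstamp is an
 * IEEE-754 double with a 52-bit fraction): the year 4000 lies roughly 2**36 seconds
 * after the epoch, so the spacing between adjacent representable timestamps there is
 * about 2**(36-52) = 2**-16 s (~15us), comfortably below MIN_INTERVAL (~122us). */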
462 root 1.176
463 root 1.4 #define MIN_TIMEJUMP 1. /* minimum timejump that gets detected (if monotonic clock available) */
464 root 1.120 #define MAX_BLOCKTIME 59.743 /* never wait longer than this time (to detect time jumps) */
465 root 1.1
466 root 1.347 #define EV_TV_SET(tv,t) do { tv.tv_sec = (long)t; tv.tv_usec = (long)((t - tv.tv_sec) * 1e6); } while (0)
467 root 1.348 #define EV_TS_SET(ts,t) do { ts.tv_sec = (long)t; ts.tv_nsec = (long)((t - ts.tv_sec) * 1e9); } while (0)
468 root 1.347
469 root 1.379 /* the following are taken from libecb */
470     /* ecb.h start */
471    
472     /* many compilers define __GNUC__ to some version but then only implement
473     * what their idiot authors think are the "more important" extensions,
474     * causing enormous grief in return for some better fake benchmark numbers.
475     * or so.
476     * we try to detect these and simply assume they are not gcc - if they have
477     * an issue with that they should have done it right in the first place.
478     */
479     #ifndef ECB_GCC_VERSION
480     #if !defined(__GNUC_MINOR__) || defined(__INTEL_COMPILER) || defined(__SUNPRO_C) || defined(__SUNPRO_CC) || defined(__llvm__) || defined(__clang__)
481     #define ECB_GCC_VERSION(major,minor) 0
482     #else
483     #define ECB_GCC_VERSION(major,minor) (__GNUC__ > (major) || (__GNUC__ == (major) && __GNUC_MINOR__ >= (minor)))
484     #endif
485     #endif
486    
487     #if __cplusplus
488     #define ecb_inline static inline
489     #elif ECB_GCC_VERSION(2,5)
490     #define ecb_inline static __inline__
491     #elif ECB_C99
492     #define ecb_inline static inline
493     #else
494     #define ecb_inline static
495 root 1.40 #endif
496    
497 root 1.383 #ifndef ECB_MEMORY_FENCE
498     #if ECB_GCC_VERSION(2,5)
499     #if __x86
500     #define ECB_MEMORY_FENCE __asm__ __volatile__ ("lock; orb $0, -1(%%esp)" : : : "memory")
501     #define ECB_MEMORY_FENCE_ACQUIRE ECB_MEMORY_FENCE
502     #define ECB_MEMORY_FENCE_RELEASE ECB_MEMORY_FENCE /* better be safe than sorry */
503     #elif __amd64
504     #define ECB_MEMORY_FENCE __asm__ __volatile__ ("mfence" : : : "memory")
505     #define ECB_MEMORY_FENCE_ACQUIRE __asm__ __volatile__ ("lfence" : : : "memory")
506     #define ECB_MEMORY_FENCE_RELEASE __asm__ __volatile__ ("sfence")
507     #endif
508     #endif
509     #endif
510    
511     #ifndef ECB_MEMORY_FENCE
512     #if ECB_GCC_VERSION(4,4)
513     #define ECB_MEMORY_FENCE __sync_synchronize ()
514     #define ECB_MEMORY_FENCE_ACQUIRE ({ char dummy = 0; __sync_lock_test_and_set (&dummy, 1); })
515     #define ECB_MEMORY_FENCE_RELEASE ({ char dummy = 1; __sync_lock_release (&dummy ); })
516     #elif defined(_WIN32) && defined(MemoryBarrier)
517     #define ECB_MEMORY_FENCE MemoryBarrier ()
518     #define ECB_MEMORY_FENCE_ACQUIRE ECB_MEMORY_FENCE
519     #define ECB_MEMORY_FENCE_RELEASE ECB_MEMORY_FENCE
520     #endif
521     #endif
522    
523     #ifndef ECB_MEMORY_FENCE
524     #include <pthread.h>
525    
526     static pthread_mutex_t ecb_mf_lock = PTHREAD_MUTEX_INITIALIZER;
527     #define ECB_MEMORY_FENCE do { pthread_mutex_lock (&ecb_mf_lock); pthread_mutex_unlock (&ecb_mf_lock); } while (0)
528     #define ECB_MEMORY_FENCE_ACQUIRE ECB_MEMORY_FENCE
529     #define ECB_MEMORY_FENCE_RELEASE ECB_MEMORY_FENCE
530     #endif
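/* A minimal sketch of how the three fences above are meant to be used: ECB_MEMORY_FENCE
 * is a full barrier, _RELEASE orders all earlier memory operations before it, _ACQUIRE
 * orders all later memory operations after it. The names below are hypothetical. */
#if 0 /* illustrative sketch only, never compiled */
static int shared_flag;
static int shared_data;

static void
producer (void)
{
  shared_data = 42;
  ECB_MEMORY_FENCE_RELEASE; /* make the data store visible before the flag store */
  shared_flag = 1;
}

static void
consumer (void)
{
  while (!shared_flag)
    ;
  ECB_MEMORY_FENCE_ACQUIRE; /* do not read shared_data before the flag was seen */
  printf ("%d\n", shared_data);
}
#endif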
531    
532 root 1.379 #if ECB_GCC_VERSION(3,1)
533     #define ecb_attribute(attrlist) __attribute__(attrlist)
534     #define ecb_is_constant(expr) __builtin_constant_p (expr)
535     #define ecb_expect(expr,value) __builtin_expect ((expr),(value))
536     #define ecb_prefetch(addr,rw,locality) __builtin_prefetch (addr, rw, locality)
537     #else
538     #define ecb_attribute(attrlist)
539     #define ecb_is_constant(expr) 0
540     #define ecb_expect(expr,value) (expr)
541     #define ecb_prefetch(addr,rw,locality)
542     #endif
543    
544     #define ecb_noinline ecb_attribute ((__noinline__))
545     #define ecb_noreturn ecb_attribute ((__noreturn__))
546     #define ecb_unused ecb_attribute ((__unused__))
547     #define ecb_const ecb_attribute ((__const__))
548     #define ecb_pure ecb_attribute ((__pure__))
549    
550     #if ECB_GCC_VERSION(4,3)
551     #define ecb_artificial ecb_attribute ((__artificial__))
552     #define ecb_hot ecb_attribute ((__hot__))
553     #define ecb_cold ecb_attribute ((__cold__))
554     #else
555     #define ecb_artificial
556     #define ecb_hot
557     #define ecb_cold
558     #endif
559    
560     /* put around conditional expressions if you are very sure that the */
561     /* expression is mostly true or mostly false. note that these return */
562     /* booleans, not the expression. */
563     #define ecb_expect_false(expr) ecb_expect (!!(expr), 0)
564     #define ecb_expect_true(expr) ecb_expect (!!(expr), 1)
565     /* ecb.h end */
566    
567     #define expect_false(cond) ecb_expect_false (cond)
568     #define expect_true(cond) ecb_expect_true (cond)
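/* A minimal usage sketch: the macros only pass a branch-probability hint to the
 * compiler and evaluate to the boolean value of the condition; fd and revents
 * below are hypothetical. */
#if 0 /* illustrative sketch only, never compiled */
static int
example (int fd, int revents)
{
  if (expect_false (fd < 0))           /* error path, assumed to be rare */
    return -1;

  if (expect_true (revents & EV_READ)) /* common case, assumed to be likely */
    return 1;

  return 0;
}
#endif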
569     #define noinline ecb_noinline
570    
571     #define inline_size ecb_inline
572 root 1.169
573 root 1.338 #if EV_FEATURE_CODE
574 root 1.379 # define inline_speed ecb_inline
575 root 1.338 #else
576 root 1.169 # define inline_speed static noinline
577     #endif
578 root 1.40
579 root 1.295 #define NUMPRI (EV_MAXPRI - EV_MINPRI + 1)
580    
581     #if EV_MINPRI == EV_MAXPRI
582     # define ABSPRI(w) (((W)w), 0)
583     #else
584     # define ABSPRI(w) (((W)w)->priority - EV_MINPRI)
585     #endif
586 root 1.42
587 root 1.164 #define EMPTY /* required for microsofts broken pseudo-c compiler */
588 root 1.114 #define EMPTY2(a,b) /* used to suppress some warnings */
589 root 1.103
590 root 1.136 typedef ev_watcher *W;
591     typedef ev_watcher_list *WL;
592     typedef ev_watcher_time *WT;
593 root 1.10
594 root 1.229 #define ev_active(w) ((W)(w))->active
595 root 1.228 #define ev_at(w) ((WT)(w))->at
596    
597 root 1.279 #if EV_USE_REALTIME
598 root 1.194 /* sig_atomic_t is used to avoid per-thread variables or locking but still */
599 sf-exg 1.345 /* giving it a reasonably high chance of working on typical architectures */
600 root 1.279 static EV_ATOMIC_T have_realtime; /* did clock_gettime (CLOCK_REALTIME) work? */
601     #endif
602    
603     #if EV_USE_MONOTONIC
604 root 1.207 static EV_ATOMIC_T have_monotonic; /* did clock_gettime (CLOCK_MONOTONIC) work? */
605 root 1.198 #endif
606 root 1.54
607 root 1.313 #ifndef EV_FD_TO_WIN32_HANDLE
608     # define EV_FD_TO_WIN32_HANDLE(fd) _get_osfhandle (fd)
609     #endif
610     #ifndef EV_WIN32_HANDLE_TO_FD
611 root 1.322 # define EV_WIN32_HANDLE_TO_FD(handle) _open_osfhandle (handle, 0)
612 root 1.313 #endif
613     #ifndef EV_WIN32_CLOSE_FD
614     # define EV_WIN32_CLOSE_FD(fd) close (fd)
615     #endif
616    
617 root 1.103 #ifdef _WIN32
618 root 1.98 # include "ev_win32.c"
619     #endif
620 root 1.67
621 root 1.53 /*****************************************************************************/
622 root 1.1
623 root 1.373 /* define a suitable floor function (only used by periodics atm) */
624    
625     #if EV_USE_FLOOR
626     # include <math.h>
627     # define ev_floor(v) floor (v)
628     #else
629    
630     #include <float.h>
631    
632     /* a floor() replacement function, should be independent of ev_tstamp type */
633     static ev_tstamp noinline
634     ev_floor (ev_tstamp v)
635     {
636     /* the choice of shift factor is not terribly important */
637     #if FLT_RADIX != 2 /* assume FLT_RADIX == 10 */
638     const ev_tstamp shift = sizeof (unsigned long) >= 8 ? 10000000000000000000. : 1000000000.;
639     #else
640     const ev_tstamp shift = sizeof (unsigned long) >= 8 ? 18446744073709551616. : 4294967296.;
641     #endif
642    
643     /* argument too large for an unsigned long? */
644     if (expect_false (v >= shift))
645     {
646     ev_tstamp f;
647    
648     if (v == v - 1.)
649     return v; /* very large number */
650    
651     f = shift * ev_floor (v * (1. / shift));
652     return f + ev_floor (v - f);
653     }
654    
655     /* special treatment for negative args? */
656     if (expect_false (v < 0.))
657     {
658     ev_tstamp f = -ev_floor (-v);
659    
660     return f - (f == v ? 0 : 1);
661     }
662    
663     /* fits into an unsigned long */
664     return (unsigned long)v;
665     }
666    
667     #endif
668    
669     /*****************************************************************************/
670    
671 root 1.356 #ifdef __linux
672     # include <sys/utsname.h>
673     #endif
674    
675 root 1.379 static unsigned int noinline ecb_cold
676 root 1.355 ev_linux_version (void)
677     {
678     #ifdef __linux
679 root 1.359 unsigned int v = 0;
680 root 1.355 struct utsname buf;
681     int i;
682     char *p = buf.release;
683    
684     if (uname (&buf))
685     return 0;
686    
687     for (i = 3+1; --i; )
688     {
689     unsigned int c = 0;
690    
691     for (;;)
692     {
693     if (*p >= '0' && *p <= '9')
694     c = c * 10 + *p++ - '0';
695     else
696     {
697     p += *p == '.';
698     break;
699     }
700     }
701    
702     v = (v << 8) | c;
703     }
704    
705     return v;
706     #else
707     return 0;
708     #endif
709     }
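/* The version is packed one byte per component, so a buf.release of "2.6.32-5-amd64"
 * yields (2 << 16) | (6 << 8) | 32 = 0x020620 - which is what comparisons against
 * constants such as 0x020620 further down rely on. */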
710    
711     /*****************************************************************************/
712    
713 root 1.331 #if EV_AVOID_STDIO
714 root 1.379 static void noinline ecb_cold
715 root 1.331 ev_printerr (const char *msg)
716     {
717     write (STDERR_FILENO, msg, strlen (msg));
718     }
719     #endif
720    
721 root 1.70 static void (*syserr_cb)(const char *msg);
722 root 1.69
723 root 1.379 void ecb_cold
724 root 1.141 ev_set_syserr_cb (void (*cb)(const char *msg))
725 root 1.69 {
726     syserr_cb = cb;
727     }
728    
729 root 1.379 static void noinline ecb_cold
730 root 1.269 ev_syserr (const char *msg)
731 root 1.69 {
732 root 1.70 if (!msg)
733     msg = "(libev) system error";
734    
735 root 1.69 if (syserr_cb)
736 root 1.70 syserr_cb (msg);
737 root 1.69 else
738     {
739 root 1.330 #if EV_AVOID_STDIO
740 root 1.331 ev_printerr (msg);
741     ev_printerr (": ");
742 root 1.365 ev_printerr (strerror (errno));
743 root 1.331 ev_printerr ("\n");
744 root 1.330 #else
745 root 1.70 perror (msg);
746 root 1.330 #endif
747 root 1.69 abort ();
748     }
749     }
750    
751 root 1.224 static void *
752     ev_realloc_emul (void *ptr, long size)
753     {
754 root 1.334 #if __GLIBC__
755     return realloc (ptr, size);
756     #else
757 root 1.224 /* some systems, notably openbsd and darwin, fail to properly
758 root 1.335 * implement realloc (x, 0) (as required by both ansi c-89 and
759 root 1.224 * the single unix specification), so work around them here.
760     */
761 root 1.333
762 root 1.224 if (size)
763     return realloc (ptr, size);
764    
765     free (ptr);
766     return 0;
767 root 1.334 #endif
768 root 1.224 }
769    
770     static void *(*alloc)(void *ptr, long size) = ev_realloc_emul;
771 root 1.69
772 root 1.379 void ecb_cold
773 root 1.155 ev_set_allocator (void *(*cb)(void *ptr, long size))
774 root 1.69 {
775     alloc = cb;
776     }
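/* A sketch of a user-supplied allocator installed via ev_set_allocator () above;
 * persistent_realloc is a hypothetical example that retries instead of letting
 * libev abort on allocation failure. */
#if 0 /* illustrative sketch only, never compiled */
static void *
persistent_realloc (void *ptr, long size)
{
  /* retry forever instead of failing */
  for (;;)
    {
      void *newptr = realloc (ptr, size);

      if (newptr || !size)
        return newptr;

      sleep (60);
    }
}

/* then, early in main () and before any loops or watchers are created: */
/* ev_set_allocator (persistent_realloc); */
#endif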
777    
778 root 1.150 inline_speed void *
779 root 1.155 ev_realloc (void *ptr, long size)
780 root 1.69 {
781 root 1.224 ptr = alloc (ptr, size);
782 root 1.69
783     if (!ptr && size)
784     {
785 root 1.330 #if EV_AVOID_STDIO
786 root 1.365 ev_printerr ("(libev) memory allocation failed, aborting.\n");
787 root 1.330 #else
788 root 1.365 fprintf (stderr, "(libev) cannot allocate %ld bytes, aborting.", size);
789 root 1.330 #endif
790 root 1.69 abort ();
791     }
792    
793     return ptr;
794     }
795    
796     #define ev_malloc(size) ev_realloc (0, (size))
797     #define ev_free(ptr) ev_realloc ((ptr), 0)
798    
799     /*****************************************************************************/
800    
801 root 1.298 /* set in reify when reification needed */
802     #define EV_ANFD_REIFY 1
803    
804 root 1.288 /* file descriptor info structure */
805 root 1.53 typedef struct
806     {
807 root 1.68 WL head;
808 root 1.288 unsigned char events; /* the events watched for */
809 root 1.298 unsigned char reify; /* flag set when this ANFD needs reification (EV_ANFD_REIFY, EV__IOFDSET) */
810 root 1.288 unsigned char emask; /* the epoll backend stores the actual kernel mask in here */
811 root 1.269 unsigned char unused;
812     #if EV_USE_EPOLL
813 root 1.288 unsigned int egen; /* generation counter to counter epoll bugs */
814 root 1.269 #endif
815 root 1.357 #if EV_SELECT_IS_WINSOCKET || EV_USE_IOCP
816 root 1.103 SOCKET handle;
817     #endif
818 root 1.357 #if EV_USE_IOCP
819     OVERLAPPED or, ow;
820     #endif
821 root 1.53 } ANFD;
822 root 1.1
823 root 1.288 /* stores the pending event set for a given watcher */
824 root 1.53 typedef struct
825     {
826     W w;
827 root 1.288 int events; /* the pending event set for the given watcher */
828 root 1.53 } ANPENDING;
829 root 1.51
830 root 1.155 #if EV_USE_INOTIFY
831 root 1.241 /* hash table entry per inotify-id */
832 root 1.152 typedef struct
833     {
834     WL head;
835 root 1.155 } ANFS;
836 root 1.152 #endif
837    
838 root 1.241 /* Heap Entry */
839     #if EV_HEAP_CACHE_AT
840 root 1.288 /* a heap element */
841 root 1.241 typedef struct {
842 root 1.243 ev_tstamp at;
843 root 1.241 WT w;
844     } ANHE;
845    
846 root 1.248 #define ANHE_w(he) (he).w /* access watcher, read-write */
847     #define ANHE_at(he) (he).at /* access cached at, read-only */
848     #define ANHE_at_cache(he) (he).at = (he).w->at /* update at from watcher */
849 root 1.241 #else
850 root 1.288 /* a heap element */
851 root 1.241 typedef WT ANHE;
852    
853 root 1.248 #define ANHE_w(he) (he)
854     #define ANHE_at(he) (he)->at
855     #define ANHE_at_cache(he)
856 root 1.241 #endif
857    
858 root 1.55 #if EV_MULTIPLICITY
859 root 1.54
860 root 1.80 struct ev_loop
861     {
862 root 1.86 ev_tstamp ev_rt_now;
863 root 1.99 #define ev_rt_now ((loop)->ev_rt_now)
864 root 1.80 #define VAR(name,decl) decl;
865     #include "ev_vars.h"
866     #undef VAR
867     };
868     #include "ev_wrap.h"
869    
870 root 1.116 static struct ev_loop default_loop_struct;
871     struct ev_loop *ev_default_loop_ptr;
872 root 1.54
873 root 1.53 #else
874 root 1.54
875 root 1.86 ev_tstamp ev_rt_now;
876 root 1.80 #define VAR(name,decl) static decl;
877     #include "ev_vars.h"
878     #undef VAR
879    
880 root 1.116 static int ev_default_loop_ptr;
881 root 1.54
882 root 1.51 #endif
883 root 1.1
884 root 1.338 #if EV_FEATURE_API
885 root 1.298 # define EV_RELEASE_CB if (expect_false (release_cb)) release_cb (EV_A)
886     # define EV_ACQUIRE_CB if (expect_false (acquire_cb)) acquire_cb (EV_A)
887 root 1.297 # define EV_INVOKE_PENDING invoke_cb (EV_A)
888     #else
889 root 1.298 # define EV_RELEASE_CB (void)0
890     # define EV_ACQUIRE_CB (void)0
891 root 1.297 # define EV_INVOKE_PENDING ev_invoke_pending (EV_A)
892     #endif
893    
894 root 1.353 #define EVBREAK_RECURSE 0x80
895 root 1.298
896 root 1.8 /*****************************************************************************/
897    
898 root 1.292 #ifndef EV_HAVE_EV_TIME
899 root 1.141 ev_tstamp
900 root 1.1 ev_time (void)
901     {
902 root 1.29 #if EV_USE_REALTIME
903 root 1.279 if (expect_true (have_realtime))
904     {
905     struct timespec ts;
906     clock_gettime (CLOCK_REALTIME, &ts);
907     return ts.tv_sec + ts.tv_nsec * 1e-9;
908     }
909     #endif
910    
911 root 1.1 struct timeval tv;
912     gettimeofday (&tv, 0);
913     return tv.tv_sec + tv.tv_usec * 1e-6;
914     }
915 root 1.292 #endif
916 root 1.1
917 root 1.284 inline_size ev_tstamp
918 root 1.1 get_clock (void)
919     {
920 root 1.29 #if EV_USE_MONOTONIC
921 root 1.40 if (expect_true (have_monotonic))
922 root 1.1 {
923     struct timespec ts;
924     clock_gettime (CLOCK_MONOTONIC, &ts);
925     return ts.tv_sec + ts.tv_nsec * 1e-9;
926     }
927     #endif
928    
929     return ev_time ();
930     }
931    
932 root 1.85 #if EV_MULTIPLICITY
933 root 1.51 ev_tstamp
934     ev_now (EV_P)
935     {
936 root 1.85 return ev_rt_now;
937 root 1.51 }
938 root 1.85 #endif
939 root 1.51
940 root 1.193 void
941     ev_sleep (ev_tstamp delay)
942     {
943     if (delay > 0.)
944     {
945     #if EV_USE_NANOSLEEP
946     struct timespec ts;
947    
948 root 1.348 EV_TS_SET (ts, delay);
949 root 1.193 nanosleep (&ts, 0);
950     #elif defined(_WIN32)
951 root 1.217 Sleep ((unsigned long)(delay * 1e3));
952 root 1.193 #else
953     struct timeval tv;
954    
955 root 1.257 /* here we rely on sys/time.h + sys/types.h + unistd.h providing select */
956 root 1.302 /* something not guaranteed by newer posix versions, but guaranteed */
957 root 1.257 /* by older ones */
958 sf-exg 1.349 EV_TV_SET (tv, delay);
959 root 1.193 select (0, 0, 0, 0, &tv);
960     #endif
961     }
962     }
963    
964     /*****************************************************************************/
965    
966 root 1.233 #define MALLOC_ROUND 4096 /* prefer to allocate in chunks of this size, must be 2**n and >> 4 longs */
967 root 1.232
968 root 1.288 /* find a suitable new size for the given array, */
969 sf-exg 1.345 /* hopefully by rounding to a nice-to-malloc size */
970 root 1.284 inline_size int
971 root 1.163 array_nextsize (int elem, int cur, int cnt)
972     {
973     int ncur = cur + 1;
974    
975     do
976     ncur <<= 1;
977     while (cnt > ncur);
978    
979 root 1.232 /* if size is large, round to MALLOC_ROUND - 4 * longs to accommodate malloc overhead */
980     if (elem * ncur > MALLOC_ROUND - sizeof (void *) * 4)
981 root 1.163 {
982     ncur *= elem;
983 root 1.232 ncur = (ncur + elem + (MALLOC_ROUND - 1) + sizeof (void *) * 4) & ~(MALLOC_ROUND - 1);
984 root 1.163 ncur = ncur - sizeof (void *) * 4;
985     ncur /= elem;
986     }
987    
988     return ncur;
989     }
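/* A worked example of the rounding above, assuming 8-byte pointers: elem = 16, cur = 0,
 * cnt = 600 first grows ncur to 1024; 16 * 1024 bytes exceeds the threshold, so it is
 * rounded to (16384 + 16 + 4095 + 32) & ~4095 = 20480, minus the 32 bytes assumed for
 * malloc bookkeeping, giving 20448 / 16 = 1278 elements - i.e. the array plus the
 * assumed allocator overhead fills exactly five MALLOC_ROUND-sized chunks. */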
990    
991 root 1.379 static void * noinline ecb_cold
992 root 1.163 array_realloc (int elem, void *base, int *cur, int cnt)
993     {
994     *cur = array_nextsize (elem, *cur, cnt);
995     return ev_realloc (base, elem * *cur);
996     }
997 root 1.29
998 root 1.265 #define array_init_zero(base,count) \
999     memset ((void *)(base), 0, sizeof (*(base)) * (count))
1000    
1001 root 1.74 #define array_needsize(type,base,cur,cnt,init) \
1002 root 1.163 if (expect_false ((cnt) > (cur))) \
1003 root 1.69 { \
1004 sf-exg 1.382 int ecb_unused ocur_ = (cur); \
1005 root 1.163 (base) = (type *)array_realloc \
1006     (sizeof (type), (base), &(cur), (cnt)); \
1007     init ((base) + (ocur_), (cur) - ocur_); \
1008 root 1.1 }
1009    
1010 root 1.163 #if 0
1011 root 1.74 #define array_slim(type,stem) \
1012 root 1.67 if (stem ## max < array_roundsize (stem ## cnt >> 2)) \
1013     { \
1014     stem ## max = array_roundsize (stem ## cnt >> 1); \
1015 root 1.74 base = (type *)ev_realloc (base, sizeof (type) * (stem ## max));\
1016 root 1.67 fprintf (stderr, "slimmed down " # stem " to %d\n", stem ## max);/*D*/\
1017     }
1018 root 1.163 #endif
1019 root 1.67
1020 root 1.65 #define array_free(stem, idx) \
1021 root 1.280 ev_free (stem ## s idx); stem ## cnt idx = stem ## max idx = 0; stem ## s idx = 0
1022 root 1.65
1023 root 1.8 /*****************************************************************************/
1024    
1025 root 1.288 /* dummy callback for pending events */
1026     static void noinline
1027     pendingcb (EV_P_ ev_prepare *w, int revents)
1028     {
1029     }
1030    
1031 root 1.140 void noinline
1032 root 1.78 ev_feed_event (EV_P_ void *w, int revents)
1033 root 1.1 {
1034 root 1.78 W w_ = (W)w;
1035 root 1.171 int pri = ABSPRI (w_);
1036 root 1.78
1037 root 1.123 if (expect_false (w_->pending))
1038 root 1.171 pendings [pri][w_->pending - 1].events |= revents;
1039     else
1040 root 1.32 {
1041 root 1.171 w_->pending = ++pendingcnt [pri];
1042     array_needsize (ANPENDING, pendings [pri], pendingmax [pri], w_->pending, EMPTY2);
1043     pendings [pri][w_->pending - 1].w = w_;
1044     pendings [pri][w_->pending - 1].events = revents;
1045 root 1.32 }
1046 root 1.1 }
1047    
1048 root 1.284 inline_speed void
1049     feed_reverse (EV_P_ W w)
1050     {
1051     array_needsize (W, rfeeds, rfeedmax, rfeedcnt + 1, EMPTY2);
1052     rfeeds [rfeedcnt++] = w;
1053     }
1054    
1055     inline_size void
1056     feed_reverse_done (EV_P_ int revents)
1057     {
1058     do
1059     ev_feed_event (EV_A_ rfeeds [--rfeedcnt], revents);
1060     while (rfeedcnt);
1061     }
1062    
1063     inline_speed void
1064 root 1.51 queue_events (EV_P_ W *events, int eventcnt, int type)
1065 root 1.27 {
1066     int i;
1067    
1068     for (i = 0; i < eventcnt; ++i)
1069 root 1.78 ev_feed_event (EV_A_ events [i], type);
1070 root 1.27 }
1071    
1072 root 1.141 /*****************************************************************************/
1073    
1074 root 1.284 inline_speed void
1075 root 1.337 fd_event_nocheck (EV_P_ int fd, int revents)
1076 root 1.1 {
1077     ANFD *anfd = anfds + fd;
1078 root 1.136 ev_io *w;
1079 root 1.1
1080 root 1.136 for (w = (ev_io *)anfd->head; w; w = (ev_io *)((WL)w)->next)
1081 root 1.1 {
1082 root 1.79 int ev = w->events & revents;
1083 root 1.1
1084     if (ev)
1085 root 1.78 ev_feed_event (EV_A_ (W)w, ev);
1086 root 1.1 }
1087     }
1088    
1089 root 1.298 /* do not submit kernel events for fds that have reify set */
1090     /* because that means they changed while we were polling for new events */
1091     inline_speed void
1092     fd_event (EV_P_ int fd, int revents)
1093     {
1094     ANFD *anfd = anfds + fd;
1095    
1096     if (expect_true (!anfd->reify))
1097 root 1.337 fd_event_nocheck (EV_A_ fd, revents);
1098 root 1.298 }
1099    
1100 root 1.79 void
1101     ev_feed_fd_event (EV_P_ int fd, int revents)
1102     {
1103 root 1.168 if (fd >= 0 && fd < anfdmax)
1104 root 1.337 fd_event_nocheck (EV_A_ fd, revents);
1105 root 1.79 }
1106    
1107 root 1.288 /* make sure the external fd watch events are in-sync */
1108     /* with the kernel/libev internal state */
1109 root 1.284 inline_size void
1110 root 1.51 fd_reify (EV_P)
1111 root 1.9 {
1112     int i;
1113    
1114 root 1.371 #if EV_SELECT_IS_WINSOCKET || EV_USE_IOCP
1115     for (i = 0; i < fdchangecnt; ++i)
1116     {
1117     int fd = fdchanges [i];
1118     ANFD *anfd = anfds + fd;
1119    
1120 root 1.374 if (anfd->reify & EV__IOFDSET && anfd->head)
1121 root 1.371 {
1122     SOCKET handle = EV_FD_TO_WIN32_HANDLE (fd);
1123    
1124     if (handle != anfd->handle)
1125     {
1126     unsigned long arg;
1127    
1128     assert (("libev: only socket fds supported in this configuration", ioctlsocket (handle, FIONREAD, &arg) == 0));
1129    
1130     /* handle changed, but fd didn't - we need to do it in two steps */
1131     backend_modify (EV_A_ fd, anfd->events, 0);
1132     anfd->events = 0;
1133     anfd->handle = handle;
1134     }
1135     }
1136     }
1137     #endif
1138    
1139 root 1.27 for (i = 0; i < fdchangecnt; ++i)
1140     {
1141     int fd = fdchanges [i];
1142     ANFD *anfd = anfds + fd;
1143 root 1.136 ev_io *w;
1144 root 1.27
1145 root 1.350 unsigned char o_events = anfd->events;
1146     unsigned char o_reify = anfd->reify;
1147 root 1.27
1148 root 1.350 anfd->reify = 0;
1149 root 1.27
1150 root 1.350 /*if (expect_true (o_reify & EV_ANFD_REIFY)) probably a deoptimisation */
1151     {
1152     anfd->events = 0;
1153 root 1.184
1154 root 1.350 for (w = (ev_io *)anfd->head; w; w = (ev_io *)((WL)w)->next)
1155     anfd->events |= (unsigned char)w->events;
1156 root 1.27
1157 root 1.351 if (o_events != anfd->events)
1158 root 1.350 o_reify = EV__IOFDSET; /* actually |= */
1159     }
1160    
1161     if (o_reify & EV__IOFDSET)
1162     backend_modify (EV_A_ fd, o_events, anfd->events);
1163 root 1.27 }
1164    
1165     fdchangecnt = 0;
1166     }
1167    
1168 root 1.288 /* something about the given fd changed */
1169 root 1.284 inline_size void
1170 root 1.183 fd_change (EV_P_ int fd, int flags)
1171 root 1.27 {
1172 root 1.183 unsigned char reify = anfds [fd].reify;
1173 root 1.184 anfds [fd].reify |= flags;
1174 root 1.27
1175 root 1.183 if (expect_true (!reify))
1176     {
1177     ++fdchangecnt;
1178     array_needsize (int, fdchanges, fdchangemax, fdchangecnt, EMPTY2);
1179     fdchanges [fdchangecnt - 1] = fd;
1180     }
1181 root 1.9 }
1182    
1183 root 1.288 /* the given fd is invalid/unusable, so make sure it doesn't hurt us anymore */
1184 root 1.379 inline_speed void ecb_cold
1185 root 1.51 fd_kill (EV_P_ int fd)
1186 root 1.41 {
1187 root 1.136 ev_io *w;
1188 root 1.41
1189 root 1.136 while ((w = (ev_io *)anfds [fd].head))
1190 root 1.41 {
1191 root 1.51 ev_io_stop (EV_A_ w);
1192 root 1.78 ev_feed_event (EV_A_ (W)w, EV_ERROR | EV_READ | EV_WRITE);
1193 root 1.41 }
1194     }
1195    
1196 root 1.336 /* check whether the given fd is actually valid, for error recovery */
1197 root 1.379 inline_size int ecb_cold
1198 root 1.71 fd_valid (int fd)
1199     {
1200 root 1.103 #ifdef _WIN32
1201 root 1.322 return EV_FD_TO_WIN32_HANDLE (fd) != -1;
1202 root 1.71 #else
1203     return fcntl (fd, F_GETFD) != -1;
1204     #endif
1205     }
1206    
1207 root 1.19 /* called on EBADF to verify fds */
1208 root 1.379 static void noinline ecb_cold
1209 root 1.51 fd_ebadf (EV_P)
1210 root 1.19 {
1211     int fd;
1212    
1213     for (fd = 0; fd < anfdmax; ++fd)
1214 root 1.27 if (anfds [fd].events)
1215 root 1.254 if (!fd_valid (fd) && errno == EBADF)
1216 root 1.51 fd_kill (EV_A_ fd);
1217 root 1.41 }
1218    
1219     /* called on ENOMEM in select/poll to kill some fds and retry */
1220 root 1.379 static void noinline ecb_cold
1221 root 1.51 fd_enomem (EV_P)
1222 root 1.41 {
1223 root 1.62 int fd;
1224 root 1.41
1225 root 1.62 for (fd = anfdmax; fd--; )
1226 root 1.41 if (anfds [fd].events)
1227     {
1228 root 1.51 fd_kill (EV_A_ fd);
1229 root 1.307 break;
1230 root 1.41 }
1231 root 1.19 }
1232    
1233 root 1.130 /* usually called after fork if backend needs to re-arm all fds from scratch */
1234 root 1.140 static void noinline
1235 root 1.56 fd_rearm_all (EV_P)
1236     {
1237     int fd;
1238    
1239     for (fd = 0; fd < anfdmax; ++fd)
1240     if (anfds [fd].events)
1241     {
1242     anfds [fd].events = 0;
1243 root 1.268 anfds [fd].emask = 0;
1244 root 1.298 fd_change (EV_A_ fd, EV__IOFDSET | EV_ANFD_REIFY);
1245 root 1.56 }
1246     }
1247    
1248 root 1.336 /* used to prepare libev internal fd's */
1249     /* this is not fork-safe */
1250     inline_speed void
1251     fd_intern (int fd)
1252     {
1253     #ifdef _WIN32
1254     unsigned long arg = 1;
1255     ioctlsocket (EV_FD_TO_WIN32_HANDLE (fd), FIONBIO, &arg);
1256     #else
1257     fcntl (fd, F_SETFD, FD_CLOEXEC);
1258     fcntl (fd, F_SETFL, O_NONBLOCK);
1259     #endif
1260     }
1261    
1262 root 1.8 /*****************************************************************************/
1263    
1264 root 1.235 /*
1265 sf-exg 1.345 * the heap functions want a real array index. array index 0 is guaranteed to not
1266 root 1.241 * be in-use at any time. the first heap entry is at array [HEAP0]. DHEAP gives
1267     * the branching factor of the d-tree.
1268     */
1269    
1270     /*
1271 root 1.235 * at the moment we allow libev the luxury of two heaps,
1272     * a small-code-size 2-heap one and a ~1.5kb larger 4-heap
1273     * which is more cache-efficient.
1274     * the difference is about 5% with 50000+ watchers.
1275     */
1276 root 1.241 #if EV_USE_4HEAP
1277 root 1.235
1278 root 1.237 #define DHEAP 4
1279     #define HEAP0 (DHEAP - 1) /* index of first element in heap */
1280 root 1.247 #define HPARENT(k) ((((k) - HEAP0 - 1) / DHEAP) + HEAP0)
1281 root 1.248 #define UPHEAP_DONE(p,k) ((p) == (k))
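/* Index layout with DHEAP = 4: the root lives at HEAP0 = 3, its children at 4..7, and
 * the children of element k start at DHEAP * (k - HEAP0) + HEAP0 + 1 (see downheap
 * below); e.g. HPARENT (8) = ((8 - 3 - 1) / 4) + 3 = 4. */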
1282 root 1.235
1283     /* away from the root */
1284 root 1.284 inline_speed void
1285 root 1.241 downheap (ANHE *heap, int N, int k)
1286 root 1.235 {
1287 root 1.241 ANHE he = heap [k];
1288     ANHE *E = heap + N + HEAP0;
1289 root 1.235
1290     for (;;)
1291     {
1292     ev_tstamp minat;
1293 root 1.241 ANHE *minpos;
1294 root 1.248 ANHE *pos = heap + DHEAP * (k - HEAP0) + HEAP0 + 1;
1295 root 1.235
1296 root 1.248 /* find minimum child */
1297 root 1.237 if (expect_true (pos + DHEAP - 1 < E))
1298 root 1.235 {
1299 root 1.245 /* fast path */ (minpos = pos + 0), (minat = ANHE_at (*minpos));
1300     if ( ANHE_at (pos [1]) < minat) (minpos = pos + 1), (minat = ANHE_at (*minpos));
1301     if ( ANHE_at (pos [2]) < minat) (minpos = pos + 2), (minat = ANHE_at (*minpos));
1302     if ( ANHE_at (pos [3]) < minat) (minpos = pos + 3), (minat = ANHE_at (*minpos));
1303 root 1.235 }
1304 root 1.240 else if (pos < E)
1305 root 1.235 {
1306 root 1.241 /* slow path */ (minpos = pos + 0), (minat = ANHE_at (*minpos));
1307     if (pos + 1 < E && ANHE_at (pos [1]) < minat) (minpos = pos + 1), (minat = ANHE_at (*minpos));
1308     if (pos + 2 < E && ANHE_at (pos [2]) < minat) (minpos = pos + 2), (minat = ANHE_at (*minpos));
1309     if (pos + 3 < E && ANHE_at (pos [3]) < minat) (minpos = pos + 3), (minat = ANHE_at (*minpos));
1310 root 1.235 }
1311 root 1.240 else
1312     break;
1313 root 1.235
1314 root 1.241 if (ANHE_at (he) <= minat)
1315 root 1.235 break;
1316    
1317 root 1.247 heap [k] = *minpos;
1318 root 1.241 ev_active (ANHE_w (*minpos)) = k;
1319 root 1.235
1320     k = minpos - heap;
1321     }
1322    
1323 root 1.247 heap [k] = he;
1324 root 1.241 ev_active (ANHE_w (he)) = k;
1325 root 1.235 }
1326    
1327 root 1.248 #else /* 4HEAP */
1328 root 1.235
1329     #define HEAP0 1
1330 root 1.247 #define HPARENT(k) ((k) >> 1)
1331 root 1.248 #define UPHEAP_DONE(p,k) (!(p))
1332 root 1.235
1333 root 1.248 /* away from the root */
1334 root 1.284 inline_speed void
1335 root 1.248 downheap (ANHE *heap, int N, int k)
1336 root 1.1 {
1337 root 1.241 ANHE he = heap [k];
1338 root 1.1
1339 root 1.228 for (;;)
1340 root 1.1 {
1341 root 1.248 int c = k << 1;
1342 root 1.179
1343 root 1.309 if (c >= N + HEAP0)
1344 root 1.179 break;
1345    
1346 root 1.248 c += c + 1 < N + HEAP0 && ANHE_at (heap [c]) > ANHE_at (heap [c + 1])
1347     ? 1 : 0;
1348    
1349     if (ANHE_at (he) <= ANHE_at (heap [c]))
1350     break;
1351    
1352     heap [k] = heap [c];
1353 root 1.241 ev_active (ANHE_w (heap [k])) = k;
1354 root 1.248
1355     k = c;
1356 root 1.1 }
1357    
1358 root 1.243 heap [k] = he;
1359 root 1.248 ev_active (ANHE_w (he)) = k;
1360 root 1.1 }
1361 root 1.248 #endif
1362 root 1.1
1363 root 1.248 /* towards the root */
1364 root 1.284 inline_speed void
1365 root 1.248 upheap (ANHE *heap, int k)
1366 root 1.1 {
1367 root 1.241 ANHE he = heap [k];
1368 root 1.1
1369 root 1.179 for (;;)
1370 root 1.1 {
1371 root 1.248 int p = HPARENT (k);
1372 root 1.179
1373 root 1.248 if (UPHEAP_DONE (p, k) || ANHE_at (heap [p]) <= ANHE_at (he))
1374 root 1.179 break;
1375 root 1.1
1376 root 1.248 heap [k] = heap [p];
1377 root 1.241 ev_active (ANHE_w (heap [k])) = k;
1378 root 1.248 k = p;
1379 root 1.1 }
1380    
1381 root 1.241 heap [k] = he;
1382     ev_active (ANHE_w (he)) = k;
1383 root 1.1 }
1384    
1385 root 1.288 /* move an element suitably so it is in a correct place */
1386 root 1.284 inline_size void
1387 root 1.241 adjustheap (ANHE *heap, int N, int k)
1388 root 1.84 {
1389 root 1.310 if (k > HEAP0 && ANHE_at (heap [k]) <= ANHE_at (heap [HPARENT (k)]))
1390 root 1.247 upheap (heap, k);
1391     else
1392     downheap (heap, N, k);
1393 root 1.84 }
1394    
1395 root 1.248 /* rebuild the heap: this function is used only once and executed rarely */
1396 root 1.284 inline_size void
1397 root 1.248 reheap (ANHE *heap, int N)
1398     {
1399     int i;
1400 root 1.251
1401 root 1.248 /* we don't use floyds algorithm, upheap is simpler and is more cache-efficient */
1402     /* also, this is easy to implement and correct for both 2-heaps and 4-heaps */
1403     for (i = 0; i < N; ++i)
1404     upheap (heap, i + HEAP0);
1405     }
1406    
1407 root 1.8 /*****************************************************************************/
1408    
1409 root 1.288 /* associate signal watchers with a signal */
1410 root 1.7 typedef struct
1411     {
1412 root 1.307 EV_ATOMIC_T pending;
1413 root 1.306 #if EV_MULTIPLICITY
1414     EV_P;
1415     #endif
1416 root 1.68 WL head;
1417 root 1.7 } ANSIG;
1418    
1419 root 1.306 static ANSIG signals [EV_NSIG - 1];
1420 root 1.7
1421 root 1.207 /*****************************************************************************/
1422    
1423 root 1.336 #if EV_SIGNAL_ENABLE || EV_ASYNC_ENABLE
1424 root 1.207
1425 root 1.379 static void noinline ecb_cold
1426 root 1.207 evpipe_init (EV_P)
1427     {
1428 root 1.288 if (!ev_is_active (&pipe_w))
1429 root 1.207 {
1430 root 1.336 # if EV_USE_EVENTFD
1431 root 1.303 evfd = eventfd (0, EFD_NONBLOCK | EFD_CLOEXEC);
1432     if (evfd < 0 && errno == EINVAL)
1433     evfd = eventfd (0, 0);
1434    
1435     if (evfd >= 0)
1436 root 1.220 {
1437     evpipe [0] = -1;
1438 root 1.303 fd_intern (evfd); /* doing it twice doesn't hurt */
1439 root 1.288 ev_io_set (&pipe_w, evfd, EV_READ);
1440 root 1.220 }
1441     else
1442 root 1.336 # endif
1443 root 1.220 {
1444     while (pipe (evpipe))
1445 root 1.269 ev_syserr ("(libev) error creating signal/async pipe");
1446 root 1.207
1447 root 1.220 fd_intern (evpipe [0]);
1448     fd_intern (evpipe [1]);
1449 root 1.288 ev_io_set (&pipe_w, evpipe [0], EV_READ);
1450 root 1.220 }
1451 root 1.207
1452 root 1.288 ev_io_start (EV_A_ &pipe_w);
1453 root 1.210 ev_unref (EV_A); /* watcher should not keep loop alive */
1454 root 1.207 }
1455     }
1456    
1457 root 1.380 inline_speed void
1458 root 1.214 evpipe_write (EV_P_ EV_ATOMIC_T *flag)
1459 root 1.207 {
1460 root 1.383 if (expect_true (*flag))
1461 root 1.385 /*return*//*D*/;
1462 root 1.383
1463     *flag = 1;
1464    
1465 root 1.384 ECB_MEMORY_FENCE_RELEASE; /* make sure flag is visible before the wakeup */
1466 root 1.383
1467     pipe_write_skipped = 1;
1468 root 1.378
1469 root 1.384 ECB_MEMORY_FENCE; /* make sure pipe_write_skipped is visible before we check pipe_write_wanted */
1470 root 1.214
1471 root 1.383 if (pipe_write_wanted)
1472     {
1473     int old_errno;
1474 root 1.378
1475 root 1.384 pipe_write_skipped = 0; /* just an optimisation, no fence needed */
1476 root 1.220
1477 root 1.383 old_errno = errno; /* save errno because write will clobber it */
1478 root 1.380
1479 root 1.220 #if EV_USE_EVENTFD
1480 root 1.383 if (evfd >= 0)
1481     {
1482     uint64_t counter = 1;
1483     write (evfd, &counter, sizeof (uint64_t));
1484     }
1485     else
1486 root 1.220 #endif
1487 root 1.383 {
1488     /* win32 people keep sending patches that change this write() to send() */
1489     /* and then run away. but send() is wrong, it wants a socket handle on win32 */
1490     /* so when you think this write should be a send instead, please find out */
1491     /* where your send() is from - it's definitely not the microsoft send, and */
1492     /* tell me. thank you. */
1493     write (evpipe [1], &(evpipe [1]), 1);
1494     }
1495 root 1.214
1496 root 1.383 errno = old_errno;
1497 root 1.207 }
1498     }
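/* A sketch of the wakeup protocol implied above: writers (signal handlers, async
 * senders) set their flag and call evpipe_write (); the loop side, which is outside
 * this excerpt, sets pipe_write_wanted around its blocking poll and re-checks
 * pipe_write_skipped afterwards, so the comparatively expensive pipe/eventfd write
 * only happens when the loop might actually be blocked. */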
1499    
1500 root 1.288 /* called whenever the libev signal pipe */
1501     /* got some events (signal, async) */
1502 root 1.207 static void
1503     pipecb (EV_P_ ev_io *iow, int revents)
1504     {
1505 root 1.307 int i;
1506    
1507 root 1.378 if (revents & EV_READ)
1508     {
1509 root 1.220 #if EV_USE_EVENTFD
1510 root 1.378 if (evfd >= 0)
1511     {
1512     uint64_t counter;
1513     read (evfd, &counter, sizeof (uint64_t));
1514     }
1515     else
1516 root 1.220 #endif
1517 root 1.378 {
1518     char dummy;
1519     /* see discussion in evpipe_write when you think this read should be recv in win32 */
1520     read (evpipe [0], &dummy, 1);
1521     }
1522 root 1.220 }
1523 root 1.207
1524 root 1.378 pipe_write_skipped = 0;
1525    
1526 root 1.369 #if EV_SIGNAL_ENABLE
1527 root 1.307 if (sig_pending)
1528 root 1.372 {
1529 root 1.307 sig_pending = 0;
1530 root 1.207
1531 root 1.307 for (i = EV_NSIG - 1; i--; )
1532     if (expect_false (signals [i].pending))
1533     ev_feed_signal_event (EV_A_ i + 1);
1534 root 1.207 }
1535 root 1.369 #endif
1536 root 1.207
1537 root 1.209 #if EV_ASYNC_ENABLE
1538 root 1.307 if (async_pending)
1539 root 1.207 {
1540 root 1.307 async_pending = 0;
1541 root 1.207
1542     for (i = asynccnt; i--; )
1543     if (asyncs [i]->sent)
1544     {
1545     asyncs [i]->sent = 0;
1546     ev_feed_event (EV_A_ asyncs [i], EV_ASYNC);
1547     }
1548     }
1549 root 1.209 #endif
1550 root 1.207 }
1551    
1552     /*****************************************************************************/
1553    
1554 root 1.366 void
1555     ev_feed_signal (int signum)
1556 root 1.7 {
1557 root 1.207 #if EV_MULTIPLICITY
1558 root 1.306 EV_P = signals [signum - 1].loop;
1559 root 1.366
1560     if (!EV_A)
1561     return;
1562 root 1.207 #endif
1563    
1564 root 1.381 if (!ev_active (&pipe_w))
1565     return;
1566 root 1.378
1567 root 1.366 signals [signum - 1].pending = 1;
1568     evpipe_write (EV_A_ &sig_pending);
1569     }
1570    
1571     static void
1572     ev_sighandler (int signum)
1573     {
1574 root 1.322 #ifdef _WIN32
1575 root 1.218 signal (signum, ev_sighandler);
1576 root 1.67 #endif
1577    
1578 root 1.366 ev_feed_signal (signum);
1579 root 1.7 }
1580    
1581 root 1.140 void noinline
1582 root 1.79 ev_feed_signal_event (EV_P_ int signum)
1583     {
1584 root 1.80 WL w;
1585    
1586 root 1.307 if (expect_false (signum <= 0 || signum > EV_NSIG))
1587     return;
1588    
1589     --signum;
1590    
1591 root 1.79 #if EV_MULTIPLICITY
1592 root 1.307 /* it is permissible to try to feed a signal to the wrong loop */
1593     /* or, likely more useful, feeding a signal nobody is waiting for */
1594 root 1.79
1595 root 1.307 if (expect_false (signals [signum].loop != EV_A))
1596 root 1.306 return;
1597 root 1.307 #endif
1598 root 1.306
1599 root 1.307 signals [signum].pending = 0;
1600 root 1.79
1601     for (w = signals [signum].head; w; w = w->next)
1602     ev_feed_event (EV_A_ (W)w, EV_SIGNAL);
1603     }
1604    
1605 root 1.303 #if EV_USE_SIGNALFD
1606     static void
1607     sigfdcb (EV_P_ ev_io *iow, int revents)
1608     {
1609 root 1.306 struct signalfd_siginfo si[2], *sip; /* these structs are big */
1610 root 1.303
1611     for (;;)
1612     {
1613     ssize_t res = read (sigfd, si, sizeof (si));
1614    
1615     /* not ISO-C, as res might be -1, but works with SuS */
1616     for (sip = si; (char *)sip < (char *)si + res; ++sip)
1617     ev_feed_signal_event (EV_A_ sip->ssi_signo);
1618    
1619     if (res < (ssize_t)sizeof (si))
1620     break;
1621     }
1622     }
1623     #endif
1624    
1625 root 1.336 #endif
1626    
1627 root 1.8 /*****************************************************************************/
1628    
1629 root 1.336 #if EV_CHILD_ENABLE
1630 root 1.182 static WL childs [EV_PID_HASHSIZE];
1631 root 1.71
1632 root 1.136 static ev_signal childev;
1633 root 1.59
1634 root 1.206 #ifndef WIFCONTINUED
1635     # define WIFCONTINUED(status) 0
1636     #endif
1637    
1638 root 1.288 /* handle a single child status event */
1639 root 1.284 inline_speed void
1640 root 1.216 child_reap (EV_P_ int chain, int pid, int status)
1641 root 1.47 {
1642 root 1.136 ev_child *w;
1643 root 1.206 int traced = WIFSTOPPED (status) || WIFCONTINUED (status);
1644 root 1.47
1645 root 1.338 for (w = (ev_child *)childs [chain & ((EV_PID_HASHSIZE) - 1)]; w; w = (ev_child *)((WL)w)->next)
1646 root 1.206 {
1647     if ((w->pid == pid || !w->pid)
1648     && (!traced || (w->flags & 1)))
1649     {
1650 root 1.216 ev_set_priority (w, EV_MAXPRI); /* need to do it *now*, this *must* be the same prio as the signal watcher itself */
1651 root 1.206 w->rpid = pid;
1652     w->rstatus = status;
1653     ev_feed_event (EV_A_ (W)w, EV_CHILD);
1654     }
1655     }
1656 root 1.47 }
1657    
1658 root 1.142 #ifndef WCONTINUED
1659     # define WCONTINUED 0
1660     #endif
1661    
1662 root 1.288 /* called on sigchld etc., calls waitpid */
1663 root 1.47 static void
1664 root 1.136 childcb (EV_P_ ev_signal *sw, int revents)
1665 root 1.22 {
1666     int pid, status;
1667    
1668 root 1.142 /* some systems define WCONTINUED but then fail to support it (linux 2.4) */
1669     if (0 >= (pid = waitpid (-1, &status, WNOHANG | WUNTRACED | WCONTINUED)))
1670     if (!WCONTINUED
1671     || errno != EINVAL
1672     || 0 >= (pid = waitpid (-1, &status, WNOHANG | WUNTRACED)))
1673     return;
1674    
1675 root 1.216 /* make sure we are called again until all children have been reaped */
1676 root 1.142 /* we need to do it this way so that the callback gets called before we continue */
1677     ev_feed_event (EV_A_ (W)sw, EV_SIGNAL);
1678 root 1.47
1679 root 1.216 child_reap (EV_A_ pid, pid, status);
1680 root 1.338 if ((EV_PID_HASHSIZE) > 1)
1681 root 1.216 child_reap (EV_A_ 0, pid, status); /* this might trigger a watcher twice, but feed_event catches that */
1682 root 1.22 }
1683    
1684 root 1.45 #endif
1685    
1686 root 1.22 /*****************************************************************************/
1687    
1688 root 1.357 #if EV_USE_IOCP
1689     # include "ev_iocp.c"
1690     #endif
1691 root 1.118 #if EV_USE_PORT
1692     # include "ev_port.c"
1693     #endif
1694 root 1.44 #if EV_USE_KQUEUE
1695     # include "ev_kqueue.c"
1696     #endif
1697 root 1.29 #if EV_USE_EPOLL
1698 root 1.1 # include "ev_epoll.c"
1699     #endif
1700 root 1.59 #if EV_USE_POLL
1701 root 1.41 # include "ev_poll.c"
1702     #endif
1703 root 1.29 #if EV_USE_SELECT
1704 root 1.1 # include "ev_select.c"
1705     #endif
1706    
1707 root 1.379 int ecb_cold
1708 root 1.24 ev_version_major (void)
1709     {
1710     return EV_VERSION_MAJOR;
1711     }
1712    
1713 root 1.379 int ecb_cold
1714 root 1.24 ev_version_minor (void)
1715     {
1716     return EV_VERSION_MINOR;
1717     }
1718    
1719 root 1.49 /* return true if we are running with elevated privileges and should ignore env variables */
1720 root 1.379 int inline_size ecb_cold
1721 root 1.51 enable_secure (void)
1722 root 1.41 {
1723 root 1.103 #ifdef _WIN32
1724 root 1.49 return 0;
1725     #else
1726 root 1.41 return getuid () != geteuid ()
1727     || getgid () != getegid ();
1728 root 1.49 #endif
1729 root 1.41 }
1730    
1731 root 1.379 unsigned int ecb_cold
1732 root 1.129 ev_supported_backends (void)
1733     {
1734 root 1.130 unsigned int flags = 0;
1735 root 1.129
1736     if (EV_USE_PORT ) flags |= EVBACKEND_PORT;
1737     if (EV_USE_KQUEUE) flags |= EVBACKEND_KQUEUE;
1738     if (EV_USE_EPOLL ) flags |= EVBACKEND_EPOLL;
1739     if (EV_USE_POLL ) flags |= EVBACKEND_POLL;
1740     if (EV_USE_SELECT) flags |= EVBACKEND_SELECT;
1741    
1742     return flags;
1743     }
1744    
1745 root 1.379 unsigned int ecb_cold
1746 root 1.130 ev_recommended_backends (void)
1747 root 1.1 {
1748 root 1.131 unsigned int flags = ev_supported_backends ();
1749 root 1.129
1750     #ifndef __NetBSD__
1751     /* kqueue is borked on everything but netbsd apparently */
1752     /* it usually doesn't work correctly on anything but sockets and pipes */
1753     flags &= ~EVBACKEND_KQUEUE;
1754     #endif
1755     #ifdef __APPLE__
1756 root 1.278 /* only select works correctly on that "unix-certified" platform */
1757     flags &= ~EVBACKEND_KQUEUE; /* horribly broken, even for sockets */
1758     flags &= ~EVBACKEND_POLL; /* poll is based on kqueue from 10.5 onwards */
1759 root 1.129 #endif
1760 root 1.342 #ifdef __FreeBSD__
1761     flags &= ~EVBACKEND_POLL; /* poll return value is unusable (http://forums.freebsd.org/archive/index.php/t-10270.html) */
1762     #endif
1763 root 1.129
1764     return flags;
1765 root 1.51 }
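/* illustrative sketch, not part of libev: one way an application might combine the
 * backend queries above when creating a loop; make_loop is a made-up helper name */
#if 0
#include <ev.h>

static struct ev_loop *
make_loop (void)
{
  /* start from the recommended set, but also allow kqueue if it was compiled in */
  unsigned int flags = ev_recommended_backends ()
                       | (ev_supported_backends () & EVBACKEND_KQUEUE);

  struct ev_loop *loop = ev_loop_new (flags);

  /* fall back to libev's own choice if that combination yields no working backend */
  return loop ? loop : ev_loop_new (EVFLAG_AUTO);
}
#endif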
1766    
1767 root 1.379 unsigned int ecb_cold
1768 root 1.134 ev_embeddable_backends (void)
1769     {
1770 root 1.196 int flags = EVBACKEND_EPOLL | EVBACKEND_KQUEUE | EVBACKEND_PORT;
1771    
1772 root 1.192 /* epoll embeddability broken on all linux versions up to at least 2.6.23 */
1773 root 1.355 if (ev_linux_version () < 0x020620) /* disable it on linux < 2.6.32 */
1774     flags &= ~EVBACKEND_EPOLL;
1775 root 1.196
1776     return flags;
1777 root 1.134 }
1778    
1779     unsigned int
1780 root 1.130 ev_backend (EV_P)
1781     {
1782     return backend;
1783     }
1784    
1785 root 1.338 #if EV_FEATURE_API
1786 root 1.162 unsigned int
1787 root 1.340 ev_iteration (EV_P)
1788 root 1.162 {
1789     return loop_count;
1790     }
1791    
1792 root 1.294 unsigned int
1793 root 1.340 ev_depth (EV_P)
1794 root 1.294 {
1795     return loop_depth;
1796     }
1797    
1798 root 1.193 void
1799     ev_set_io_collect_interval (EV_P_ ev_tstamp interval)
1800     {
1801     io_blocktime = interval;
1802     }
1803    
1804     void
1805     ev_set_timeout_collect_interval (EV_P_ ev_tstamp interval)
1806     {
1807     timeout_blocktime = interval;
1808     }
1809    
1810 root 1.297 void
1811     ev_set_userdata (EV_P_ void *data)
1812     {
1813     userdata = data;
1814     }
1815    
1816     void *
1817     ev_userdata (EV_P)
1818     {
1819     return userdata;
1820     }
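/* illustrative sketch, not part of libev: ev_set_userdata/ev_userdata are commonly used
 * to hand application state to callbacks without globals; struct app and conn_cb are
 * made-up names */
#if 0
#include <ev.h>

struct app { int connections; };

static void
conn_cb (struct ev_loop *loop, ev_io *w, int revents)
{
  struct app *app = (struct app *)ev_userdata (loop);
  ++app->connections;
}

/* during setup: ev_set_userdata (loop, &the_app); */
#endif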
1821    
1822 root 1.379 void
1823     ev_set_invoke_pending_cb (EV_P_ void (*invoke_pending_cb)(EV_P))
1824 root 1.297 {
1825     invoke_cb = invoke_pending_cb;
1826     }
1827    
1828 root 1.379 void
1829     ev_set_loop_release_cb (EV_P_ void (*release)(EV_P), void (*acquire)(EV_P))
1830 root 1.297 {
1831 root 1.298 release_cb = release;
1832     acquire_cb = acquire;
1833 root 1.297 }
1834     #endif
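/* illustrative sketch, not part of libev: the invoke/release/acquire hooks above exist so
 * other threads can safely operate on the loop; a minimal pthread-mutex variant, with
 * struct userdata, l_release and l_acquire as made-up names */
#if 0
#include <pthread.h>
#include <ev.h>

struct userdata { pthread_mutex_t lock; };

static void
l_release (struct ev_loop *loop)
{
  struct userdata *u = (struct userdata *)ev_userdata (loop);
  pthread_mutex_unlock (&u->lock); /* loop is about to block, let other threads in */
}

static void
l_acquire (struct ev_loop *loop)
{
  struct userdata *u = (struct userdata *)ev_userdata (loop);
  pthread_mutex_lock (&u->lock); /* loop wants to run callbacks again */
}

/* setup: ev_set_userdata (loop, &u);
 *        ev_set_loop_release_cb (loop, l_release, l_acquire);
 * other threads then hold u.lock around any ev_* call on this loop */
#endif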
1835    
1836 root 1.288 /* initialise a loop structure, must be zero-initialised */
1837 root 1.379 static void noinline ecb_cold
1838 root 1.108 loop_init (EV_P_ unsigned int flags)
1839 root 1.51 {
1840 root 1.130 if (!backend)
1841 root 1.23 {
1842 root 1.366 origflags = flags;
1843    
1844 root 1.279 #if EV_USE_REALTIME
1845     if (!have_realtime)
1846     {
1847     struct timespec ts;
1848    
1849     if (!clock_gettime (CLOCK_REALTIME, &ts))
1850     have_realtime = 1;
1851     }
1852     #endif
1853    
1854 root 1.29 #if EV_USE_MONOTONIC
1855 root 1.279 if (!have_monotonic)
1856     {
1857     struct timespec ts;
1858    
1859     if (!clock_gettime (CLOCK_MONOTONIC, &ts))
1860     have_monotonic = 1;
1861     }
1862 root 1.1 #endif
1863    
1864 root 1.306 /* pid check not overridable via env */
1865     #ifndef _WIN32
1866     if (flags & EVFLAG_FORKCHECK)
1867     curpid = getpid ();
1868     #endif
1869    
1870     if (!(flags & EVFLAG_NOENV)
1871     && !enable_secure ()
1872     && getenv ("LIBEV_FLAGS"))
1873     flags = atoi (getenv ("LIBEV_FLAGS"));
1874    
1875 root 1.378 ev_rt_now = ev_time ();
1876     mn_now = get_clock ();
1877     now_floor = mn_now;
1878     rtmn_diff = ev_rt_now - mn_now;
1879 root 1.338 #if EV_FEATURE_API
1880 root 1.378 invoke_cb = ev_invoke_pending;
1881 root 1.297 #endif
1882 root 1.1
1883 root 1.378 io_blocktime = 0.;
1884     timeout_blocktime = 0.;
1885     backend = 0;
1886     backend_fd = -1;
1887     sig_pending = 0;
1888 root 1.307 #if EV_ASYNC_ENABLE
1889 root 1.378 async_pending = 0;
1890 root 1.307 #endif
1891 root 1.378 pipe_write_skipped = 0;
1892     pipe_write_wanted = 0;
1893 root 1.209 #if EV_USE_INOTIFY
1894 root 1.378 fs_fd = flags & EVFLAG_NOINOTIFY ? -1 : -2;
1895 root 1.209 #endif
1896 root 1.303 #if EV_USE_SIGNALFD
1897 root 1.378 sigfd = flags & EVFLAG_SIGNALFD ? -2 : -1;
1898 root 1.303 #endif
1899 root 1.193
1900 root 1.366 if (!(flags & EVBACKEND_MASK))
1901 root 1.129 flags |= ev_recommended_backends ();
1902 root 1.41
1903 root 1.357 #if EV_USE_IOCP
1904     if (!backend && (flags & EVBACKEND_IOCP )) backend = iocp_init (EV_A_ flags);
1905     #endif
1906 root 1.118 #if EV_USE_PORT
1907 root 1.130 if (!backend && (flags & EVBACKEND_PORT )) backend = port_init (EV_A_ flags);
1908 root 1.118 #endif
1909 root 1.44 #if EV_USE_KQUEUE
1910 root 1.130 if (!backend && (flags & EVBACKEND_KQUEUE)) backend = kqueue_init (EV_A_ flags);
1911 root 1.44 #endif
1912 root 1.29 #if EV_USE_EPOLL
1913 root 1.130 if (!backend && (flags & EVBACKEND_EPOLL )) backend = epoll_init (EV_A_ flags);
1914 root 1.41 #endif
1915 root 1.59 #if EV_USE_POLL
1916 root 1.130 if (!backend && (flags & EVBACKEND_POLL )) backend = poll_init (EV_A_ flags);
1917 root 1.1 #endif
1918 root 1.29 #if EV_USE_SELECT
1919 root 1.130 if (!backend && (flags & EVBACKEND_SELECT)) backend = select_init (EV_A_ flags);
1920 root 1.1 #endif
1921 root 1.70
1922 root 1.288 ev_prepare_init (&pending_w, pendingcb);
1923    
1924 root 1.336 #if EV_SIGNAL_ENABLE || EV_ASYNC_ENABLE
1925 root 1.288 ev_init (&pipe_w, pipecb);
1926     ev_set_priority (&pipe_w, EV_MAXPRI);
1927 root 1.336 #endif
1928 root 1.56 }
1929     }
1930    
1931 root 1.288 /* free up a loop structure */
1932 root 1.379 void ecb_cold
1933 root 1.359 ev_loop_destroy (EV_P)
1934 root 1.56 {
1935 root 1.65 int i;
1936    
1937 root 1.364 #if EV_MULTIPLICITY
1938 root 1.363 /* mimic free (0) */
1939     if (!EV_A)
1940     return;
1941 root 1.364 #endif
1942 root 1.363
1943 root 1.361 #if EV_CLEANUP_ENABLE
1944     /* queue cleanup watchers (and execute them) */
1945     if (expect_false (cleanupcnt))
1946     {
1947     queue_events (EV_A_ (W *)cleanups, cleanupcnt, EV_CLEANUP);
1948     EV_INVOKE_PENDING;
1949     }
1950     #endif
1951    
1952 root 1.359 #if EV_CHILD_ENABLE
1953     if (ev_is_active (&childev))
1954     {
1955     ev_ref (EV_A); /* child watcher */
1956     ev_signal_stop (EV_A_ &childev);
1957     }
1958     #endif
1959    
1960 root 1.288 if (ev_is_active (&pipe_w))
1961 root 1.207 {
1962 root 1.303 /*ev_ref (EV_A);*/
1963     /*ev_io_stop (EV_A_ &pipe_w);*/
1964 root 1.207
1965 root 1.220 #if EV_USE_EVENTFD
1966     if (evfd >= 0)
1967     close (evfd);
1968     #endif
1969    
1970     if (evpipe [0] >= 0)
1971     {
1972 root 1.313 EV_WIN32_CLOSE_FD (evpipe [0]);
1973     EV_WIN32_CLOSE_FD (evpipe [1]);
1974 root 1.220 }
1975 root 1.207 }
1976    
1977 root 1.303 #if EV_USE_SIGNALFD
1978     if (ev_is_active (&sigfd_w))
1979 root 1.317 close (sigfd);
1980 root 1.303 #endif
1981    
1982 root 1.152 #if EV_USE_INOTIFY
1983     if (fs_fd >= 0)
1984     close (fs_fd);
1985     #endif
1986    
1987     if (backend_fd >= 0)
1988     close (backend_fd);
1989    
1990 root 1.357 #if EV_USE_IOCP
1991     if (backend == EVBACKEND_IOCP ) iocp_destroy (EV_A);
1992     #endif
1993 root 1.118 #if EV_USE_PORT
1994 root 1.130 if (backend == EVBACKEND_PORT ) port_destroy (EV_A);
1995 root 1.118 #endif
1996 root 1.56 #if EV_USE_KQUEUE
1997 root 1.130 if (backend == EVBACKEND_KQUEUE) kqueue_destroy (EV_A);
1998 root 1.56 #endif
1999     #if EV_USE_EPOLL
2000 root 1.130 if (backend == EVBACKEND_EPOLL ) epoll_destroy (EV_A);
2001 root 1.56 #endif
2002 root 1.59 #if EV_USE_POLL
2003 root 1.130 if (backend == EVBACKEND_POLL ) poll_destroy (EV_A);
2004 root 1.56 #endif
2005     #if EV_USE_SELECT
2006 root 1.130 if (backend == EVBACKEND_SELECT) select_destroy (EV_A);
2007 root 1.56 #endif
2008 root 1.1
2009 root 1.65 for (i = NUMPRI; i--; )
2010 root 1.164 {
2011     array_free (pending, [i]);
2012     #if EV_IDLE_ENABLE
2013     array_free (idle, [i]);
2014     #endif
2015     }
2016 root 1.65
2017 root 1.305 ev_free (anfds); anfds = 0; anfdmax = 0;
2018 root 1.186
2019 root 1.71 /* have to use the microsoft-never-gets-it-right macro */
2020 root 1.284 array_free (rfeed, EMPTY);
2021 root 1.164 array_free (fdchange, EMPTY);
2022     array_free (timer, EMPTY);
2023 root 1.140 #if EV_PERIODIC_ENABLE
2024 root 1.164 array_free (periodic, EMPTY);
2025 root 1.93 #endif
2026 root 1.187 #if EV_FORK_ENABLE
2027     array_free (fork, EMPTY);
2028     #endif
2029 root 1.360 #if EV_CLEANUP_ENABLE
2030     array_free (cleanup, EMPTY);
2031     #endif
2032 root 1.164 array_free (prepare, EMPTY);
2033     array_free (check, EMPTY);
2034 root 1.209 #if EV_ASYNC_ENABLE
2035     array_free (async, EMPTY);
2036     #endif
2037 root 1.65
2038 root 1.130 backend = 0;
2039 root 1.359
2040     #if EV_MULTIPLICITY
2041     if (ev_is_default_loop (EV_A))
2042     #endif
2043     ev_default_loop_ptr = 0;
2044     #if EV_MULTIPLICITY
2045     else
2046     ev_free (EV_A);
2047     #endif
2048 root 1.56 }
2049 root 1.22
2050 root 1.226 #if EV_USE_INOTIFY
2051 root 1.284 inline_size void infy_fork (EV_P);
2052 root 1.226 #endif
2053 root 1.154
2054 root 1.284 inline_size void
2055 root 1.56 loop_fork (EV_P)
2056     {
2057 root 1.118 #if EV_USE_PORT
2058 root 1.130 if (backend == EVBACKEND_PORT ) port_fork (EV_A);
2059 root 1.56 #endif
2060     #if EV_USE_KQUEUE
2061 root 1.130 if (backend == EVBACKEND_KQUEUE) kqueue_fork (EV_A);
2062 root 1.45 #endif
2063 root 1.118 #if EV_USE_EPOLL
2064 root 1.130 if (backend == EVBACKEND_EPOLL ) epoll_fork (EV_A);
2065 root 1.118 #endif
2066 root 1.154 #if EV_USE_INOTIFY
2067     infy_fork (EV_A);
2068     #endif
2069 root 1.70
2070 root 1.288 if (ev_is_active (&pipe_w))
2071 root 1.70 {
2072 root 1.378 /* pipe_write_wanted must be false now, so modifying fd vars should be safe */
2073 root 1.70
2074     ev_ref (EV_A);
2075 root 1.288 ev_io_stop (EV_A_ &pipe_w);
2076 root 1.220
2077     #if EV_USE_EVENTFD
2078     if (evfd >= 0)
2079     close (evfd);
2080     #endif
2081    
2082     if (evpipe [0] >= 0)
2083     {
2084 root 1.313 EV_WIN32_CLOSE_FD (evpipe [0]);
2085     EV_WIN32_CLOSE_FD (evpipe [1]);
2086 root 1.220 }
2087 root 1.207
2088 root 1.337 #if EV_SIGNAL_ENABLE || EV_ASYNC_ENABLE
2089 root 1.207 evpipe_init (EV_A);
2090 root 1.208 /* now iterate over everything, in case we missed something */
2091 root 1.288 pipecb (EV_A_ &pipe_w, EV_READ);
2092 root 1.337 #endif
2093 root 1.70 }
2094    
2095     postfork = 0;
2096 root 1.1 }
2097    
2098 root 1.55 #if EV_MULTIPLICITY
2099 root 1.250
2100 root 1.379 struct ev_loop * ecb_cold
2101 root 1.108 ev_loop_new (unsigned int flags)
2102 root 1.54 {
2103 root 1.306 EV_P = (struct ev_loop *)ev_malloc (sizeof (struct ev_loop));
2104 root 1.69
2105 root 1.306 memset (EV_A, 0, sizeof (struct ev_loop));
2106 root 1.108 loop_init (EV_A_ flags);
2107 root 1.56
2108 root 1.130 if (ev_backend (EV_A))
2109 root 1.306 return EV_A;
2110 root 1.54
2111 root 1.359 ev_free (EV_A);
2112 root 1.55 return 0;
2113 root 1.54 }
2114    
2115 root 1.297 #endif /* multiplicity */
2116 root 1.248
2117     #if EV_VERIFY
2118 root 1.379 static void noinline ecb_cold
2119 root 1.251 verify_watcher (EV_P_ W w)
2120     {
2121 root 1.278 assert (("libev: watcher has invalid priority", ABSPRI (w) >= 0 && ABSPRI (w) < NUMPRI));
2122 root 1.251
2123     if (w->pending)
2124 root 1.278 assert (("libev: pending watcher not on pending queue", pendings [ABSPRI (w)][w->pending - 1].w == w));
2125 root 1.251 }
2126    
2127 root 1.379 static void noinline ecb_cold
2128 root 1.251 verify_heap (EV_P_ ANHE *heap, int N)
2129     {
2130     int i;
2131    
2132     for (i = HEAP0; i < N + HEAP0; ++i)
2133     {
2134 root 1.278 assert (("libev: active index mismatch in heap", ev_active (ANHE_w (heap [i])) == i));
2135     assert (("libev: heap condition violated", i == HEAP0 || ANHE_at (heap [HPARENT (i)]) <= ANHE_at (heap [i])));
2136     assert (("libev: heap at cache mismatch", ANHE_at (heap [i]) == ev_at (ANHE_w (heap [i]))));
2137 root 1.251
2138     verify_watcher (EV_A_ (W)ANHE_w (heap [i]));
2139     }
2140     }
2141    
2142 root 1.379 static void noinline ecb_cold
2143 root 1.251 array_verify (EV_P_ W *ws, int cnt)
2144 root 1.248 {
2145     while (cnt--)
2146 root 1.251 {
2147 root 1.278 assert (("libev: active index mismatch", ev_active (ws [cnt]) == cnt + 1));
2148 root 1.251 verify_watcher (EV_A_ ws [cnt]);
2149     }
2150 root 1.248 }
2151 root 1.250 #endif
2152 root 1.248
2153 root 1.338 #if EV_FEATURE_API
2154 root 1.379 void ecb_cold
2155 root 1.340 ev_verify (EV_P)
2156 root 1.248 {
2157 root 1.250 #if EV_VERIFY
2158 root 1.248 int i;
2159 root 1.251 WL w;
2160    
2161     assert (activecnt >= -1);
2162    
2163     assert (fdchangemax >= fdchangecnt);
2164     for (i = 0; i < fdchangecnt; ++i)
2165 root 1.278 assert (("libev: negative fd in fdchanges", fdchanges [i] >= 0));
2166 root 1.251
2167     assert (anfdmax >= 0);
2168     for (i = 0; i < anfdmax; ++i)
2169     for (w = anfds [i].head; w; w = w->next)
2170     {
2171     verify_watcher (EV_A_ (W)w);
2172 root 1.278 assert (("libev: inactive fd watcher on anfd list", ev_active (w) == 1));
2173     assert (("libev: fd mismatch between watcher and anfd", ((ev_io *)w)->fd == i));
2174 root 1.251 }
2175    
2176     assert (timermax >= timercnt);
2177     verify_heap (EV_A_ timers, timercnt);
2178 root 1.248
2179     #if EV_PERIODIC_ENABLE
2180 root 1.251 assert (periodicmax >= periodiccnt);
2181     verify_heap (EV_A_ periodics, periodiccnt);
2182 root 1.248 #endif
2183    
2184 root 1.251 for (i = NUMPRI; i--; )
2185     {
2186     assert (pendingmax [i] >= pendingcnt [i]);
2187 root 1.248 #if EV_IDLE_ENABLE
2188 root 1.252 assert (idleall >= 0);
2189 root 1.251 assert (idlemax [i] >= idlecnt [i]);
2190     array_verify (EV_A_ (W *)idles [i], idlecnt [i]);
2191 root 1.248 #endif
2192 root 1.251 }
2193    
2194 root 1.248 #if EV_FORK_ENABLE
2195 root 1.251 assert (forkmax >= forkcnt);
2196     array_verify (EV_A_ (W *)forks, forkcnt);
2197 root 1.248 #endif
2198 root 1.251
2199 root 1.360 #if EV_CLEANUP_ENABLE
2200     assert (cleanupmax >= cleanupcnt);
2201     array_verify (EV_A_ (W *)cleanups, cleanupcnt);
2202     #endif
2203    
2204 root 1.250 #if EV_ASYNC_ENABLE
2205 root 1.251 assert (asyncmax >= asynccnt);
2206     array_verify (EV_A_ (W *)asyncs, asynccnt);
2207 root 1.250 #endif
2208 root 1.251
2209 root 1.337 #if EV_PREPARE_ENABLE
2210 root 1.251 assert (preparemax >= preparecnt);
2211     array_verify (EV_A_ (W *)prepares, preparecnt);
2212 root 1.337 #endif
2213 root 1.251
2214 root 1.337 #if EV_CHECK_ENABLE
2215 root 1.251 assert (checkmax >= checkcnt);
2216     array_verify (EV_A_ (W *)checks, checkcnt);
2217 root 1.337 #endif
2218 root 1.251
2219     # if 0
2220 root 1.336 #if EV_CHILD_ENABLE
2221 root 1.338 for (w = (ev_child *)childs [chain & ((EV_PID_HASHSIZE) - 1)]; w; w = (ev_child *)((WL)w)->next)
2222 root 1.307 for (signum = EV_NSIG; signum--; ) if (signals [signum].pending)
2223 root 1.336 #endif
2224 root 1.251 # endif
2225 root 1.248 #endif
2226     }
2227 root 1.297 #endif
2228 root 1.56
2229     #if EV_MULTIPLICITY
2230 root 1.379 struct ev_loop * ecb_cold
2231 root 1.54 #else
2232     int
2233 root 1.358 #endif
2234 root 1.116 ev_default_loop (unsigned int flags)
2235 root 1.54 {
2236 root 1.116 if (!ev_default_loop_ptr)
2237 root 1.56 {
2238     #if EV_MULTIPLICITY
2239 root 1.306 EV_P = ev_default_loop_ptr = &default_loop_struct;
2240 root 1.56 #else
2241 ayin 1.117 ev_default_loop_ptr = 1;
2242 root 1.54 #endif
2243    
2244 root 1.110 loop_init (EV_A_ flags);
2245 root 1.56
2246 root 1.130 if (ev_backend (EV_A))
2247 root 1.56 {
2248 root 1.336 #if EV_CHILD_ENABLE
2249 root 1.56 ev_signal_init (&childev, childcb, SIGCHLD);
2250     ev_set_priority (&childev, EV_MAXPRI);
2251     ev_signal_start (EV_A_ &childev);
2252     ev_unref (EV_A); /* child watcher should not keep loop alive */
2253     #endif
2254     }
2255     else
2256 root 1.116 ev_default_loop_ptr = 0;
2257 root 1.56 }
2258 root 1.8
2259 root 1.116 return ev_default_loop_ptr;
2260 root 1.1 }
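/* illustrative sketch, not part of libev: typical default-loop setup in an application */
#if 0
#include <ev.h>

int
main (void)
{
  struct ev_loop *loop = ev_default_loop (EVFLAG_AUTO);

  if (!loop)
    return 1; /* no usable backend could be initialised */

  /* ... init and start watchers here ... */

  ev_run (loop, 0);
  return 0;
}
#endif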
2261    
2262 root 1.24 void
2263 root 1.359 ev_loop_fork (EV_P)
2264 root 1.1 {
2265 root 1.359 postfork = 1; /* must be in line with ev_default_fork */
2266 root 1.1 }
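/* illustrative sketch, not part of libev: after fork(), the child must call ev_loop_fork
 * before the next ev_run iteration so backend kernel state gets re-created; fork_and_run
 * is a made-up helper name */
#if 0
#include <unistd.h>
#include <ev.h>

static void
fork_and_run (struct ev_loop *loop)
{
  pid_t pid = fork ();

  if (pid == 0)
    {
      ev_loop_fork (loop); /* child: re-arm backend fds on the next iteration */
      ev_run (loop, 0);
      _exit (0);
    }
}
#endif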
2267    
2268 root 1.8 /*****************************************************************************/
2269    
2270 root 1.168 void
2271     ev_invoke (EV_P_ void *w, int revents)
2272     {
2273     EV_CB_INVOKE ((W)w, revents);
2274     }
2275    
2276 root 1.300 unsigned int
2277     ev_pending_count (EV_P)
2278     {
2279     int pri;
2280     unsigned int count = 0;
2281    
2282     for (pri = NUMPRI; pri--; )
2283     count += pendingcnt [pri];
2284    
2285     return count;
2286     }
2287    
2288 root 1.297 void noinline
2289 root 1.296 ev_invoke_pending (EV_P)
2290 root 1.1 {
2291 root 1.42 int pri;
2292    
2293     for (pri = NUMPRI; pri--; )
2294     while (pendingcnt [pri])
2295     {
2296     ANPENDING *p = pendings [pri] + --pendingcnt [pri];
2297 root 1.1
2298 root 1.288 p->w->pending = 0;
2299     EV_CB_INVOKE (p->w, p->events);
2300     EV_FREQUENT_CHECK;
2301 root 1.42 }
2302 root 1.1 }
2303    
2304 root 1.234 #if EV_IDLE_ENABLE
2305 root 1.288 /* make idle watchers pending. this handles the "call-idle */
2306     /* only when higher priorities are idle" logic */
2307 root 1.284 inline_size void
2308 root 1.234 idle_reify (EV_P)
2309     {
2310     if (expect_false (idleall))
2311     {
2312     int pri;
2313    
2314     for (pri = NUMPRI; pri--; )
2315     {
2316     if (pendingcnt [pri])
2317     break;
2318    
2319     if (idlecnt [pri])
2320     {
2321     queue_events (EV_A_ (W *)idles [pri], idlecnt [pri], EV_IDLE);
2322     break;
2323     }
2324     }
2325     }
2326     }
2327     #endif
2328    
2329 root 1.288 /* make timers pending */
2330 root 1.284 inline_size void
2331 root 1.51 timers_reify (EV_P)
2332 root 1.1 {
2333 root 1.248 EV_FREQUENT_CHECK;
2334    
2335 root 1.284 if (timercnt && ANHE_at (timers [HEAP0]) < mn_now)
2336 root 1.1 {
2337 root 1.284 do
2338     {
2339     ev_timer *w = (ev_timer *)ANHE_w (timers [HEAP0]);
2340 root 1.1
2341 root 1.284 /*assert (("libev: inactive timer on timer heap detected", ev_is_active (w)));*/
2342    
2343     /* first reschedule or stop timer */
2344     if (w->repeat)
2345     {
2346     ev_at (w) += w->repeat;
2347     if (ev_at (w) < mn_now)
2348     ev_at (w) = mn_now;
2349 root 1.61
2350 root 1.284 assert (("libev: negative ev_timer repeat value found while processing timers", w->repeat > 0.));
2351 root 1.90
2352 root 1.284 ANHE_at_cache (timers [HEAP0]);
2353     downheap (timers, timercnt, HEAP0);
2354     }
2355     else
2356     ev_timer_stop (EV_A_ w); /* nonrepeating: stop timer */
2357 root 1.243
2358 root 1.284 EV_FREQUENT_CHECK;
2359     feed_reverse (EV_A_ (W)w);
2360 root 1.12 }
2361 root 1.284 while (timercnt && ANHE_at (timers [HEAP0]) < mn_now);
2362 root 1.30
2363 root 1.341 feed_reverse_done (EV_A_ EV_TIMER);
2364 root 1.12 }
2365     }
2366 root 1.4
2367 root 1.140 #if EV_PERIODIC_ENABLE
2368 root 1.370
2369 root 1.373 static void noinline
2370 root 1.370 periodic_recalc (EV_P_ ev_periodic *w)
2371     {
2372 root 1.373 ev_tstamp interval = w->interval > MIN_INTERVAL ? w->interval : MIN_INTERVAL;
2373     ev_tstamp at = w->offset + interval * ev_floor ((ev_rt_now - w->offset) / interval);
2374    
2375     /* the above almost always errs on the low side */
2376     while (at <= ev_rt_now)
2377     {
2378     ev_tstamp nat = at + w->interval;
2379    
2380     /* when resolution fails us, we use ev_rt_now */
2381     if (expect_false (nat == at))
2382     {
2383     at = ev_rt_now;
2384     break;
2385     }
2386    
2387     at = nat;
2388     }
2389    
2390     ev_at (w) = at;
2391 root 1.370 }
2392    
2393 root 1.288 /* make periodics pending */
2394 root 1.284 inline_size void
2395 root 1.51 periodics_reify (EV_P)
2396 root 1.12 {
2397 root 1.248 EV_FREQUENT_CHECK;
2398 root 1.250
2399 root 1.244 while (periodiccnt && ANHE_at (periodics [HEAP0]) < ev_rt_now)
2400 root 1.12 {
2401 root 1.284 int feed_count = 0;
2402    
2403     do
2404     {
2405     ev_periodic *w = (ev_periodic *)ANHE_w (periodics [HEAP0]);
2406 root 1.1
2407 root 1.284 /*assert (("libev: inactive timer on periodic heap detected", ev_is_active (w)));*/
2408 root 1.61
2409 root 1.284 /* first reschedule or stop timer */
2410     if (w->reschedule_cb)
2411     {
2412     ev_at (w) = w->reschedule_cb (w, ev_rt_now);
2413 root 1.243
2414 root 1.284 assert (("libev: ev_periodic reschedule callback returned time in the past", ev_at (w) >= ev_rt_now));
2415 root 1.243
2416 root 1.284 ANHE_at_cache (periodics [HEAP0]);
2417     downheap (periodics, periodiccnt, HEAP0);
2418     }
2419     else if (w->interval)
2420 root 1.246 {
2421 root 1.370 periodic_recalc (EV_A_ w);
2422 root 1.284 ANHE_at_cache (periodics [HEAP0]);
2423     downheap (periodics, periodiccnt, HEAP0);
2424 root 1.246 }
2425 root 1.284 else
2426     ev_periodic_stop (EV_A_ w); /* nonrepeating: stop timer */
2427 root 1.243
2428 root 1.284 EV_FREQUENT_CHECK;
2429     feed_reverse (EV_A_ (W)w);
2430 root 1.1 }
2431 root 1.284 while (periodiccnt && ANHE_at (periodics [HEAP0]) < ev_rt_now);
2432 root 1.12
2433 root 1.284 feed_reverse_done (EV_A_ EV_PERIODIC);
2434 root 1.12 }
2435     }
2436    
2437 root 1.288 /* simply recalculate all periodics */
2438 sf-exg 1.345 /* TODO: maybe ensure that at least one event happens when jumping forward? */
2439 root 1.379 static void noinline ecb_cold
2440 root 1.54 periodics_reschedule (EV_P)
2441 root 1.12 {
2442     int i;
2443    
2444 root 1.13 /* adjust periodics after time jump */
2445 root 1.241 for (i = HEAP0; i < periodiccnt + HEAP0; ++i)
2446 root 1.12 {
2447 root 1.241 ev_periodic *w = (ev_periodic *)ANHE_w (periodics [i]);
2448 root 1.12
2449 root 1.77 if (w->reschedule_cb)
2450 root 1.228 ev_at (w) = w->reschedule_cb (w, ev_rt_now);
2451 root 1.77 else if (w->interval)
2452 root 1.370 periodic_recalc (EV_A_ w);
2453 root 1.242
2454 root 1.248 ANHE_at_cache (periodics [i]);
2455 root 1.77 }
2456 root 1.12
2457 root 1.248 reheap (periodics, periodiccnt);
2458 root 1.1 }
2459 root 1.93 #endif
2460 root 1.1
2461 root 1.288 /* adjust all timers by a given offset */
2462 root 1.379 static void noinline ecb_cold
2463 root 1.285 timers_reschedule (EV_P_ ev_tstamp adjust)
2464     {
2465     int i;
2466    
2467     for (i = 0; i < timercnt; ++i)
2468     {
2469     ANHE *he = timers + i + HEAP0;
2470     ANHE_w (*he)->at += adjust;
2471     ANHE_at_cache (*he);
2472     }
2473     }
2474    
2475 root 1.288 /* fetch new monotonic and realtime times from the kernel */
2476 root 1.324 /* also detect if there was a timejump, and act accordingly */
2477 root 1.284 inline_speed void
2478 root 1.178 time_update (EV_P_ ev_tstamp max_block)
2479 root 1.4 {
2480 root 1.40 #if EV_USE_MONOTONIC
2481     if (expect_true (have_monotonic))
2482     {
2483 root 1.289 int i;
2484 root 1.178 ev_tstamp odiff = rtmn_diff;
2485    
2486     mn_now = get_clock ();
2487    
2488     /* only fetch the realtime clock every 0.5*MIN_TIMEJUMP seconds */
2489     /* interpolate in the meantime */
2490     if (expect_true (mn_now - now_floor < MIN_TIMEJUMP * .5))
2491 root 1.40 {
2492 root 1.178 ev_rt_now = rtmn_diff + mn_now;
2493     return;
2494     }
2495    
2496     now_floor = mn_now;
2497     ev_rt_now = ev_time ();
2498 root 1.4
2499 root 1.178 /* loop a few times, before making important decisions.
2500     * on the choice of "4": one iteration isn't enough,
2501     * in case we get preempted during the calls to
2502     * ev_time and get_clock. a second call is almost guaranteed
2503     * to succeed in that case, though. and looping a few more times
2504     * doesn't hurt either as we only do this on time-jumps or
2505     * in the unlikely event of having been preempted here.
2506     */
2507     for (i = 4; --i; )
2508     {
2509 root 1.373 ev_tstamp diff;
2510 root 1.178 rtmn_diff = ev_rt_now - mn_now;
2511 root 1.4
2512 root 1.373 diff = odiff - rtmn_diff;
2513    
2514     if (expect_true ((diff < 0. ? -diff : diff) < MIN_TIMEJUMP))
2515 root 1.178 return; /* all is well */
2516 root 1.4
2517 root 1.178 ev_rt_now = ev_time ();
2518     mn_now = get_clock ();
2519     now_floor = mn_now;
2520     }
2521 root 1.4
2522 root 1.285 /* no timer adjustment, as the monotonic clock doesn't jump */
2523     /* timers_reschedule (EV_A_ rtmn_diff - odiff) */
2524 root 1.140 # if EV_PERIODIC_ENABLE
2525 root 1.178 periodics_reschedule (EV_A);
2526 root 1.93 # endif
2527 root 1.4 }
2528     else
2529 root 1.40 #endif
2530 root 1.4 {
2531 root 1.85 ev_rt_now = ev_time ();
2532 root 1.40
2533 root 1.178 if (expect_false (mn_now > ev_rt_now || ev_rt_now > mn_now + max_block + MIN_TIMEJUMP))
2534 root 1.13 {
2535 root 1.285 /* adjust timers. this is easy, as the offset is the same for all of them */
2536     timers_reschedule (EV_A_ ev_rt_now - mn_now);
2537 root 1.140 #if EV_PERIODIC_ENABLE
2538 root 1.54 periodics_reschedule (EV_A);
2539 root 1.93 #endif
2540 root 1.13 }
2541 root 1.4
2542 root 1.85 mn_now = ev_rt_now;
2543 root 1.4 }
2544     }
2545    
2546 root 1.51 void
2547 root 1.353 ev_run (EV_P_ int flags)
2548 root 1.1 {
2549 root 1.338 #if EV_FEATURE_API
2550 root 1.294 ++loop_depth;
2551 root 1.297 #endif
2552 root 1.294
2553 root 1.353 assert (("libev: ev_loop recursion during release detected", loop_done != EVBREAK_RECURSE));
2554 root 1.298
2555 root 1.353 loop_done = EVBREAK_CANCEL;
2556 root 1.1
2557 root 1.297 EV_INVOKE_PENDING; /* in case we recurse, ensure ordering stays nice and clean */
2558 root 1.158
2559 root 1.161 do
2560 root 1.9 {
2561 root 1.250 #if EV_VERIFY >= 2
2562 root 1.340 ev_verify (EV_A);
2563 root 1.250 #endif
2564    
2565 root 1.158 #ifndef _WIN32
2566     if (expect_false (curpid)) /* penalise the forking check even more */
2567     if (expect_false (getpid () != curpid))
2568     {
2569     curpid = getpid ();
2570     postfork = 1;
2571     }
2572     #endif
2573    
2574 root 1.157 #if EV_FORK_ENABLE
2575     /* we might have forked, so queue fork handlers */
2576     if (expect_false (postfork))
2577     if (forkcnt)
2578     {
2579     queue_events (EV_A_ (W *)forks, forkcnt, EV_FORK);
2580 root 1.297 EV_INVOKE_PENDING;
2581 root 1.157 }
2582     #endif
2583 root 1.147
2584 root 1.337 #if EV_PREPARE_ENABLE
2585 root 1.170 /* queue prepare watchers (and execute them) */
2586 root 1.40 if (expect_false (preparecnt))
2587 root 1.20 {
2588 root 1.51 queue_events (EV_A_ (W *)prepares, preparecnt, EV_PREPARE);
2589 root 1.297 EV_INVOKE_PENDING;
2590 root 1.20 }
2591 root 1.337 #endif
2592 root 1.9
2593 root 1.298 if (expect_false (loop_done))
2594     break;
2595    
2596 root 1.70 /* we might have forked, so reify kernel state if necessary */
2597     if (expect_false (postfork))
2598     loop_fork (EV_A);
2599    
2600 root 1.1 /* update fd-related kernel structures */
2601 root 1.51 fd_reify (EV_A);
2602 root 1.1
2603     /* calculate blocking time */
2604 root 1.135 {
2605 root 1.193 ev_tstamp waittime = 0.;
2606     ev_tstamp sleeptime = 0.;
2607 root 1.12
2608 root 1.353 /* remember old timestamp for io_blocktime calculation */
2609     ev_tstamp prev_mn_now = mn_now;
2610 root 1.293
2611 root 1.353 /* update time to cancel out callback processing overhead */
2612     time_update (EV_A_ 1e100);
2613 root 1.135
2614 root 1.378 /* from now on, we want a pipe-wake-up */
2615     pipe_write_wanted = 1;
2616    
2617 root 1.384 ECB_MEMORY_FENCE; /* make sure pipe_write_wanted is visible before we check for potential skips */
2618 root 1.383
2619 root 1.378 if (expect_true (!(flags & EVRUN_NOWAIT || idleall || !activecnt || pipe_write_skipped)))
2620 root 1.353 {
2621 root 1.287 waittime = MAX_BLOCKTIME;
2622    
2623 root 1.135 if (timercnt)
2624     {
2625 root 1.377 ev_tstamp to = ANHE_at (timers [HEAP0]) - mn_now;
2626 root 1.193 if (waittime > to) waittime = to;
2627 root 1.135 }
2628 root 1.4
2629 root 1.140 #if EV_PERIODIC_ENABLE
2630 root 1.135 if (periodiccnt)
2631     {
2632 root 1.377 ev_tstamp to = ANHE_at (periodics [HEAP0]) - ev_rt_now;
2633 root 1.193 if (waittime > to) waittime = to;
2634 root 1.135 }
2635 root 1.93 #endif
2636 root 1.4
2637 root 1.293 /* don't let timeouts decrease the waittime below timeout_blocktime */
2638 root 1.193 if (expect_false (waittime < timeout_blocktime))
2639     waittime = timeout_blocktime;
2640    
2641 root 1.377 /* at this point, we NEED to wait, so we have to make sure */
2642     /* to pass a minimum nonzero value to the backend */
2643     if (expect_false (waittime < backend_mintime))
2644     waittime = backend_mintime;
2645    
2646 root 1.293 /* extra check because io_blocktime is commonly 0 */
2647     if (expect_false (io_blocktime))
2648     {
2649     sleeptime = io_blocktime - (mn_now - prev_mn_now);
2650 root 1.193
2651 root 1.376 if (sleeptime > waittime - backend_mintime)
2652     sleeptime = waittime - backend_mintime;
2653 root 1.193
2654 root 1.293 if (expect_true (sleeptime > 0.))
2655     {
2656     ev_sleep (sleeptime);
2657     waittime -= sleeptime;
2658     }
2659 root 1.193 }
2660 root 1.135 }
2661 root 1.1
2662 root 1.338 #if EV_FEATURE_API
2663 root 1.162 ++loop_count;
2664 root 1.297 #endif
2665 root 1.353 assert ((loop_done = EVBREAK_RECURSE, 1)); /* assert for side effect */
2666 root 1.193 backend_poll (EV_A_ waittime);
2667 root 1.353 assert ((loop_done = EVBREAK_CANCEL, 1)); /* assert for side effect */
2668 root 1.178
2669 root 1.384 pipe_write_wanted = 0; /* just an optimisation, no fence needed */
2670 root 1.378
2671     if (pipe_write_skipped)
2672     {
2673     assert (("libev: pipe_w not active, but pipe not written", ev_is_active (&pipe_w)));
2674     ev_feed_event (EV_A_ &pipe_w, EV_CUSTOM);
2675     }
2676    
2677    
2678 root 1.178 /* update ev_rt_now, do magic */
2679 root 1.193 time_update (EV_A_ waittime + sleeptime);
2680 root 1.135 }
2681 root 1.1
2682 root 1.9 /* queue pending timers and reschedule them */
2683 root 1.51 timers_reify (EV_A); /* relative timers called last */
2684 root 1.140 #if EV_PERIODIC_ENABLE
2685 root 1.51 periodics_reify (EV_A); /* absolute timers called first */
2686 root 1.93 #endif
2687 root 1.1
2688 root 1.164 #if EV_IDLE_ENABLE
2689 root 1.137 /* queue idle watchers unless other events are pending */
2690 root 1.164 idle_reify (EV_A);
2691     #endif
2692 root 1.9
2693 root 1.337 #if EV_CHECK_ENABLE
2694 root 1.20 /* queue check watchers, to be executed first */
2695 root 1.123 if (expect_false (checkcnt))
2696 root 1.51 queue_events (EV_A_ (W *)checks, checkcnt, EV_CHECK);
2697 root 1.337 #endif
2698 root 1.9
2699 root 1.297 EV_INVOKE_PENDING;
2700 root 1.1 }
2701 root 1.219 while (expect_true (
2702     activecnt
2703     && !loop_done
2704 root 1.353 && !(flags & (EVRUN_ONCE | EVRUN_NOWAIT))
2705 root 1.219 ));
2706 root 1.13
2707 root 1.353 if (loop_done == EVBREAK_ONE)
2708     loop_done = EVBREAK_CANCEL;
2709 root 1.294
2710 root 1.338 #if EV_FEATURE_API
2711 root 1.294 --loop_depth;
2712 root 1.297 #endif
2713 root 1.51 }
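/* illustrative sketch, not part of libev: driving ev_run with one I/O watcher and
 * leaving the loop from its callback via ev_break */
#if 0
#include <ev.h>

static void
stdin_cb (struct ev_loop *loop, ev_io *w, int revents)
{
  ev_io_stop (loop, w);
  ev_break (loop, EVBREAK_ALL); /* break out of all nested ev_run calls */
}

int
main (void)
{
  struct ev_loop *loop = ev_default_loop (0);
  ev_io stdin_watcher;

  ev_io_init (&stdin_watcher, stdin_cb, 0 /* stdin */, EV_READ);
  ev_io_start (loop, &stdin_watcher);

  ev_run (loop, 0); /* returns when no watchers keep the loop alive or ev_break was called */
  return 0;
}
#endif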
2714    
2715     void
2716 root 1.353 ev_break (EV_P_ int how)
2717 root 1.51 {
2718     loop_done = how;
2719 root 1.1 }
2720    
2721 root 1.285 void
2722     ev_ref (EV_P)
2723     {
2724     ++activecnt;
2725     }
2726    
2727     void
2728     ev_unref (EV_P)
2729     {
2730     --activecnt;
2731     }
2732    
2733     void
2734     ev_now_update (EV_P)
2735     {
2736     time_update (EV_A_ 1e100);
2737     }
2738    
2739     void
2740     ev_suspend (EV_P)
2741     {
2742     ev_now_update (EV_A);
2743     }
2744    
2745     void
2746     ev_resume (EV_P)
2747     {
2748     ev_tstamp mn_prev = mn_now;
2749    
2750     ev_now_update (EV_A);
2751     timers_reschedule (EV_A_ mn_now - mn_prev);
2752 root 1.286 #if EV_PERIODIC_ENABLE
2753 root 1.288 /* TODO: really do this? */
2754 root 1.285 periodics_reschedule (EV_A);
2755 root 1.286 #endif
2756 root 1.285 }
2757    
2758 root 1.8 /*****************************************************************************/
2759 root 1.288 /* singly-linked list management, used when the expected list length is short */
2760 root 1.8
2761 root 1.284 inline_size void
2762 root 1.10 wlist_add (WL *head, WL elem)
2763 root 1.1 {
2764     elem->next = *head;
2765     *head = elem;
2766     }
2767    
2768 root 1.284 inline_size void
2769 root 1.10 wlist_del (WL *head, WL elem)
2770 root 1.1 {
2771     while (*head)
2772     {
2773 root 1.307 if (expect_true (*head == elem))
2774 root 1.1 {
2775     *head = elem->next;
2776 root 1.307 break;
2777 root 1.1 }
2778    
2779     head = &(*head)->next;
2780     }
2781     }
2782    
2783 root 1.288 /* internal, faster, version of ev_clear_pending */
2784 root 1.284 inline_speed void
2785 root 1.166 clear_pending (EV_P_ W w)
2786 root 1.16 {
2787     if (w->pending)
2788     {
2789 root 1.288 pendings [ABSPRI (w)][w->pending - 1].w = (W)&pending_w;
2790 root 1.16 w->pending = 0;
2791     }
2792     }
2793    
2794 root 1.167 int
2795     ev_clear_pending (EV_P_ void *w)
2796 root 1.166 {
2797     W w_ = (W)w;
2798     int pending = w_->pending;
2799    
2800 root 1.172 if (expect_true (pending))
2801     {
2802     ANPENDING *p = pendings [ABSPRI (w_)] + pending - 1;
2803 root 1.288 p->w = (W)&pending_w;
2804 root 1.172 w_->pending = 0;
2805     return p->events;
2806     }
2807     else
2808 root 1.167 return 0;
2809 root 1.166 }
2810    
2811 root 1.284 inline_size void
2812 root 1.164 pri_adjust (EV_P_ W w)
2813     {
2814 root 1.295 int pri = ev_priority (w);
2815 root 1.164 pri = pri < EV_MINPRI ? EV_MINPRI : pri;
2816     pri = pri > EV_MAXPRI ? EV_MAXPRI : pri;
2817 root 1.295 ev_set_priority (w, pri);
2818 root 1.164 }
2819    
2820 root 1.284 inline_speed void
2821 root 1.51 ev_start (EV_P_ W w, int active)
2822 root 1.1 {
2823 root 1.164 pri_adjust (EV_A_ w);
2824 root 1.1 w->active = active;
2825 root 1.51 ev_ref (EV_A);
2826 root 1.1 }
2827    
2828 root 1.284 inline_size void
2829 root 1.51 ev_stop (EV_P_ W w)
2830 root 1.1 {
2831 root 1.51 ev_unref (EV_A);
2832 root 1.1 w->active = 0;
2833     }
2834    
2835 root 1.8 /*****************************************************************************/
2836    
2837 root 1.171 void noinline
2838 root 1.136 ev_io_start (EV_P_ ev_io *w)
2839 root 1.1 {
2840 root 1.37 int fd = w->fd;
2841    
2842 root 1.123 if (expect_false (ev_is_active (w)))
2843 root 1.1 return;
2844    
2845 root 1.278 assert (("libev: ev_io_start called with negative fd", fd >= 0));
2846 root 1.327 assert (("libev: ev_io_start called with illegal event mask", !(w->events & ~(EV__IOFDSET | EV_READ | EV_WRITE))));
2847 root 1.33
2848 root 1.248 EV_FREQUENT_CHECK;
2849    
2850 root 1.51 ev_start (EV_A_ (W)w, 1);
2851 root 1.265 array_needsize (ANFD, anfds, anfdmax, fd + 1, array_init_zero);
2852 root 1.182 wlist_add (&anfds[fd].head, (WL)w);
2853 root 1.1
2854 root 1.298 fd_change (EV_A_ fd, w->events & EV__IOFDSET | EV_ANFD_REIFY);
2855 root 1.281 w->events &= ~EV__IOFDSET;
2856 root 1.248
2857     EV_FREQUENT_CHECK;
2858 root 1.1 }
2859    
2860 root 1.171 void noinline
2861 root 1.136 ev_io_stop (EV_P_ ev_io *w)
2862 root 1.1 {
2863 root 1.166 clear_pending (EV_A_ (W)w);
2864 root 1.123 if (expect_false (!ev_is_active (w)))
2865 root 1.1 return;
2866    
2867 root 1.278 assert (("libev: ev_io_stop called with illegal fd (must stay constant after start!)", w->fd >= 0 && w->fd < anfdmax));
2868 root 1.89
2869 root 1.248 EV_FREQUENT_CHECK;
2870    
2871 root 1.182 wlist_del (&anfds[w->fd].head, (WL)w);
2872 root 1.51 ev_stop (EV_A_ (W)w);
2873 root 1.1
2874 root 1.350 fd_change (EV_A_ w->fd, EV_ANFD_REIFY);
2875 root 1.248
2876     EV_FREQUENT_CHECK;
2877 root 1.1 }
2878    
2879 root 1.171 void noinline
2880 root 1.136 ev_timer_start (EV_P_ ev_timer *w)
2881 root 1.1 {
2882 root 1.123 if (expect_false (ev_is_active (w)))
2883 root 1.1 return;
2884    
2885 root 1.228 ev_at (w) += mn_now;
2886 root 1.12
2887 root 1.278 assert (("libev: ev_timer_start called with negative timer repeat value", w->repeat >= 0.));
2888 root 1.13
2889 root 1.248 EV_FREQUENT_CHECK;
2890    
2891     ++timercnt;
2892     ev_start (EV_A_ (W)w, timercnt + HEAP0 - 1);
2893 root 1.241 array_needsize (ANHE, timers, timermax, ev_active (w) + 1, EMPTY2);
2894     ANHE_w (timers [ev_active (w)]) = (WT)w;
2895 root 1.248 ANHE_at_cache (timers [ev_active (w)]);
2896 root 1.235 upheap (timers, ev_active (w));
2897 root 1.62
2898 root 1.248 EV_FREQUENT_CHECK;
2899    
2900 root 1.278 /*assert (("libev: internal timer heap corruption", timers [ev_active (w)] == (WT)w));*/
2901 root 1.12 }
2902    
2903 root 1.171 void noinline
2904 root 1.136 ev_timer_stop (EV_P_ ev_timer *w)
2905 root 1.12 {
2906 root 1.166 clear_pending (EV_A_ (W)w);
2907 root 1.123 if (expect_false (!ev_is_active (w)))
2908 root 1.12 return;
2909    
2910 root 1.248 EV_FREQUENT_CHECK;
2911    
2912 root 1.230 {
2913     int active = ev_active (w);
2914 root 1.62
2915 root 1.278 assert (("libev: internal timer heap corruption", ANHE_w (timers [active]) == (WT)w));
2916 root 1.151
2917 root 1.248 --timercnt;
2918    
2919     if (expect_true (active < timercnt + HEAP0))
2920 root 1.151 {
2921 root 1.248 timers [active] = timers [timercnt + HEAP0];
2922 root 1.181 adjustheap (timers, timercnt, active);
2923 root 1.151 }
2924 root 1.248 }
2925 root 1.228
2926     ev_at (w) -= mn_now;
2927 root 1.14
2928 root 1.51 ev_stop (EV_A_ (W)w);
2929 root 1.328
2930     EV_FREQUENT_CHECK;
2931 root 1.12 }
2932 root 1.4
2933 root 1.171 void noinline
2934 root 1.136 ev_timer_again (EV_P_ ev_timer *w)
2935 root 1.14 {
2936 root 1.248 EV_FREQUENT_CHECK;
2937    
2938 root 1.14 if (ev_is_active (w))
2939     {
2940     if (w->repeat)
2941 root 1.99 {
2942 root 1.228 ev_at (w) = mn_now + w->repeat;
2943 root 1.248 ANHE_at_cache (timers [ev_active (w)]);
2944 root 1.230 adjustheap (timers, timercnt, ev_active (w));
2945 root 1.99 }
2946 root 1.14 else
2947 root 1.51 ev_timer_stop (EV_A_ w);
2948 root 1.14 }
2949     else if (w->repeat)
2950 root 1.112 {
2951 root 1.229 ev_at (w) = w->repeat;
2952 root 1.112 ev_timer_start (EV_A_ w);
2953     }
2954 root 1.248
2955     EV_FREQUENT_CHECK;
2956 root 1.14 }
2957    
2958 root 1.301 ev_tstamp
2959     ev_timer_remaining (EV_P_ ev_timer *w)
2960     {
2961     return ev_at (w) - (ev_is_active (w) ? mn_now : 0.);
2962     }
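/* illustrative sketch, not part of libev: the usual inactivity-timeout pattern built on
 * ev_timer_again - re-arm the repeating timer whenever there is activity; timeout_cb,
 * start_timeout and on_activity are made-up names */
#if 0
#include <ev.h>

static ev_timer timeout_watcher;

static void
timeout_cb (struct ev_loop *loop, ev_timer *w, int revents)
{
  /* 60 seconds without activity */
}

static void
start_timeout (struct ev_loop *loop)
{
  ev_timer_init (&timeout_watcher, timeout_cb, 0., 60.);
  ev_timer_again (loop, &timeout_watcher);
}

static void
on_activity (struct ev_loop *loop)
{
  ev_timer_again (loop, &timeout_watcher); /* cheaper than ev_timer_stop + ev_timer_start */
}
#endif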
2963    
2964 root 1.140 #if EV_PERIODIC_ENABLE
2965 root 1.171 void noinline
2966 root 1.136 ev_periodic_start (EV_P_ ev_periodic *w)
2967 root 1.12 {
2968 root 1.123 if (expect_false (ev_is_active (w)))
2969 root 1.12 return;
2970 root 1.1
2971 root 1.77 if (w->reschedule_cb)
2972 root 1.228 ev_at (w) = w->reschedule_cb (w, ev_rt_now);
2973 root 1.77 else if (w->interval)
2974     {
2975 root 1.278 assert (("libev: ev_periodic_start called with negative interval value", w->interval >= 0.));
2976 root 1.370 periodic_recalc (EV_A_ w);
2977 root 1.77 }
2978 root 1.173 else
2979 root 1.228 ev_at (w) = w->offset;
2980 root 1.12
2981 root 1.248 EV_FREQUENT_CHECK;
2982    
2983     ++periodiccnt;
2984     ev_start (EV_A_ (W)w, periodiccnt + HEAP0 - 1);
2985 root 1.241 array_needsize (ANHE, periodics, periodicmax, ev_active (w) + 1, EMPTY2);
2986     ANHE_w (periodics [ev_active (w)]) = (WT)w;
2987 root 1.248 ANHE_at_cache (periodics [ev_active (w)]);
2988 root 1.235 upheap (periodics, ev_active (w));
2989 root 1.62
2990 root 1.248 EV_FREQUENT_CHECK;
2991    
2992 root 1.278 /*assert (("libev: internal periodic heap corruption", ANHE_w (periodics [ev_active (w)]) == (WT)w));*/
2993 root 1.1 }
2994    
2995 root 1.171 void noinline
2996 root 1.136 ev_periodic_stop (EV_P_ ev_periodic *w)
2997 root 1.1 {
2998 root 1.166 clear_pending (EV_A_ (W)w);
2999 root 1.123 if (expect_false (!ev_is_active (w)))
3000 root 1.1 return;
3001    
3002 root 1.248 EV_FREQUENT_CHECK;
3003    
3004 root 1.230 {
3005     int active = ev_active (w);
3006 root 1.62
3007 root 1.278 assert (("libev: internal periodic heap corruption", ANHE_w (periodics [active]) == (WT)w));
3008 root 1.151
3009 root 1.248 --periodiccnt;
3010    
3011     if (expect_true (active < periodiccnt + HEAP0))
3012 root 1.151 {
3013 root 1.248 periodics [active] = periodics [periodiccnt + HEAP0];
3014 root 1.181 adjustheap (periodics, periodiccnt, active);
3015 root 1.151 }
3016 root 1.248 }
3017 root 1.228
3018 root 1.328 ev_stop (EV_A_ (W)w);
3019    
3020 root 1.248 EV_FREQUENT_CHECK;
3021 root 1.1 }
3022    
3023 root 1.171 void noinline
3024 root 1.136 ev_periodic_again (EV_P_ ev_periodic *w)
3025 root 1.77 {
3026 root 1.84 /* TODO: use adjustheap and recalculation */
3027 root 1.77 ev_periodic_stop (EV_A_ w);
3028     ev_periodic_start (EV_A_ w);
3029     }
3030 root 1.93 #endif
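/* illustrative sketch, not part of libev: a periodic watcher firing at the start of every
 * full hour of wall-clock time, unaffected by time jumps; hourly_cb and start_hourly are
 * made-up names */
#if 0
#include <ev.h>

static ev_periodic hourly;

static void
hourly_cb (struct ev_loop *loop, ev_periodic *w, int revents)
{
  /* runs at :00 of every hour (in UTC) */
}

static void
start_hourly (struct ev_loop *loop)
{
  ev_periodic_init (&hourly, hourly_cb, 0., 3600., 0);
  ev_periodic_start (loop, &hourly);
}
#endif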
3031 root 1.77
3032 root 1.56 #ifndef SA_RESTART
3033     # define SA_RESTART 0
3034     #endif
3035    
3036 root 1.336 #if EV_SIGNAL_ENABLE
3037    
3038 root 1.171 void noinline
3039 root 1.136 ev_signal_start (EV_P_ ev_signal *w)
3040 root 1.56 {
3041 root 1.123 if (expect_false (ev_is_active (w)))
3042 root 1.56 return;
3043    
3044 root 1.306 assert (("libev: ev_signal_start called with illegal signal number", w->signum > 0 && w->signum < EV_NSIG));
3045    
3046     #if EV_MULTIPLICITY
3047 root 1.308 assert (("libev: a signal must not be attached to two different loops",
3048 root 1.306 !signals [w->signum - 1].loop || signals [w->signum - 1].loop == loop));
3049    
3050     signals [w->signum - 1].loop = EV_A;
3051     #endif
3052 root 1.56
3053 root 1.303 EV_FREQUENT_CHECK;
3054    
3055     #if EV_USE_SIGNALFD
3056     if (sigfd == -2)
3057     {
3058     sigfd = signalfd (-1, &sigfd_set, SFD_NONBLOCK | SFD_CLOEXEC);
3059     if (sigfd < 0 && errno == EINVAL)
3060     sigfd = signalfd (-1, &sigfd_set, 0); /* retry without flags */
3061    
3062     if (sigfd >= 0)
3063     {
3064     fd_intern (sigfd); /* doing it twice will not hurt */
3065    
3066     sigemptyset (&sigfd_set);
3067    
3068     ev_io_init (&sigfd_w, sigfdcb, sigfd, EV_READ);
3069     ev_set_priority (&sigfd_w, EV_MAXPRI);
3070     ev_io_start (EV_A_ &sigfd_w);
3071     ev_unref (EV_A); /* signalfd watcher should not keep loop alive */
3072     }
3073     }
3074    
3075     if (sigfd >= 0)
3076     {
3077     /* TODO: check .head */
3078     sigaddset (&sigfd_set, w->signum);
3079     sigprocmask (SIG_BLOCK, &sigfd_set, 0);
3080 root 1.207
3081 root 1.303 signalfd (sigfd, &sigfd_set, 0);
3082     }
3083 root 1.180 #endif
3084    
3085 root 1.56 ev_start (EV_A_ (W)w, 1);
3086 root 1.182 wlist_add (&signals [w->signum - 1].head, (WL)w);
3087 root 1.56
3088 root 1.63 if (!((WL)w)->next)
3089 root 1.304 # if EV_USE_SIGNALFD
3090 root 1.306 if (sigfd < 0) /*TODO*/
3091 root 1.304 # endif
3092 root 1.306 {
3093 root 1.322 # ifdef _WIN32
3094 root 1.317 evpipe_init (EV_A);
3095    
3096 root 1.306 signal (w->signum, ev_sighandler);
3097     # else
3098     struct sigaction sa;
3099    
3100     evpipe_init (EV_A);
3101    
3102     sa.sa_handler = ev_sighandler;
3103     sigfillset (&sa.sa_mask);
3104     sa.sa_flags = SA_RESTART; /* if restarting works we save one iteration */
3105     sigaction (w->signum, &sa, 0);
3106    
3107 root 1.366 if (origflags & EVFLAG_NOSIGMASK)
3108     {
3109     sigemptyset (&sa.sa_mask);
3110     sigaddset (&sa.sa_mask, w->signum);
3111     sigprocmask (SIG_UNBLOCK, &sa.sa_mask, 0);
3112     }
3113 root 1.67 #endif
3114 root 1.306 }
3115 root 1.248
3116     EV_FREQUENT_CHECK;
3117 root 1.56 }
3118    
3119 root 1.171 void noinline
3120 root 1.136 ev_signal_stop (EV_P_ ev_signal *w)
3121 root 1.56 {
3122 root 1.166 clear_pending (EV_A_ (W)w);
3123 root 1.123 if (expect_false (!ev_is_active (w)))
3124 root 1.56 return;
3125    
3126 root 1.248 EV_FREQUENT_CHECK;
3127    
3128 root 1.182 wlist_del (&signals [w->signum - 1].head, (WL)w);
3129 root 1.56 ev_stop (EV_A_ (W)w);
3130    
3131     if (!signals [w->signum - 1].head)
3132 root 1.306 {
3133 root 1.307 #if EV_MULTIPLICITY
3134 root 1.306 signals [w->signum - 1].loop = 0; /* detach from the signal */
3135 root 1.307 #endif
3136     #if EV_USE_SIGNALFD
3137 root 1.306 if (sigfd >= 0)
3138     {
3139 root 1.321 sigset_t ss;
3140    
3141     sigemptyset (&ss);
3142     sigaddset (&ss, w->signum);
3143 root 1.306 sigdelset (&sigfd_set, w->signum);
3144 root 1.321
3145 root 1.306 signalfd (sigfd, &sigfd_set, 0);
3146 root 1.321 sigprocmask (SIG_UNBLOCK, &ss, 0);
3147 root 1.306 }
3148     else
3149 root 1.307 #endif
3150 root 1.306 signal (w->signum, SIG_DFL);
3151     }
3152 root 1.248
3153     EV_FREQUENT_CHECK;
3154 root 1.56 }
3155    
3156 root 1.336 #endif
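/* illustrative sketch, not part of libev: a SIGINT watcher that shuts the loop down cleanly;
 * install_sigint is a made-up helper name */
#if 0
#include <signal.h>
#include <ev.h>

static ev_signal sigint_watcher;

static void
sigint_cb (struct ev_loop *loop, ev_signal *w, int revents)
{
  ev_break (loop, EVBREAK_ALL);
}

static void
install_sigint (struct ev_loop *loop)
{
  ev_signal_init (&sigint_watcher, sigint_cb, SIGINT);
  ev_signal_start (loop, &sigint_watcher);
}
#endif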
3157    
3158     #if EV_CHILD_ENABLE
3159    
3160 root 1.28 void
3161 root 1.136 ev_child_start (EV_P_ ev_child *w)
3162 root 1.22 {
3163 root 1.56 #if EV_MULTIPLICITY
3164 root 1.278 assert (("libev: child watchers are only supported in the default loop", loop == ev_default_loop_ptr));
3165 root 1.56 #endif
3166 root 1.123 if (expect_false (ev_is_active (w)))
3167 root 1.22 return;
3168    
3169 root 1.248 EV_FREQUENT_CHECK;
3170    
3171 root 1.51 ev_start (EV_A_ (W)w, 1);
3172 root 1.338 wlist_add (&childs [w->pid & ((EV_PID_HASHSIZE) - 1)], (WL)w);
3173 root 1.248
3174     EV_FREQUENT_CHECK;
3175 root 1.22 }
3176    
3177 root 1.28 void
3178 root 1.136 ev_child_stop (EV_P_ ev_child *w)
3179 root 1.22 {
3180 root 1.166 clear_pending (EV_A_ (W)w);
3181 root 1.123 if (expect_false (!ev_is_active (w)))
3182 root 1.22 return;
3183    
3184 root 1.248 EV_FREQUENT_CHECK;
3185    
3186 root 1.338 wlist_del (&childs [w->pid & ((EV_PID_HASHSIZE) - 1)], (WL)w);
3187 root 1.51 ev_stop (EV_A_ (W)w);
3188 root 1.248
3189     EV_FREQUENT_CHECK;
3190 root 1.22 }
3191    
3192 root 1.336 #endif
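/* illustrative sketch, not part of libev: reaping a forked child with ev_child (only
 * supported in the default loop); child_cb and watch_child are made-up names */
#if 0
#include <ev.h>

static ev_child child_watcher;

static void
child_cb (struct ev_loop *loop, ev_child *w, int revents)
{
  /* w->rpid holds the pid that changed state, w->rstatus the waitpid status */
  ev_child_stop (loop, w);
}

static void
watch_child (struct ev_loop *loop, int pid)
{
  ev_child_init (&child_watcher, child_cb, pid, 0); /* trace=0: terminations only */
  ev_child_start (loop, &child_watcher);
}
#endif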
3193    
3194 root 1.140 #if EV_STAT_ENABLE
3195    
3196     # ifdef _WIN32
3197 root 1.146 # undef lstat
3198     # define lstat(a,b) _stati64 (a,b)
3199 root 1.140 # endif
3200    
3201 root 1.273 #define DEF_STAT_INTERVAL 5.0074891
3202     #define NFS_STAT_INTERVAL 30.1074891 /* for filesystems potentially failing inotify */
3203     #define MIN_STAT_INTERVAL 0.1074891
3204 root 1.143
3205 root 1.157 static void noinline stat_timer_cb (EV_P_ ev_timer *w_, int revents);
3206 root 1.152
3207     #if EV_USE_INOTIFY
3208 root 1.326
3209     /* the * 2 is to allow for alignment padding, which for some reason is >> 8 */
3210     # define EV_INOTIFY_BUFSIZE (sizeof (struct inotify_event) * 2 + NAME_MAX)
3211 root 1.152
3212     static void noinline
3213     infy_add (EV_P_ ev_stat *w)
3214     {
3215     w->wd = inotify_add_watch (fs_fd, w->path, IN_ATTRIB | IN_DELETE_SELF | IN_MOVE_SELF | IN_MODIFY | IN_DONT_FOLLOW | IN_MASK_ADD);
3216    
3217 root 1.318 if (w->wd >= 0)
3218 root 1.152 {
3219 root 1.318 struct statfs sfs;
3220    
3221     /* now local changes will be tracked by inotify, but remote changes won't */
3222     /* unless the filesystem is known to be local, we therefore still poll */
3223     /* also do poll on <2.6.25, but with normal frequency */
3224    
3225     if (!fs_2625)
3226     w->timer.repeat = w->interval ? w->interval : DEF_STAT_INTERVAL;
3227     else if (!statfs (w->path, &sfs)
3228     && (sfs.f_type == 0x1373 /* devfs */
3229     || sfs.f_type == 0xEF53 /* ext2/3 */
3230     || sfs.f_type == 0x3153464a /* jfs */
3231     || sfs.f_type == 0x52654973 /* reiser3 */
3232     || sfs.f_type == 0x01021994 /* tmpfs */
3233     || sfs.f_type == 0x58465342 /* xfs */))
3234     w->timer.repeat = 0.; /* filesystem is local, kernel new enough */
3235     else
3236     w->timer.repeat = w->interval ? w->interval : NFS_STAT_INTERVAL; /* remote, use reduced frequency */
3237     }
3238     else
3239     {
3240     /* can't use inotify, continue to stat */
3241 root 1.273 w->timer.repeat = w->interval ? w->interval : DEF_STAT_INTERVAL;
3242 root 1.152
3243 root 1.318 /* if path is not there, monitor some parent directory for speedup hints */
3244 root 1.271 /* note that exceeding the hardcoded path limit is not a correctness issue, */
3245 root 1.233 /* but an efficiency issue only */
3246 root 1.153 if ((errno == ENOENT || errno == EACCES) && strlen (w->path) < 4096)
3247 root 1.152 {
3248 root 1.153 char path [4096];
3249 root 1.152 strcpy (path, w->path);
3250    
3251     do
3252     {
3253     int mask = IN_MASK_ADD | IN_DELETE_SELF | IN_MOVE_SELF
3254     | (errno == EACCES ? IN_ATTRIB : IN_CREATE | IN_MOVED_TO);
3255    
3256     char *pend = strrchr (path, '/');
3257    
3258 root 1.275 if (!pend || pend == path)
3259     break;
3260 root 1.152
3261     *pend = 0;
3262 root 1.153 w->wd = inotify_add_watch (fs_fd, path, mask);
3263 root 1.372 }
3264 root 1.152 while (w->wd < 0 && (errno == ENOENT || errno == EACCES));
3265     }
3266     }
3267 root 1.275
3268     if (w->wd >= 0)
3269 root 1.338 wlist_add (&fs_hash [w->wd & ((EV_INOTIFY_HASHSIZE) - 1)].head, (WL)w);
3270 root 1.152
3271 root 1.318 /* now re-arm timer, if required */
3272     if (ev_is_active (&w->timer)) ev_ref (EV_A);
3273     ev_timer_again (EV_A_ &w->timer);
3274     if (ev_is_active (&w->timer)) ev_unref (EV_A);
3275 root 1.152 }
3276    
3277     static void noinline
3278     infy_del (EV_P_ ev_stat *w)
3279     {
3280     int slot;
3281     int wd = w->wd;
3282    
3283     if (wd < 0)
3284     return;
3285    
3286     w->wd = -2;
3287 root 1.338 slot = wd & ((EV_INOTIFY_HASHSIZE) - 1);
3288 root 1.152 wlist_del (&fs_hash [slot].head, (WL)w);
3289    
3290     /* remove this watcher; if others are watching it, they will rearm */
3291     inotify_rm_watch (fs_fd, wd);
3292     }
3293    
3294     static void noinline
3295     infy_wd (EV_P_ int slot, int wd, struct inotify_event *ev)
3296     {
3297     if (slot < 0)
3298 root 1.264 /* overflow, need to check for all hash slots */
3299 root 1.338 for (slot = 0; slot < (EV_INOTIFY_HASHSIZE); ++slot)
3300 root 1.152 infy_wd (EV_A_ slot, wd, ev);
3301     else
3302     {
3303     WL w_;
3304    
3305 root 1.338 for (w_ = fs_hash [slot & ((EV_INOTIFY_HASHSIZE) - 1)].head; w_; )
3306 root 1.152 {
3307     ev_stat *w = (ev_stat *)w_;
3308     w_ = w_->next; /* lets us remove this watcher and all before it */
3309    
3310     if (w->wd == wd || wd == -1)
3311     {
3312     if (ev->mask & (IN_IGNORED | IN_UNMOUNT | IN_DELETE_SELF))
3313     {
3314 root 1.338 wlist_del (&fs_hash [slot & ((EV_INOTIFY_HASHSIZE) - 1)].head, (WL)w);
3315 root 1.152 w->wd = -1;
3316     infy_add (EV_A_ w); /* re-add, no matter what */
3317     }
3318    
3319 root 1.153 stat_timer_cb (EV_A_ &w->timer, 0);
3320 root 1.152 }
3321     }
3322     }
3323     }
3324    
3325     static void
3326     infy_cb (EV_P_ ev_io *w, int revents)
3327     {
3328     char buf [EV_INOTIFY_BUFSIZE];
3329     int ofs;
3330     int len = read (fs_fd, buf, sizeof (buf));
3331    
3332 root 1.326 for (ofs = 0; ofs < len; )
3333     {
3334     struct inotify_event *ev = (struct inotify_event *)(buf + ofs);
3335     infy_wd (EV_A_ ev->wd, ev->wd, ev);
3336     ofs += sizeof (struct inotify_event) + ev->len;
3337     }
3338 root 1.152 }
3339    
3340 root 1.379 inline_size void ecb_cold
3341 root 1.330 ev_check_2625 (EV_P)
3342     {
3343     /* kernels < 2.6.25 are borked
3344     * http://www.ussg.indiana.edu/hypermail/linux/kernel/0711.3/1208.html
3345     */
3346     if (ev_linux_version () < 0x020619)
3347 root 1.273 return;
3348 root 1.264
3349 root 1.273 fs_2625 = 1;
3350     }
3351 root 1.264
3352 root 1.315 inline_size int
3353     infy_newfd (void)
3354     {
3355     #if defined (IN_CLOEXEC) && defined (IN_NONBLOCK)
3356     int fd = inotify_init1 (IN_CLOEXEC | IN_NONBLOCK);
3357     if (fd >= 0)
3358     return fd;
3359     #endif
3360     return inotify_init ();
3361     }
3362    
3363 root 1.284 inline_size void
3364 root 1.273 infy_init (EV_P)
3365     {
3366     if (fs_fd != -2)
3367     return;
3368 root 1.264
3369 root 1.273 fs_fd = -1;
3370 root 1.264
3371 root 1.330 ev_check_2625 (EV_A);
3372 root 1.264
3373 root 1.315 fs_fd = infy_newfd ();
3374 root 1.152
3375     if (fs_fd >= 0)
3376     {
3377 root 1.315 fd_intern (fs_fd);
3378 root 1.152 ev_io_init (&fs_w, infy_cb, fs_fd, EV_READ);
3379     ev_set_priority (&fs_w, EV_MAXPRI);
3380     ev_io_start (EV_A_ &fs_w);
3381 root 1.317 ev_unref (EV_A);
3382 root 1.152 }
3383     }
3384    
3385 root 1.284 inline_size void
3386 root 1.154 infy_fork (EV_P)
3387     {
3388     int slot;
3389    
3390     if (fs_fd < 0)
3391     return;
3392    
3393 root 1.317 ev_ref (EV_A);
3394 root 1.315 ev_io_stop (EV_A_ &fs_w);
3395 root 1.154 close (fs_fd);
3396 root 1.315 fs_fd = infy_newfd ();
3397    
3398     if (fs_fd >= 0)
3399     {
3400     fd_intern (fs_fd);
3401     ev_io_set (&fs_w, fs_fd, EV_READ);
3402     ev_io_start (EV_A_ &fs_w);
3403 root 1.317 ev_unref (EV_A);
3404 root 1.315 }
3405 root 1.154
3406 root 1.338 for (slot = 0; slot < (EV_INOTIFY_HASHSIZE); ++slot)
3407 root 1.154 {
3408     WL w_ = fs_hash [slot].head;
3409     fs_hash [slot].head = 0;
3410    
3411     while (w_)
3412     {
3413     ev_stat *w = (ev_stat *)w_;
3414     w_ = w_->next; /* lets us add this watcher */
3415    
3416     w->wd = -1;
3417    
3418     if (fs_fd >= 0)
3419     infy_add (EV_A_ w); /* re-add, no matter what */
3420     else
3421 root 1.318 {
3422     w->timer.repeat = w->interval ? w->interval : DEF_STAT_INTERVAL;
3423     if (ev_is_active (&w->timer)) ev_ref (EV_A);
3424     ev_timer_again (EV_A_ &w->timer);
3425     if (ev_is_active (&w->timer)) ev_unref (EV_A);
3426     }
3427 root 1.154 }
3428     }
3429     }
3430    
3431 root 1.152 #endif
3432    
3433 root 1.255 #ifdef _WIN32
3434     # define EV_LSTAT(p,b) _stati64 (p, b)
3435     #else
3436     # define EV_LSTAT(p,b) lstat (p, b)
3437     #endif
3438    
3439 root 1.140 void
3440     ev_stat_stat (EV_P_ ev_stat *w)
3441     {
3442     if (lstat (w->path, &w->attr) < 0)
3443     w->attr.st_nlink = 0;
3444     else if (!w->attr.st_nlink)
3445     w->attr.st_nlink = 1;
3446     }
3447    
3448 root 1.157 static void noinline
3449 root 1.140 stat_timer_cb (EV_P_ ev_timer *w_, int revents)
3450     {
3451     ev_stat *w = (ev_stat *)(((char *)w_) - offsetof (ev_stat, timer));
3452    
3453 root 1.320 ev_statdata prev = w->attr;
3454 root 1.140 ev_stat_stat (EV_A_ w);
3455    
3456 root 1.156 /* memcmp doesn't work on netbsd (its struct stat seems to contain padding/extra fields), so compare the members explicitly */
3457     if (
3458 root 1.320 prev.st_dev != w->attr.st_dev
3459     || prev.st_ino != w->attr.st_ino
3460     || prev.st_mode != w->attr.st_mode
3461     || prev.st_nlink != w->attr.st_nlink
3462     || prev.st_uid != w->attr.st_uid
3463     || prev.st_gid != w->attr.st_gid
3464     || prev.st_rdev != w->attr.st_rdev
3465     || prev.st_size != w->attr.st_size
3466     || prev.st_atime != w->attr.st_atime
3467     || prev.st_mtime != w->attr.st_mtime
3468     || prev.st_ctime != w->attr.st_ctime
3469 root 1.156 ) {
3470 root 1.320 /* we only update w->prev on actual differences */
3471     /* in case we test more often than invoke the callback, */
3472     /* to ensure that prev is always different to attr */
3473     w->prev = prev;
3474    
3475 root 1.152 #if EV_USE_INOTIFY
3476 root 1.264 if (fs_fd >= 0)
3477     {
3478     infy_del (EV_A_ w);
3479     infy_add (EV_A_ w);
3480     ev_stat_stat (EV_A_ w); /* avoid race... */
3481     }
3482 root 1.152 #endif
3483    
3484     ev_feed_event (EV_A_ w, EV_STAT);
3485     }
3486 root 1.140 }
3487    
3488     void
3489     ev_stat_start (EV_P_ ev_stat *w)
3490     {
3491     if (expect_false (ev_is_active (w)))
3492     return;
3493    
3494     ev_stat_stat (EV_A_ w);
3495    
3496 root 1.273 if (w->interval < MIN_STAT_INTERVAL && w->interval)
3497     w->interval = MIN_STAT_INTERVAL;
3498 root 1.143
3499 root 1.273 ev_timer_init (&w->timer, stat_timer_cb, 0., w->interval ? w->interval : DEF_STAT_INTERVAL);
3500 root 1.140 ev_set_priority (&w->timer, ev_priority (w));
3501 root 1.152
3502     #if EV_USE_INOTIFY
3503     infy_init (EV_A);
3504    
3505     if (fs_fd >= 0)
3506     infy_add (EV_A_ w);
3507     else
3508     #endif
3509 root 1.318 {
3510     ev_timer_again (EV_A_ &w->timer);
3511     ev_unref (EV_A);
3512     }
3513 root 1.140
3514     ev_start (EV_A_ (W)w, 1);
3515 root 1.248
3516     EV_FREQUENT_CHECK;
3517 root 1.140 }
3518    
3519     void
3520     ev_stat_stop (EV_P_ ev_stat *w)
3521     {
3522 root 1.166 clear_pending (EV_A_ (W)w);
3523 root 1.140 if (expect_false (!ev_is_active (w)))
3524     return;
3525    
3526 root 1.248 EV_FREQUENT_CHECK;
3527    
3528 root 1.152 #if EV_USE_INOTIFY
3529     infy_del (EV_A_ w);
3530     #endif
3531 root 1.318
3532     if (ev_is_active (&w->timer))
3533     {
3534     ev_ref (EV_A);
3535     ev_timer_stop (EV_A_ &w->timer);
3536     }
3537 root 1.140
3538 root 1.134 ev_stop (EV_A_ (W)w);
3539 root 1.248
3540     EV_FREQUENT_CHECK;
3541 root 1.134 }
3542     #endif
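
/* Illustrative only, not part of ev.c: a minimal sketch of how the ev_stat
 * API implemented above might be used from application code. The names
 * (passwd_cb, the monitored path) are made up for the example, which is
 * compiled out via #if 0. */
#if 0
#include <stdio.h>
#include "ev.h"

static ev_stat passwd_watcher;

static void
passwd_cb (struct ev_loop *loop, ev_stat *w, int revents)
{
  /* w->attr holds the most recent lstat result, w->prev the previous one;
     st_nlink is forced to 0 by libev when the path does not exist */
  if (w->attr.st_nlink)
    printf ("%s changed\n", w->path);
  else
    printf ("%s does not exist\n", w->path);
}

int
main (void)
{
  struct ev_loop *loop = ev_default_loop (0);

  /* interval 0. selects a suitable default (DEF_STAT_INTERVAL above) */
  ev_stat_init (&passwd_watcher, passwd_cb, "/etc/passwd", 0.);
  ev_stat_start (loop, &passwd_watcher);

  ev_run (loop, 0);
  return 0;
}
#endif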
3543    
3544 root 1.164 #if EV_IDLE_ENABLE
3545 root 1.144 void
3546     ev_idle_start (EV_P_ ev_idle *w)
3547     {
3548     if (expect_false (ev_is_active (w)))
3549     return;
3550    
3551 root 1.164 pri_adjust (EV_A_ (W)w);
3552    
3553 root 1.248 EV_FREQUENT_CHECK;
3554    
3555 root 1.164 {
3556     int active = ++idlecnt [ABSPRI (w)];
3557    
3558     ++idleall;
3559     ev_start (EV_A_ (W)w, active);
3560    
3561     array_needsize (ev_idle *, idles [ABSPRI (w)], idlemax [ABSPRI (w)], active, EMPTY2);
3562     idles [ABSPRI (w)][active - 1] = w;
3563     }
3564 root 1.248
3565     EV_FREQUENT_CHECK;
3566 root 1.144 }
3567    
3568     void
3569     ev_idle_stop (EV_P_ ev_idle *w)
3570     {
3571 root 1.166 clear_pending (EV_A_ (W)w);
3572 root 1.144 if (expect_false (!ev_is_active (w)))
3573     return;
3574    
3575 root 1.248 EV_FREQUENT_CHECK;
3576    
3577 root 1.144 {
3578 root 1.230 int active = ev_active (w);
3579 root 1.164
3580     idles [ABSPRI (w)][active - 1] = idles [ABSPRI (w)][--idlecnt [ABSPRI (w)]];
3581 root 1.230 ev_active (idles [ABSPRI (w)][active - 1]) = active;
3582 root 1.164
3583     ev_stop (EV_A_ (W)w);
3584     --idleall;
3585 root 1.144 }
3586 root 1.248
3587     EV_FREQUENT_CHECK;
3588 root 1.144 }
3589 root 1.164 #endif
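
/* Illustrative only, not part of ev.c: a minimal sketch of the ev_idle API
 * implemented above, doing one piece of background work when the loop has
 * nothing else to do. Names are made up; compiled out via #if 0. */
#if 0
#include <stdio.h>
#include "ev.h"

static void
idle_cb (struct ev_loop *loop, ev_idle *w, int revents)
{
  /* perform one small chunk of background work, then stop so the idle
     watcher does not keep the loop spinning forever */
  puts ("loop is otherwise idle");
  ev_idle_stop (loop, w);
}

int
main (void)
{
  struct ev_loop *loop = ev_default_loop (0);
  ev_idle idler;

  ev_idle_init (&idler, idle_cb);
  ev_idle_start (loop, &idler);

  ev_run (loop, 0); /* returns once the idle watcher has stopped itself */
  return 0;
}
#endif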
3590 root 1.144
3591 root 1.337 #if EV_PREPARE_ENABLE
3592 root 1.144 void
3593     ev_prepare_start (EV_P_ ev_prepare *w)
3594     {
3595     if (expect_false (ev_is_active (w)))
3596     return;
3597    
3598 root 1.248 EV_FREQUENT_CHECK;
3599    
3600 root 1.144 ev_start (EV_A_ (W)w, ++preparecnt);
3601     array_needsize (ev_prepare *, prepares, preparemax, preparecnt, EMPTY2);
3602     prepares [preparecnt - 1] = w;
3603 root 1.248
3604     EV_FREQUENT_CHECK;
3605 root 1.144 }
3606    
3607     void
3608     ev_prepare_stop (EV_P_ ev_prepare *w)
3609     {
3610 root 1.166 clear_pending (EV_A_ (W)w);
3611 root 1.144 if (expect_false (!ev_is_active (w)))
3612     return;
3613    
3614 root 1.248 EV_FREQUENT_CHECK;
3615    
3616 root 1.144 {
3617 root 1.230 int active = ev_active (w);
3618    
3619 root 1.144 prepares [active - 1] = prepares [--preparecnt];
3620 root 1.230 ev_active (prepares [active - 1]) = active;
3621 root 1.144 }
3622    
3623     ev_stop (EV_A_ (W)w);
3624 root 1.248
3625     EV_FREQUENT_CHECK;
3626 root 1.144 }
3627 root 1.337 #endif
3628 root 1.144
3629 root 1.337 #if EV_CHECK_ENABLE
3630 root 1.144 void
3631     ev_check_start (EV_P_ ev_check *w)
3632     {
3633     if (expect_false (ev_is_active (w)))
3634     return;
3635    
3636 root 1.248 EV_FREQUENT_CHECK;
3637    
3638 root 1.144 ev_start (EV_A_ (W)w, ++checkcnt);
3639     array_needsize (ev_check *, checks, checkmax, checkcnt, EMPTY2);
3640     checks [checkcnt - 1] = w;
3641 root 1.248
3642     EV_FREQUENT_CHECK;
3643 root 1.144 }
3644    
3645     void
3646     ev_check_stop (EV_P_ ev_check *w)
3647     {
3648 root 1.166 clear_pending (EV_A_ (W)w);
3649 root 1.144 if (expect_false (!ev_is_active (w)))
3650     return;
3651    
3652 root 1.248 EV_FREQUENT_CHECK;
3653    
3654 root 1.144 {
3655 root 1.230 int active = ev_active (w);
3656    
3657 root 1.144 checks [active - 1] = checks [--checkcnt];
3658 root 1.230 ev_active (checks [active - 1]) = active;
3659 root 1.144 }
3660    
3661     ev_stop (EV_A_ (W)w);
3662 root 1.248
3663     EV_FREQUENT_CHECK;
3664 root 1.144 }
3665 root 1.337 #endif
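
/* Illustrative only, not part of ev.c: a sketch pairing the ev_prepare and
 * ev_check watchers implemented above, the usual hook points for
 * integrating other event libraries (prepare runs just before the loop
 * blocks, check right after it wakes up). Names are made up; #if 0'd. */
#if 0
#include <stdio.h>
#include "ev.h"

static ev_prepare prep;
static ev_check   chck;

static void
prepare_cb (struct ev_loop *loop, ev_prepare *w, int revents)
{
  puts ("about to poll"); /* e.g. register foreign fds/timeouts here */
}

static void
check_cb (struct ev_loop *loop, ev_check *w, int revents)
{
  puts ("woke up from poll"); /* e.g. dispatch the foreign library here */
}

static void
install_hooks (struct ev_loop *loop)
{
  ev_prepare_init (&prep, prepare_cb);
  ev_prepare_start (loop, &prep);

  ev_check_init (&chck, check_cb);
  ev_check_start (loop, &chck);
}
#endif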
3666 root 1.144
3667     #if EV_EMBED_ENABLE
3668     void noinline
3669     ev_embed_sweep (EV_P_ ev_embed *w)
3670     {
3671 root 1.353 ev_run (w->other, EVRUN_NOWAIT);
3672 root 1.144 }
3673    
3674     static void
3675 root 1.189 embed_io_cb (EV_P_ ev_io *io, int revents)
3676 root 1.144 {
3677     ev_embed *w = (ev_embed *)(((char *)io) - offsetof (ev_embed, io));
3678    
3679     if (ev_cb (w))
3680     ev_feed_event (EV_A_ (W)w, EV_EMBED);
3681     else
3682 root 1.353 ev_run (w->other, EVRUN_NOWAIT);
3683 root 1.144 }
3684    
3685 root 1.189 static void
3686     embed_prepare_cb (EV_P_ ev_prepare *prepare, int revents)
3687     {
3688     ev_embed *w = (ev_embed *)(((char *)prepare) - offsetof (ev_embed, prepare));
3689    
3690 root 1.195 {
3691 root 1.306 EV_P = w->other;
3692 root 1.195
3693     while (fdchangecnt)
3694     {
3695     fd_reify (EV_A);
3696 root 1.353 ev_run (EV_A_ EVRUN_NOWAIT);
3697 root 1.195 }
3698     }
3699     }
3700    
3701 root 1.261 static void
3702     embed_fork_cb (EV_P_ ev_fork *fork_w, int revents)
3703     {
3704     ev_embed *w = (ev_embed *)(((char *)fork_w) - offsetof (ev_embed, fork));
3705    
3706 root 1.277 ev_embed_stop (EV_A_ w);
3707    
3708 root 1.261 {
3709 root 1.306 EV_P = w->other;
3710 root 1.261
3711     ev_loop_fork (EV_A);
3712 root 1.353 ev_run (EV_A_ EVRUN_NOWAIT);
3713 root 1.261 }
3714 root 1.277
3715     ev_embed_start (EV_A_ w);
3716 root 1.261 }
3717    
3718 root 1.195 #if 0
3719     static void
3720     embed_idle_cb (EV_P_ ev_idle *idle, int revents)
3721     {
3722     ev_idle_stop (EV_A_ idle);
3723 root 1.189 }
3724 root 1.195 #endif
3725 root 1.189
3726 root 1.144 void
3727     ev_embed_start (EV_P_ ev_embed *w)
3728     {
3729     if (expect_false (ev_is_active (w)))
3730     return;
3731    
3732     {
3733 root 1.306 EV_P = w->other;
3734 root 1.278 assert (("libev: loop to be embedded is not embeddable", backend & ev_embeddable_backends ()));
3735 root 1.191 ev_io_init (&w->io, embed_io_cb, backend_fd, EV_READ);
3736 root 1.144 }
3737    
3738 root 1.248 EV_FREQUENT_CHECK;
3739    
3740 root 1.144 ev_set_priority (&w->io, ev_priority (w));
3741     ev_io_start (EV_A_ &w->io);
3742    
3743 root 1.189 ev_prepare_init (&w->prepare, embed_prepare_cb);
3744     ev_set_priority (&w->prepare, EV_MINPRI);
3745     ev_prepare_start (EV_A_ &w->prepare);
3746    
3747 root 1.261 ev_fork_init (&w->fork, embed_fork_cb);
3748     ev_fork_start (EV_A_ &w->fork);
3749    
3750 root 1.195 /*ev_idle_init (&w->idle, embed_idle_cb);*/
3751    
3752 root 1.144 ev_start (EV_A_ (W)w, 1);
3753 root 1.248
3754     EV_FREQUENT_CHECK;
3755 root 1.144 }
3756    
3757     void
3758     ev_embed_stop (EV_P_ ev_embed *w)
3759     {
3760 root 1.166 clear_pending (EV_A_ (W)w);
3761 root 1.144 if (expect_false (!ev_is_active (w)))
3762     return;
3763    
3764 root 1.248 EV_FREQUENT_CHECK;
3765    
3766 root 1.261 ev_io_stop (EV_A_ &w->io);
3767 root 1.189 ev_prepare_stop (EV_A_ &w->prepare);
3768 root 1.261 ev_fork_stop (EV_A_ &w->fork);
3769 root 1.248
3770 root 1.328 ev_stop (EV_A_ (W)w);
3771    
3772 root 1.248 EV_FREQUENT_CHECK;
3773 root 1.144 }
3774     #endif
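
/* Illustrative only, not part of ev.c: a sketch of the ev_embed API above,
 * following the pattern from the libev documentation: create a second loop
 * with an embeddable backend (if any) and embed it into the default loop.
 * Variable names are made up; compiled out via #if 0. */
#if 0
#include "ev.h"

static struct ev_loop *loop_hi; /* the default loop */
static struct ev_loop *loop_lo; /* embedded loop, or loop_hi if none */
static ev_embed embed;

static void
setup_loops (void)
{
  loop_hi = ev_default_loop (0);

  /* only create a second loop if an embeddable backend is available */
  loop_lo = ev_embeddable_backends () & ev_recommended_backends ()
              ? ev_loop_new (ev_embeddable_backends () & ev_recommended_backends ())
              : 0;

  if (loop_lo)
    {
      /* a 0 callback asks libev to sweep the embedded loop automatically */
      ev_embed_init (&embed, 0, loop_lo);
      ev_embed_start (loop_hi, &embed);
    }
  else
    loop_lo = loop_hi; /* fall back to a single loop */
}
#endif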
3775    
3776 root 1.147 #if EV_FORK_ENABLE
3777     void
3778     ev_fork_start (EV_P_ ev_fork *w)
3779     {
3780     if (expect_false (ev_is_active (w)))
3781     return;
3782    
3783 root 1.248 EV_FREQUENT_CHECK;
3784    
3785 root 1.147 ev_start (EV_A_ (W)w, ++forkcnt);
3786     array_needsize (ev_fork *, forks, forkmax, forkcnt, EMPTY2);
3787     forks [forkcnt - 1] = w;
3788 root 1.248
3789     EV_FREQUENT_CHECK;
3790 root 1.147 }
3791    
3792     void
3793     ev_fork_stop (EV_P_ ev_fork *w)
3794     {
3795 root 1.166 clear_pending (EV_A_ (W)w);
3796 root 1.147 if (expect_false (!ev_is_active (w)))
3797     return;
3798    
3799 root 1.248 EV_FREQUENT_CHECK;
3800    
3801 root 1.147 {
3802 root 1.230 int active = ev_active (w);
3803    
3804 root 1.147 forks [active - 1] = forks [--forkcnt];
3805 root 1.230 ev_active (forks [active - 1]) = active;
3806 root 1.147 }
3807    
3808     ev_stop (EV_A_ (W)w);
3809 root 1.248
3810     EV_FREQUENT_CHECK;
3811 root 1.147 }
3812     #endif
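
/* Illustrative only, not part of ev.c: a sketch of the ev_fork API above.
 * A fork watcher is invoked in the child after ev_loop_fork () has been
 * called on the loop and it resumes running. Names are made up; #if 0'd. */
#if 0
#include <stdio.h>
#include "ev.h"

static ev_fork fork_watcher;

static void
fork_cb (struct ev_loop *loop, ev_fork *w, int revents)
{
  puts ("child: loop was re-initialised after fork");
}

static void
install_fork_watcher (struct ev_loop *loop)
{
  ev_fork_init (&fork_watcher, fork_cb);
  ev_fork_start (loop, &fork_watcher);
}
#endif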
3813    
3814 root 1.360 #if EV_CLEANUP_ENABLE
3815     void
3816     ev_cleanup_start (EV_P_ ev_cleanup *w)
3817     {
3818     if (expect_false (ev_is_active (w)))
3819     return;
3820    
3821     EV_FREQUENT_CHECK;
3822    
3823     ev_start (EV_A_ (W)w, ++cleanupcnt);
3824     array_needsize (ev_cleanup *, cleanups, cleanupmax, cleanupcnt, EMPTY2);
3825     cleanups [cleanupcnt - 1] = w;
3826    
3827 root 1.362 /* cleanup watchers should never keep a refcount on the loop */
3828     ev_unref (EV_A);
3829 root 1.360 EV_FREQUENT_CHECK;
3830     }
3831    
3832     void
3833     ev_cleanup_stop (EV_P_ ev_cleanup *w)
3834     {
3835     clear_pending (EV_A_ (W)w);
3836     if (expect_false (!ev_is_active (w)))
3837     return;
3838    
3839     EV_FREQUENT_CHECK;
3840 root 1.362 ev_ref (EV_A);
3841 root 1.360
3842     {
3843     int active = ev_active (w);
3844    
3845     cleanups [active - 1] = cleanups [--cleanupcnt];
3846     ev_active (cleanups [active - 1]) = active;
3847     }
3848    
3849     ev_stop (EV_A_ (W)w);
3850    
3851     EV_FREQUENT_CHECK;
3852     }
3853     #endif
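
/* Illustrative only, not part of ev.c: a sketch of the ev_cleanup API
 * above. Cleanup watchers run when the loop is being destroyed and, as
 * the code above shows, do not keep a refcount on the loop. Names are
 * made up; compiled out via #if 0. */
#if 0
#include <stdio.h>
#include "ev.h"

static ev_cleanup cleaner;

static void
cleanup_cb (struct ev_loop *loop, ev_cleanup *w, int revents)
{
  puts ("releasing per-loop resources"); /* called from ev_loop_destroy */
}

static void
install_cleanup (struct ev_loop *loop)
{
  ev_cleanup_init (&cleaner, cleanup_cb);
  ev_cleanup_start (loop, &cleaner); /* does not keep the loop alive */
}
#endif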
3854    
3855 root 1.207 #if EV_ASYNC_ENABLE
3856     void
3857     ev_async_start (EV_P_ ev_async *w)
3858     {
3859     if (expect_false (ev_is_active (w)))
3860     return;
3861    
3862 root 1.352 w->sent = 0;
3863    
3864 root 1.207 evpipe_init (EV_A);
3865    
3866 root 1.248 EV_FREQUENT_CHECK;
3867    
3868 root 1.207 ev_start (EV_A_ (W)w, ++asynccnt);
3869     array_needsize (ev_async *, asyncs, asyncmax, asynccnt, EMPTY2);
3870     asyncs [asynccnt - 1] = w;
3871 root 1.248
3872     EV_FREQUENT_CHECK;
3873 root 1.207 }
3874    
3875     void
3876     ev_async_stop (EV_P_ ev_async *w)
3877     {
3878     clear_pending (EV_A_ (W)w);
3879     if (expect_false (!ev_is_active (w)))
3880     return;
3881    
3882 root 1.248 EV_FREQUENT_CHECK;
3883    
3884 root 1.207 {
3885 root 1.230 int active = ev_active (w);
3886    
3887 root 1.207 asyncs [active - 1] = asyncs [--asynccnt];
3888 root 1.230 ev_active (asyncs [active - 1]) = active;
3889 root 1.207 }
3890    
3891     ev_stop (EV_A_ (W)w);
3892 root 1.248
3893     EV_FREQUENT_CHECK;
3894 root 1.207 }
3895    
3896     void
3897     ev_async_send (EV_P_ ev_async *w)
3898     {
3899     w->sent = 1;
3900 root 1.307 evpipe_write (EV_A_ &async_pending);
3901 root 1.207 }
3902     #endif
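
/* Illustrative only, not part of ev.c: a sketch of waking up a loop from
 * another thread with the ev_async API above (pthreads assumed, link with
 * -lpthread). Multiple ev_async_send calls may be coalesced into a single
 * callback invocation. Names are made up; compiled out via #if 0. */
#if 0
#include <stdio.h>
#include <pthread.h>
#include "ev.h"

static struct ev_loop *main_loop;
static ev_async wakeup;

static void
wakeup_cb (struct ev_loop *loop, ev_async *w, int revents)
{
  puts ("woken up by another thread");
  ev_break (loop, EVBREAK_ALL);
}

static void *
worker (void *arg)
{
  /* ev_async_send may be called from other threads (and signal handlers) */
  ev_async_send (main_loop, &wakeup);
  return 0;
}

int
main (void)
{
  pthread_t thr;

  main_loop = ev_default_loop (0);

  ev_async_init (&wakeup, wakeup_cb);
  ev_async_start (main_loop, &wakeup);

  pthread_create (&thr, 0, worker, 0);
  ev_run (main_loop, 0);
  pthread_join (thr, 0);

  return 0;
}
#endif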
3903    
3904 root 1.1 /*****************************************************************************/
3905 root 1.10
3906 root 1.16 struct ev_once
3907     {
3908 root 1.136 ev_io io;
3909     ev_timer to;
3910 root 1.16 void (*cb)(int revents, void *arg);
3911     void *arg;
3912     };
3913    
3914     static void
3915 root 1.51 once_cb (EV_P_ struct ev_once *once, int revents)
3916 root 1.16 {
3917     void (*cb)(int revents, void *arg) = once->cb;
3918     void *arg = once->arg;
3919    
3920 root 1.259 ev_io_stop (EV_A_ &once->io);
3921 root 1.51 ev_timer_stop (EV_A_ &once->to);
3922 root 1.69 ev_free (once);
3923 root 1.16
3924     cb (revents, arg);
3925     }
3926    
3927     static void
3928 root 1.136 once_cb_io (EV_P_ ev_io *w, int revents)
3929 root 1.16 {
3930 root 1.262 struct ev_once *once = (struct ev_once *)(((char *)w) - offsetof (struct ev_once, io));
3931    
3932     once_cb (EV_A_ once, revents | ev_clear_pending (EV_A_ &once->to));
3933 root 1.16 }
3934    
3935     static void
3936 root 1.136 once_cb_to (EV_P_ ev_timer *w, int revents)
3937 root 1.16 {
3938 root 1.262 struct ev_once *once = (struct ev_once *)(((char *)w) - offsetof (struct ev_once, to));
3939    
3940     once_cb (EV_A_ once, revents | ev_clear_pending (EV_A_ &once->io));
3941 root 1.16 }
3942    
3943     void
3944 root 1.51 ev_once (EV_P_ int fd, int events, ev_tstamp timeout, void (*cb)(int revents, void *arg), void *arg)
3945 root 1.16 {
3946 root 1.74 struct ev_once *once = (struct ev_once *)ev_malloc (sizeof (struct ev_once));
3947 root 1.16
3948 root 1.123 if (expect_false (!once))
3949 root 1.16 {
3950 root 1.341 cb (EV_ERROR | EV_READ | EV_WRITE | EV_TIMER, arg);
3951 root 1.123 return;
3952     }
3953    
3954     once->cb = cb;
3955     once->arg = arg;
3956 root 1.16
3957 root 1.123 ev_init (&once->io, once_cb_io);
3958     if (fd >= 0)
3959     {
3960     ev_io_set (&once->io, fd, events);
3961     ev_io_start (EV_A_ &once->io);
3962     }
3963 root 1.16
3964 root 1.123 ev_init (&once->to, once_cb_to);
3965     if (timeout >= 0.)
3966     {
3967     ev_timer_set (&once->to, timeout, 0.);
3968     ev_timer_start (EV_A_ &once->to);
3969 root 1.16 }
3970     }
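
/* Illustrative only, not part of ev.c: a sketch of the ev_once helper
 * defined above, which combines an io and a timer watcher into a single
 * one-shot callback. Names are made up; compiled out via #if 0. */
#if 0
#include <stdio.h>
#include "ev.h"

static void
once_done (int revents, void *arg)
{
  if (revents & EV_READ)
    puts ("fd became readable");
  else if (revents & EV_TIMER)
    puts ("timed out");
}

static void
wait_for_fd (struct ev_loop *loop, int fd)
{
  /* call once_done when fd becomes readable or after 10 seconds,
     whichever happens first */
  ev_once (loop, fd, EV_READ, 10., once_done, 0);
}
#endif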
3971    
3972 root 1.282 /*****************************************************************************/
3973    
3974 root 1.288 #if EV_WALK_ENABLE
3975 root 1.379 void ecb_cold
3976 root 1.282 ev_walk (EV_P_ int types, void (*cb)(EV_P_ int type, void *w))
3977     {
3978     int i, j;
3979     ev_watcher_list *wl, *wn;
3980    
3981     if (types & (EV_IO | EV_EMBED))
3982     for (i = 0; i < anfdmax; ++i)
3983     for (wl = anfds [i].head; wl; )
3984     {
3985     wn = wl->next;
3986    
3987     #if EV_EMBED_ENABLE
3988     if (ev_cb ((ev_io *)wl) == embed_io_cb)
3989     {
3990     if (types & EV_EMBED)
3991     cb (EV_A_ EV_EMBED, ((char *)wl) - offsetof (struct ev_embed, io));
3992     }
3993     else
3994     #endif
3995     #if EV_USE_INOTIFY
3996     if (ev_cb ((ev_io *)wl) == infy_cb)
3997     ;
3998     else
3999     #endif
4000 root 1.288 if ((ev_io *)wl != &pipe_w)
4001 root 1.282 if (types & EV_IO)
4002     cb (EV_A_ EV_IO, wl);
4003    
4004     wl = wn;
4005     }
4006    
4007     if (types & (EV_TIMER | EV_STAT))
4008     for (i = timercnt + HEAP0; i-- > HEAP0; )
4009     #if EV_STAT_ENABLE
4010     /*TODO: timer is not always active*/
4011     if (ev_cb ((ev_timer *)ANHE_w (timers [i])) == stat_timer_cb)
4012     {
4013     if (types & EV_STAT)
4014     cb (EV_A_ EV_STAT, ((char *)ANHE_w (timers [i])) - offsetof (struct ev_stat, timer));
4015     }
4016     else
4017     #endif
4018     if (types & EV_TIMER)
4019     cb (EV_A_ EV_TIMER, ANHE_w (timers [i]));
4020    
4021     #if EV_PERIODIC_ENABLE
4022     if (types & EV_PERIODIC)
4023     for (i = periodiccnt + HEAP0; i-- > HEAP0; )
4024     cb (EV_A_ EV_PERIODIC, ANHE_w (periodics [i]));
4025     #endif
4026    
4027     #if EV_IDLE_ENABLE
4028     if (types & EV_IDLE)
4029     for (j = NUMPRI; i--; )
4030     for (i = idlecnt [j]; i--; )
4031     cb (EV_A_ EV_IDLE, idles [j][i]);
4032     #endif
4033    
4034     #if EV_FORK_ENABLE
4035     if (types & EV_FORK)
4036     for (i = forkcnt; i--; )
4037     if (ev_cb (forks [i]) != embed_fork_cb)
4038     cb (EV_A_ EV_FORK, forks [i]);
4039     #endif
4040    
4041     #if EV_ASYNC_ENABLE
4042     if (types & EV_ASYNC)
4043     for (i = asynccnt; i--; )
4044     cb (EV_A_ EV_ASYNC, asyncs [i]);
4045     #endif
4046    
4047 root 1.337 #if EV_PREPARE_ENABLE
4048 root 1.282 if (types & EV_PREPARE)
4049     for (i = preparecnt; i--; )
4050 root 1.337 # if EV_EMBED_ENABLE
4051 root 1.282 if (ev_cb (prepares [i]) != embed_prepare_cb)
4052 root 1.337 # endif
4053     cb (EV_A_ EV_PREPARE, prepares [i]);
4054 root 1.282 #endif
4055    
4056 root 1.337 #if EV_CHECK_ENABLE
4057 root 1.282 if (types & EV_CHECK)
4058     for (i = checkcnt; i--; )
4059     cb (EV_A_ EV_CHECK, checks [i]);
4060 root 1.337 #endif
4061 root 1.282
4062 root 1.337 #if EV_SIGNAL_ENABLE
4063 root 1.282 if (types & EV_SIGNAL)
4064 root 1.306 for (i = 0; i < EV_NSIG - 1; ++i)
4065 root 1.282 for (wl = signals [i].head; wl; )
4066     {
4067     wn = wl->next;
4068     cb (EV_A_ EV_SIGNAL, wl);
4069     wl = wn;
4070     }
4071 root 1.337 #endif
4072 root 1.282
4073 root 1.337 #if EV_CHILD_ENABLE
4074 root 1.282 if (types & EV_CHILD)
4075 root 1.338 for (i = (EV_PID_HASHSIZE); i--; )
4076 root 1.282 for (wl = childs [i]; wl; )
4077     {
4078     wn = wl->next;
4079     cb (EV_A_ EV_CHILD, wl);
4080     wl = wn;
4081     }
4082 root 1.337 #endif
4083 root 1.282 /* EV_STAT  0x00001000: stat data changed */
4084     /* EV_EMBED 0x00010000: embedded event loop needs sweep */
4085     }
4086     #endif
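
/* Illustrative only, not part of ev.c: a sketch of the ev_walk API above
 * (available only when libev is built with EV_WALK_ENABLE), here simply
 * counting io and timer watchers. Names are made up; compiled out via
 * #if 0. */
#if 0
#include <stdio.h>
#include "ev.h"

static int n_watchers;

static void
count_cb (struct ev_loop *loop, int type, void *w)
{
  /* type is one of EV_IO, EV_TIMER, ...; w points to the watcher */
  ++n_watchers;
}

static void
count_watchers (struct ev_loop *loop)
{
  n_watchers = 0;
  ev_walk (loop, EV_IO | EV_TIMER, count_cb);
  printf ("%d io/timer watchers\n", n_watchers);
}
#endif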
4087    
4088 root 1.188 #if EV_MULTIPLICITY
4089     #include "ev_wrap.h"
4090     #endif
4091    
4092 root 1.354 EV_CPP(})
4093 root 1.87