/cvs/libev/ev.c
Revision: 1.242
Committed: Fri May 9 14:07:19 2008 UTC by root
Content type: text/plain
Branch: MAIN
Changes since 1.241: +12 -6 lines
Log Message:
*** empty log message ***

File Contents

# User Rev Content
1 root 1.17 /*
2 root 1.36 * libev event processing core, watcher management
3     *
4 root 1.207 * Copyright (c) 2007,2008 Marc Alexander Lehmann <libev@schmorp.de>
5 root 1.17 * All rights reserved.
6     *
7 root 1.199 * Redistribution and use in source and binary forms, with or without modifica-
8     * tion, are permitted provided that the following conditions are met:
9     *
10     * 1. Redistributions of source code must retain the above copyright notice,
11     * this list of conditions and the following disclaimer.
12     *
13     * 2. Redistributions in binary form must reproduce the above copyright
14     * notice, this list of conditions and the following disclaimer in the
15     * documentation and/or other materials provided with the distribution.
16     *
17     * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
18     * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MER-
19     * CHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
20     * EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPE-
21     * CIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
22     * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
23     * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
24     * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTH-
25     * ERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
26     * OF THE POSSIBILITY OF SUCH DAMAGE.
27 root 1.17 *
28 root 1.199 * Alternatively, the contents of this file may be used under the terms of
29     * the GNU General Public License ("GPL") version 2 or any later version,
30     * in which case the provisions of the GPL are applicable instead of
31     * the above. If you wish to allow the use of your version of this file
32     * only under the terms of the GPL and not to allow others to use your
33     * version of this file under the BSD license, indicate your decision
34     * by deleting the provisions above and replace them with the notice
35     * and other provisions required by the GPL. If you do not delete the
36     * provisions above, a recipient may use your version of this file under
37     * either the BSD or the GPL.
38 root 1.17 */
39 root 1.87
40     #ifdef __cplusplus
41     extern "C" {
42     #endif
43    
44 root 1.220 /* this big block deduces configuration from config.h */
45 root 1.59 #ifndef EV_STANDALONE
46 root 1.133 # ifdef EV_CONFIG_H
47     # include EV_CONFIG_H
48     # else
49     # include "config.h"
50     # endif
51 root 1.60
52     # if HAVE_CLOCK_GETTIME
53 root 1.97 # ifndef EV_USE_MONOTONIC
54     # define EV_USE_MONOTONIC 1
55     # endif
56     # ifndef EV_USE_REALTIME
57     # define EV_USE_REALTIME 1
58     # endif
59 root 1.126 # else
60     # ifndef EV_USE_MONOTONIC
61     # define EV_USE_MONOTONIC 0
62     # endif
63     # ifndef EV_USE_REALTIME
64     # define EV_USE_REALTIME 0
65     # endif
66 root 1.60 # endif
67    
68 root 1.193 # ifndef EV_USE_NANOSLEEP
69     # if HAVE_NANOSLEEP
70     # define EV_USE_NANOSLEEP 1
71     # else
72     # define EV_USE_NANOSLEEP 0
73     # endif
74     # endif
75    
76 root 1.127 # ifndef EV_USE_SELECT
77     # if HAVE_SELECT && HAVE_SYS_SELECT_H
78     # define EV_USE_SELECT 1
79     # else
80     # define EV_USE_SELECT 0
81     # endif
82 root 1.60 # endif
83    
84 root 1.127 # ifndef EV_USE_POLL
85     # if HAVE_POLL && HAVE_POLL_H
86     # define EV_USE_POLL 1
87     # else
88     # define EV_USE_POLL 0
89     # endif
90 root 1.60 # endif
91 root 1.127
92     # ifndef EV_USE_EPOLL
93     # if HAVE_EPOLL_CTL && HAVE_SYS_EPOLL_H
94     # define EV_USE_EPOLL 1
95     # else
96     # define EV_USE_EPOLL 0
97     # endif
98 root 1.60 # endif
99 root 1.127
100     # ifndef EV_USE_KQUEUE
101     # if HAVE_KQUEUE && HAVE_SYS_EVENT_H && HAVE_SYS_QUEUE_H
102     # define EV_USE_KQUEUE 1
103     # else
104     # define EV_USE_KQUEUE 0
105     # endif
106 root 1.60 # endif
107 root 1.127
108     # ifndef EV_USE_PORT
109     # if HAVE_PORT_H && HAVE_PORT_CREATE
110     # define EV_USE_PORT 1
111     # else
112     # define EV_USE_PORT 0
113     # endif
114 root 1.118 # endif
115    
116 root 1.152 # ifndef EV_USE_INOTIFY
117     # if HAVE_INOTIFY_INIT && HAVE_SYS_INOTIFY_H
118     # define EV_USE_INOTIFY 1
119     # else
120     # define EV_USE_INOTIFY 0
121     # endif
122     # endif
123    
124 root 1.220 # ifndef EV_USE_EVENTFD
125     # if HAVE_EVENTFD
126     # define EV_USE_EVENTFD 1
127     # else
128     # define EV_USE_EVENTFD 0
129     # endif
130     # endif
131    
132 root 1.29 #endif
133 root 1.17
134 root 1.1 #include <math.h>
135     #include <stdlib.h>
136 root 1.7 #include <fcntl.h>
137 root 1.16 #include <stddef.h>
138 root 1.1
139     #include <stdio.h>
140    
141 root 1.4 #include <assert.h>
142 root 1.1 #include <errno.h>
143 root 1.22 #include <sys/types.h>
144 root 1.71 #include <time.h>
145    
146 root 1.72 #include <signal.h>
147 root 1.71
148 root 1.152 #ifdef EV_H
149     # include EV_H
150     #else
151     # include "ev.h"
152     #endif
153    
154 root 1.103 #ifndef _WIN32
155 root 1.71 # include <sys/time.h>
156 root 1.45 # include <sys/wait.h>
157 root 1.140 # include <unistd.h>
158 root 1.103 #else
159     # define WIN32_LEAN_AND_MEAN
160     # include <windows.h>
161     # ifndef EV_SELECT_IS_WINSOCKET
162     # define EV_SELECT_IS_WINSOCKET 1
163     # endif
164 root 1.45 #endif
165 root 1.103
166 root 1.220 /* this block tries to deduce configuration from header-defined symbols and defaults */
167 root 1.40
168 root 1.29 #ifndef EV_USE_MONOTONIC
169 root 1.121 # define EV_USE_MONOTONIC 0
170 root 1.37 #endif
171    
172 root 1.118 #ifndef EV_USE_REALTIME
173 root 1.121 # define EV_USE_REALTIME 0
174 root 1.118 #endif
175    
176 root 1.193 #ifndef EV_USE_NANOSLEEP
177     # define EV_USE_NANOSLEEP 0
178     #endif
179    
180 root 1.29 #ifndef EV_USE_SELECT
181     # define EV_USE_SELECT 1
182 root 1.10 #endif
183    
184 root 1.59 #ifndef EV_USE_POLL
185 root 1.104 # ifdef _WIN32
186     # define EV_USE_POLL 0
187     # else
188     # define EV_USE_POLL 1
189     # endif
190 root 1.41 #endif
191    
192 root 1.29 #ifndef EV_USE_EPOLL
193 root 1.220 # if __linux && (__GLIBC__ > 2 || (__GLIBC__ == 2 && __GLIBC_MINOR__ >= 4))
194     # define EV_USE_EPOLL 1
195     # else
196     # define EV_USE_EPOLL 0
197     # endif
198 root 1.10 #endif
199    
200 root 1.44 #ifndef EV_USE_KQUEUE
201     # define EV_USE_KQUEUE 0
202     #endif
203    
204 root 1.118 #ifndef EV_USE_PORT
205     # define EV_USE_PORT 0
206 root 1.40 #endif
207    
208 root 1.152 #ifndef EV_USE_INOTIFY
209 root 1.220 # if __linux && (__GLIBC__ > 2 || (__GLIBC__ == 2 && __GLIBC_MINOR__ >= 4))
210     # define EV_USE_INOTIFY 1
211     # else
212     # define EV_USE_INOTIFY 0
213     # endif
214 root 1.152 #endif
215    
216 root 1.149 #ifndef EV_PID_HASHSIZE
217     # if EV_MINIMAL
218     # define EV_PID_HASHSIZE 1
219     # else
220     # define EV_PID_HASHSIZE 16
221     # endif
222     #endif
223    
224 root 1.152 #ifndef EV_INOTIFY_HASHSIZE
225     # if EV_MINIMAL
226     # define EV_INOTIFY_HASHSIZE 1
227     # else
228     # define EV_INOTIFY_HASHSIZE 16
229     # endif
230     #endif
231    
232 root 1.220 #ifndef EV_USE_EVENTFD
233     # if __linux && (__GLIBC__ > 2 || (__GLIBC__ == 2 && __GLIBC_MINOR__ >= 7))
234     # define EV_USE_EVENTFD 1
235     # else
236     # define EV_USE_EVENTFD 0
237     # endif
238     #endif
239    
240     /* this block fixes any misconfiguration where we know we run into trouble otherwise */
241 root 1.40
242     #ifndef CLOCK_MONOTONIC
243     # undef EV_USE_MONOTONIC
244     # define EV_USE_MONOTONIC 0
245     #endif
246    
247 root 1.31 #ifndef CLOCK_REALTIME
248 root 1.40 # undef EV_USE_REALTIME
249 root 1.31 # define EV_USE_REALTIME 0
250     #endif
251 root 1.40
252 root 1.152 #if !EV_STAT_ENABLE
253 root 1.185 # undef EV_USE_INOTIFY
254 root 1.152 # define EV_USE_INOTIFY 0
255     #endif
256    
257 root 1.193 #if !EV_USE_NANOSLEEP
258     # ifndef _WIN32
259     # include <sys/select.h>
260     # endif
261     #endif
262    
263 root 1.152 #if EV_USE_INOTIFY
264     # include <sys/inotify.h>
265     #endif
266    
267 root 1.185 #if EV_SELECT_IS_WINSOCKET
268     # include <winsock.h>
269     #endif
270    
271 root 1.220 #if EV_USE_EVENTFD
272     /* our minimum requirement is glibc 2.7 which has the stub, but not the header */
273 root 1.221 # include <stdint.h>
274 root 1.222 # ifdef __cplusplus
275     extern "C" {
276     # endif
277 root 1.220 int eventfd (unsigned int initval, int flags);
278 root 1.222 # ifdef __cplusplus
279     }
280     # endif
281 root 1.220 #endif
282    
283 root 1.40 /**/
284 root 1.1
285 root 1.176 /*
286     * This is used to avoid floating point rounding problems.
287     * It is added to ev_rt_now when scheduling periodics
288     * to ensure progress, time-wise, even when rounding
289     * errors are against us.
290 root 1.177 * This value is good at least till the year 4000.
291 root 1.176 * Better solutions welcome.
292     */
293     #define TIME_EPSILON 0.0001220703125 /* 1/8192 */
294    
295 root 1.4 #define MIN_TIMEJUMP 1. /* minimum timejump that gets detected (if monotonic clock available) */
296 root 1.120 #define MAX_BLOCKTIME 59.743 /* never wait longer than this time (to detect time jumps) */
297 root 1.176 /*#define CLEANUP_INTERVAL (MAX_BLOCKTIME * 5.) /* how often to try to free memory and re-check fds, TODO */
298 root 1.1
299 root 1.185 #if __GNUC__ >= 4
300 root 1.40 # define expect(expr,value) __builtin_expect ((expr),(value))
301 root 1.169 # define noinline __attribute__ ((noinline))
302 root 1.40 #else
303     # define expect(expr,value) (expr)
304 root 1.140 # define noinline
305 root 1.223 # if __STDC_VERSION__ < 199901L && __GNUC__ < 2
306 root 1.169 # define inline
307     # endif
308 root 1.40 #endif
309    
310     #define expect_false(expr) expect ((expr) != 0, 0)
311     #define expect_true(expr) expect ((expr) != 0, 1)
312 root 1.169 #define inline_size static inline
313    
314     #if EV_MINIMAL
315     # define inline_speed static noinline
316     #else
317     # define inline_speed static inline
318     #endif
319 root 1.40
320 root 1.42 #define NUMPRI (EV_MAXPRI - EV_MINPRI + 1)
321 root 1.164 #define ABSPRI(w) (((W)w)->priority - EV_MINPRI)
322 root 1.42
323 root 1.164 #define EMPTY /* required for microsofts broken pseudo-c compiler */
324 root 1.114 #define EMPTY2(a,b) /* used to suppress some warnings */
325 root 1.103
326 root 1.136 typedef ev_watcher *W;
327     typedef ev_watcher_list *WL;
328     typedef ev_watcher_time *WT;
329 root 1.10
330 root 1.229 #define ev_active(w) ((W)(w))->active
331 root 1.228 #define ev_at(w) ((WT)(w))->at
332    
333 root 1.198 #if EV_USE_MONOTONIC
334 root 1.194 /* sig_atomic_t is used to avoid per-thread variables or locking but still */
335     * giving it a reasonably high chance of working on typical architectures */
336 root 1.207 static EV_ATOMIC_T have_monotonic; /* did clock_gettime (CLOCK_MONOTONIC) work? */
337 root 1.198 #endif
338 root 1.54
339 root 1.103 #ifdef _WIN32
340 root 1.98 # include "ev_win32.c"
341     #endif
342 root 1.67
343 root 1.53 /*****************************************************************************/
344 root 1.1
345 root 1.70 static void (*syserr_cb)(const char *msg);
346 root 1.69
347 root 1.141 void
348     ev_set_syserr_cb (void (*cb)(const char *msg))
349 root 1.69 {
350     syserr_cb = cb;
351     }
352    
353 root 1.141 static void noinline
354 root 1.70 syserr (const char *msg)
355 root 1.69 {
356 root 1.70 if (!msg)
357     msg = "(libev) system error";
358    
359 root 1.69 if (syserr_cb)
360 root 1.70 syserr_cb (msg);
361 root 1.69 else
362     {
363 root 1.70 perror (msg);
364 root 1.69 abort ();
365     }
366     }
367    
368 root 1.224 static void *
369     ev_realloc_emul (void *ptr, long size)
370     {
371     /* some systems, notably openbsd and darwin, fail to properly
372     * implement realloc (x, 0) (as required by both ansi c-89 and
373     * the single unix specification), so work around them here.
374     */
375    
376     if (size)
377     return realloc (ptr, size);
378    
379     free (ptr);
380     return 0;
381     }
382    
383     static void *(*alloc)(void *ptr, long size) = ev_realloc_emul;
384 root 1.69
385 root 1.141 void
386 root 1.155 ev_set_allocator (void *(*cb)(void *ptr, long size))
387 root 1.69 {
388     alloc = cb;
389     }
390    
391 root 1.150 inline_speed void *
392 root 1.155 ev_realloc (void *ptr, long size)
393 root 1.69 {
394 root 1.224 ptr = alloc (ptr, size);
395 root 1.69
396     if (!ptr && size)
397     {
398 root 1.155 fprintf (stderr, "libev: cannot allocate %ld bytes, aborting.", size);
399 root 1.69 abort ();
400     }
401    
402     return ptr;
403     }
404    
405     #define ev_malloc(size) ev_realloc (0, (size))
406     #define ev_free(ptr) ev_realloc ((ptr), 0)
407    
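/* Illustrative usage sketch (not part of ev.c, modelled on the example in the
 * libev documentation): a replacement allocator installed via ev_set_allocator
 * that retries instead of letting libev abort.  Note the void */long signature
 * matching the callback type above. */
static void *
persistent_realloc (void *ptr, long size)
{
  for (;;)
    {
      void *newptr = realloc (ptr, size);

      if (newptr || !size) /* realloc (x, 0) may legitimately return 0 */
        return newptr;

      sleep (60); /* wait a bit, then hope some memory has been freed */
    }
}

/* early in the program, before any other libev call:
 *   ev_set_allocator (persistent_realloc);
 */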
408     /*****************************************************************************/
409    
410 root 1.53 typedef struct
411     {
412 root 1.68 WL head;
413 root 1.53 unsigned char events;
414     unsigned char reify;
415 root 1.103 #if EV_SELECT_IS_WINSOCKET
416     SOCKET handle;
417     #endif
418 root 1.53 } ANFD;
419 root 1.1
420 root 1.53 typedef struct
421     {
422     W w;
423     int events;
424     } ANPENDING;
425 root 1.51
426 root 1.155 #if EV_USE_INOTIFY
427 root 1.241 /* hash table entry per inotify-id */
428 root 1.152 typedef struct
429     {
430     WL head;
431 root 1.155 } ANFS;
432 root 1.152 #endif
433    
434 root 1.241 /* Heap Entry */
435 root 1.242 #define EV_HEAP_CACHE_AT 0
436 root 1.241 #if EV_HEAP_CACHE_AT
437     typedef struct {
438     WT w;
439     ev_tstamp at;
440     } ANHE;
441    
442 root 1.242 #define ANHE_w(he) (he).w /* access watcher, read-write */
443     #define ANHE_at(he) (he).at /* access cached at, read-only */
444     #define ANHE_at_set(he) (he).at = (he).w->at /* update at from watcher */
445 root 1.241 #else
446     typedef WT ANHE;
447    
448     #define ANHE_w(he) (he)
449     #define ANHE_at(he) (he)->at
450     #define ANHE_at_set(he)
451     #endif
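/* Illustrative sketch (not part of ev.c): what the two ANHE representations
 * mean for the hot comparisons in the heap code below.  With EV_HEAP_CACHE_AT
 * the timestamp is stored next to the watcher pointer in the heap array, so a
 * comparison touches only the array; without it, each ANHE_at chases the
 * watcher pointer.  Whenever the ->at of a queued watcher changes,
 * ANHE_at_set must be called on its heap entry to keep the cache in sync
 * (see timers_reify).  This helper compiles in either configuration: */
static int
anhe_earlier (ANHE a, ANHE b)
{
  return ANHE_at (a) < ANHE_at (b);
}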
452    
453 root 1.55 #if EV_MULTIPLICITY
454 root 1.54
455 root 1.80 struct ev_loop
456     {
457 root 1.86 ev_tstamp ev_rt_now;
458 root 1.99 #define ev_rt_now ((loop)->ev_rt_now)
459 root 1.80 #define VAR(name,decl) decl;
460     #include "ev_vars.h"
461     #undef VAR
462     };
463     #include "ev_wrap.h"
464    
465 root 1.116 static struct ev_loop default_loop_struct;
466     struct ev_loop *ev_default_loop_ptr;
467 root 1.54
468 root 1.53 #else
469 root 1.54
470 root 1.86 ev_tstamp ev_rt_now;
471 root 1.80 #define VAR(name,decl) static decl;
472     #include "ev_vars.h"
473     #undef VAR
474    
475 root 1.116 static int ev_default_loop_ptr;
476 root 1.54
477 root 1.51 #endif
478 root 1.1
479 root 1.8 /*****************************************************************************/
480    
481 root 1.141 ev_tstamp
482 root 1.1 ev_time (void)
483     {
484 root 1.29 #if EV_USE_REALTIME
485 root 1.1 struct timespec ts;
486     clock_gettime (CLOCK_REALTIME, &ts);
487     return ts.tv_sec + ts.tv_nsec * 1e-9;
488     #else
489     struct timeval tv;
490     gettimeofday (&tv, 0);
491     return tv.tv_sec + tv.tv_usec * 1e-6;
492     #endif
493     }
494    
495 root 1.140 ev_tstamp inline_size
496 root 1.1 get_clock (void)
497     {
498 root 1.29 #if EV_USE_MONOTONIC
499 root 1.40 if (expect_true (have_monotonic))
500 root 1.1 {
501     struct timespec ts;
502     clock_gettime (CLOCK_MONOTONIC, &ts);
503     return ts.tv_sec + ts.tv_nsec * 1e-9;
504     }
505     #endif
506    
507     return ev_time ();
508     }
509    
510 root 1.85 #if EV_MULTIPLICITY
511 root 1.51 ev_tstamp
512     ev_now (EV_P)
513     {
514 root 1.85 return ev_rt_now;
515 root 1.51 }
516 root 1.85 #endif
517 root 1.51
518 root 1.193 void
519     ev_sleep (ev_tstamp delay)
520     {
521     if (delay > 0.)
522     {
523     #if EV_USE_NANOSLEEP
524     struct timespec ts;
525    
526     ts.tv_sec = (time_t)delay;
527     ts.tv_nsec = (long)((delay - (ev_tstamp)(ts.tv_sec)) * 1e9);
528    
529     nanosleep (&ts, 0);
530     #elif defined(_WIN32)
531 root 1.217 Sleep ((unsigned long)(delay * 1e3));
532 root 1.193 #else
533     struct timeval tv;
534    
535     tv.tv_sec = (time_t)delay;
536     tv.tv_usec = (long)((delay - (ev_tstamp)(tv.tv_sec)) * 1e6);
537    
538     select (0, 0, 0, 0, &tv);
539     #endif
540     }
541     }
542    
543     /*****************************************************************************/
544    
545 root 1.233 #define MALLOC_ROUND 4096 /* prefer to allocate in chunks of this size, must be 2**n and >> 4 longs */
546 root 1.232
547 root 1.163 int inline_size
548     array_nextsize (int elem, int cur, int cnt)
549     {
550     int ncur = cur + 1;
551    
552     do
553     ncur <<= 1;
554     while (cnt > ncur);
555    
556 root 1.232 /* if size is large, round to MALLOC_ROUND - 4 * longs to accommodate malloc overhead */
557     if (elem * ncur > MALLOC_ROUND - sizeof (void *) * 4)
558 root 1.163 {
559     ncur *= elem;
560 root 1.232 ncur = (ncur + elem + (MALLOC_ROUND - 1) + sizeof (void *) * 4) & ~(MALLOC_ROUND - 1);
561 root 1.163 ncur = ncur - sizeof (void *) * 4;
562     ncur /= elem;
563     }
564    
565     return ncur;
566     }
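/* Worked example (illustration only, assuming 8-byte pointers): growing an
 * array of 16-byte elements from cur = 100 so that it can hold cnt = 1000:
 *   doubling: 101 -> 202 -> 404 -> 808 -> 1616 elements
 *   1616 * 16 = 25856 bytes > 4096 - 32, so round up:
 *   (25856 + 16 + 4095 + 32) & ~4095 = 28672; minus 32 -> 28640; / 16 -> 1790
 * the resulting request of 1790 * 16 = 28640 bytes plus ~32 bytes of assumed
 * allocator bookkeeping fills 7 * 4096 bytes exactly, instead of spilling a
 * few bytes into an eighth page. */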
567    
568 root 1.171 static noinline void *
569 root 1.163 array_realloc (int elem, void *base, int *cur, int cnt)
570     {
571     *cur = array_nextsize (elem, *cur, cnt);
572     return ev_realloc (base, elem * *cur);
573     }
574 root 1.29
575 root 1.74 #define array_needsize(type,base,cur,cnt,init) \
576 root 1.163 if (expect_false ((cnt) > (cur))) \
577 root 1.69 { \
578 root 1.163 int ocur_ = (cur); \
579     (base) = (type *)array_realloc \
580     (sizeof (type), (base), &(cur), (cnt)); \
581     init ((base) + (ocur_), (cur) - ocur_); \
582 root 1.1 }
583    
584 root 1.163 #if 0
585 root 1.74 #define array_slim(type,stem) \
586 root 1.67 if (stem ## max < array_roundsize (stem ## cnt >> 2)) \
587     { \
588     stem ## max = array_roundsize (stem ## cnt >> 1); \
589 root 1.74 base = (type *)ev_realloc (base, sizeof (type) * (stem ## max));\
590 root 1.67 fprintf (stderr, "slimmed down " # stem " to %d\n", stem ## max);/*D*/\
591     }
592 root 1.163 #endif
593 root 1.67
594 root 1.65 #define array_free(stem, idx) \
595 root 1.69 ev_free (stem ## s idx); stem ## cnt idx = stem ## max idx = 0;
596 root 1.65
597 root 1.8 /*****************************************************************************/
598    
599 root 1.140 void noinline
600 root 1.78 ev_feed_event (EV_P_ void *w, int revents)
601 root 1.1 {
602 root 1.78 W w_ = (W)w;
603 root 1.171 int pri = ABSPRI (w_);
604 root 1.78
605 root 1.123 if (expect_false (w_->pending))
606 root 1.171 pendings [pri][w_->pending - 1].events |= revents;
607     else
608 root 1.32 {
609 root 1.171 w_->pending = ++pendingcnt [pri];
610     array_needsize (ANPENDING, pendings [pri], pendingmax [pri], w_->pending, EMPTY2);
611     pendings [pri][w_->pending - 1].w = w_;
612     pendings [pri][w_->pending - 1].events = revents;
613 root 1.32 }
614 root 1.1 }
615    
616 root 1.179 void inline_speed
617 root 1.51 queue_events (EV_P_ W *events, int eventcnt, int type)
618 root 1.27 {
619     int i;
620    
621     for (i = 0; i < eventcnt; ++i)
622 root 1.78 ev_feed_event (EV_A_ events [i], type);
623 root 1.27 }
624    
625 root 1.141 /*****************************************************************************/
626    
627     void inline_size
628     anfds_init (ANFD *base, int count)
629     {
630     while (count--)
631     {
632     base->head = 0;
633     base->events = EV_NONE;
634     base->reify = 0;
635    
636     ++base;
637     }
638     }
639    
640 root 1.140 void inline_speed
641 root 1.79 fd_event (EV_P_ int fd, int revents)
642 root 1.1 {
643     ANFD *anfd = anfds + fd;
644 root 1.136 ev_io *w;
645 root 1.1
646 root 1.136 for (w = (ev_io *)anfd->head; w; w = (ev_io *)((WL)w)->next)
647 root 1.1 {
648 root 1.79 int ev = w->events & revents;
649 root 1.1
650     if (ev)
651 root 1.78 ev_feed_event (EV_A_ (W)w, ev);
652 root 1.1 }
653     }
654    
655 root 1.79 void
656     ev_feed_fd_event (EV_P_ int fd, int revents)
657     {
658 root 1.168 if (fd >= 0 && fd < anfdmax)
659     fd_event (EV_A_ fd, revents);
660 root 1.79 }
661    
662 root 1.140 void inline_size
663 root 1.51 fd_reify (EV_P)
664 root 1.9 {
665     int i;
666    
667 root 1.27 for (i = 0; i < fdchangecnt; ++i)
668     {
669     int fd = fdchanges [i];
670     ANFD *anfd = anfds + fd;
671 root 1.136 ev_io *w;
672 root 1.27
673 root 1.184 unsigned char events = 0;
674 root 1.27
675 root 1.136 for (w = (ev_io *)anfd->head; w; w = (ev_io *)((WL)w)->next)
676 root 1.184 events |= (unsigned char)w->events;
677 root 1.27
678 root 1.103 #if EV_SELECT_IS_WINSOCKET
679     if (events)
680     {
681     unsigned long argp;
682 root 1.200 #ifdef EV_FD_TO_WIN32_HANDLE
683     anfd->handle = EV_FD_TO_WIN32_HANDLE (fd);
684     #else
685     anfd->handle = _get_osfhandle (fd);
686     #endif
687 root 1.103 assert (("libev only supports socket fds in this configuration", ioctlsocket (anfd->handle, FIONREAD, &argp) == 0));
688     }
689     #endif
690    
691 root 1.184 {
692     unsigned char o_events = anfd->events;
693     unsigned char o_reify = anfd->reify;
694    
695     anfd->reify = 0;
696     anfd->events = events;
697 root 1.27
698 root 1.184 if (o_events != events || o_reify & EV_IOFDSET)
699     backend_modify (EV_A_ fd, o_events, events);
700     }
701 root 1.27 }
702    
703     fdchangecnt = 0;
704     }
705    
706 root 1.140 void inline_size
707 root 1.183 fd_change (EV_P_ int fd, int flags)
708 root 1.27 {
709 root 1.183 unsigned char reify = anfds [fd].reify;
710 root 1.184 anfds [fd].reify |= flags;
711 root 1.27
712 root 1.183 if (expect_true (!reify))
713     {
714     ++fdchangecnt;
715     array_needsize (int, fdchanges, fdchangemax, fdchangecnt, EMPTY2);
716     fdchanges [fdchangecnt - 1] = fd;
717     }
718 root 1.9 }
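/* Note (illustration only): fd_change merely records which fds need attention;
 * the expensive backend_modify calls all happen in fd_reify above, once per
 * loop iteration, so repeatedly starting and stopping watchers on the same fd
 * between iterations collapses into a single kernel update. */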
719    
720 root 1.140 void inline_speed
721 root 1.51 fd_kill (EV_P_ int fd)
722 root 1.41 {
723 root 1.136 ev_io *w;
724 root 1.41
725 root 1.136 while ((w = (ev_io *)anfds [fd].head))
726 root 1.41 {
727 root 1.51 ev_io_stop (EV_A_ w);
728 root 1.78 ev_feed_event (EV_A_ (W)w, EV_ERROR | EV_READ | EV_WRITE);
729 root 1.41 }
730     }
731    
732 root 1.140 int inline_size
733 root 1.71 fd_valid (int fd)
734     {
735 root 1.103 #ifdef _WIN32
736     return _get_osfhandle (fd) != -1;
737 root 1.71 #else
738     return fcntl (fd, F_GETFD) != -1;
739     #endif
740     }
741    
742 root 1.19 /* called on EBADF to verify fds */
743 root 1.140 static void noinline
744 root 1.51 fd_ebadf (EV_P)
745 root 1.19 {
746     int fd;
747    
748     for (fd = 0; fd < anfdmax; ++fd)
749 root 1.27 if (anfds [fd].events)
750 root 1.71 if (!fd_valid (fd) && errno == EBADF)
751 root 1.51 fd_kill (EV_A_ fd);
752 root 1.41 }
753    
754     /* called on ENOMEM in select/poll to kill some fds and retry */
755 root 1.140 static void noinline
756 root 1.51 fd_enomem (EV_P)
757 root 1.41 {
758 root 1.62 int fd;
759 root 1.41
760 root 1.62 for (fd = anfdmax; fd--; )
761 root 1.41 if (anfds [fd].events)
762     {
763 root 1.51 fd_kill (EV_A_ fd);
764 root 1.41 return;
765     }
766 root 1.19 }
767    
768 root 1.130 /* usually called after fork if backend needs to re-arm all fds from scratch */
769 root 1.140 static void noinline
770 root 1.56 fd_rearm_all (EV_P)
771     {
772     int fd;
773    
774     for (fd = 0; fd < anfdmax; ++fd)
775     if (anfds [fd].events)
776     {
777     anfds [fd].events = 0;
778 root 1.184 fd_change (EV_A_ fd, EV_IOFDSET | 1);
779 root 1.56 }
780     }
781    
782 root 1.8 /*****************************************************************************/
783    
784 root 1.235 /*
785 root 1.241 * the heap functions want a real array index. array index 0 is guaranteed to not
786     * be in-use at any time. the first heap entry is at array [HEAP0]. DHEAP gives
787     * the branching factor of the d-tree.
788     */
789    
790     /*
791 root 1.235 * at the moment we allow libev the luxury of two heaps,
792     * a small-code-size 2-heap one and a ~1.5kb larger 4-heap
793     * which is more cache-efficient.
794     * the difference is about 5% with 50000+ watchers.
795     */
796 root 1.241 #define EV_USE_4HEAP !EV_MINIMAL
797     #if EV_USE_4HEAP
798 root 1.235
799 root 1.237 #define DHEAP 4
800     #define HEAP0 (DHEAP - 1) /* index of first element in heap */
801 root 1.235
802     /* towards the root */
803     void inline_speed
804 root 1.241 upheap (ANHE *heap, int k)
805 root 1.235 {
806 root 1.241 ANHE he = heap [k];
807 root 1.235
808     for (;;)
809     {
810 root 1.237 int p = ((k - HEAP0 - 1) / DHEAP) + HEAP0;
811 root 1.235
812 root 1.241 if (p == k || ANHE_at (heap [p]) <= ANHE_at (he))
813 root 1.235 break;
814    
815     heap [k] = heap [p];
816 root 1.241 ev_active (ANHE_w (heap [k])) = k;
817 root 1.235 k = p;
818     }
819    
820 root 1.241 ev_active (ANHE_w (he)) = k;
821     heap [k] = he;
822 root 1.235 }
823    
824     /* away from the root */
825     void inline_speed
826 root 1.241 downheap (ANHE *heap, int N, int k)
827 root 1.235 {
828 root 1.241 ANHE he = heap [k];
829     ANHE *E = heap + N + HEAP0;
830 root 1.235
831     for (;;)
832     {
833     ev_tstamp minat;
834 root 1.241 ANHE *minpos;
835     ANHE *pos = heap + DHEAP * (k - HEAP0) + HEAP0 + 1;
836 root 1.235
837     /* find minimum child */
838 root 1.237 if (expect_true (pos + DHEAP - 1 < E))
839 root 1.235 {
840 root 1.241 /* fast path */ (minpos = pos + 0), (minat = ANHE_at (*minpos));
841     if (ANHE_at (pos [1]) < minat) (minpos = pos + 1), (minat = ANHE_at (*minpos));
842     if (ANHE_at (pos [2]) < minat) (minpos = pos + 2), (minat = ANHE_at (*minpos));
843     if (ANHE_at (pos [3]) < minat) (minpos = pos + 3), (minat = ANHE_at (*minpos));
844 root 1.235 }
845 root 1.240 else if (pos < E)
846 root 1.235 {
847 root 1.241 /* slow path */ (minpos = pos + 0), (minat = ANHE_at (*minpos));
848     if (pos + 1 < E && ANHE_at (pos [1]) < minat) (minpos = pos + 1), (minat = ANHE_at (*minpos));
849     if (pos + 2 < E && ANHE_at (pos [2]) < minat) (minpos = pos + 2), (minat = ANHE_at (*minpos));
850     if (pos + 3 < E && ANHE_at (pos [3]) < minat) (minpos = pos + 3), (minat = ANHE_at (*minpos));
851 root 1.235 }
852 root 1.240 else
853     break;
854 root 1.235
855 root 1.241 if (ANHE_at (he) <= minat)
856 root 1.235 break;
857    
858 root 1.241 ev_active (ANHE_w (*minpos)) = k;
859 root 1.235 heap [k] = *minpos;
860    
861     k = minpos - heap;
862     }
863    
864 root 1.241 ev_active (ANHE_w (he)) = k;
865     heap [k] = he;
866 root 1.235 }
867    
868     #else /* 4HEAP */
869    
870     #define HEAP0 1
871    
872 root 1.227 /* towards the root */
873 root 1.140 void inline_speed
874 root 1.241 upheap (ANHE *heap, int k)
875 root 1.1 {
876 root 1.241 ANHE he = heap [k];
877 root 1.1
878 root 1.228 for (;;)
879 root 1.1 {
880 root 1.228 int p = k >> 1;
881 root 1.179
882 root 1.228 /* maybe we could use a dummy element at heap [0]? */
883 root 1.241 if (!p || ANHE_at (heap [p]) <= ANHE_at (he))
884 root 1.179 break;
885    
886     heap [k] = heap [p];
887 root 1.241 ev_active (ANHE_w (heap [k])) = k;
888 root 1.179 k = p;
889 root 1.1 }
890    
891 root 1.54 heap [k] = he;
892 root 1.241 ev_active (ANHE_w (heap [k])) = k;
893 root 1.1 }
894    
895 root 1.227 /* away from the root */
896 root 1.140 void inline_speed
897 root 1.241 downheap (ANHE *heap, int N, int k)
898 root 1.1 {
899 root 1.241 ANHE he = heap [k];
900 root 1.1
901 root 1.179 for (;;)
902 root 1.1 {
903 root 1.228 int c = k << 1;
904 root 1.179
905 root 1.228 if (c > N)
906 root 1.179 break;
907 root 1.1
908 root 1.241 c += c + 1 <= N && ANHE_at (heap [c]) > ANHE_at (heap [c + 1])
909 root 1.179 ? 1 : 0;
910 root 1.1
911 root 1.241 if (ANHE_at (he) <= ANHE_at (heap [c]))
912 root 1.1 break;
913    
914 root 1.179 heap [k] = heap [c];
915 root 1.241 ev_active (ANHE_w (heap [k])) = k;
916 root 1.235
917 root 1.179 k = c;
918 root 1.1 }
919    
920 root 1.241 heap [k] = he;
921     ev_active (ANHE_w (he)) = k;
922 root 1.1 }
923 root 1.235 #endif
924 root 1.1
925 root 1.140 void inline_size
926 root 1.241 adjustheap (ANHE *heap, int N, int k)
927 root 1.84 {
928 root 1.99 upheap (heap, k);
929     downheap (heap, N, k);
930 root 1.84 }
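/* Index arithmetic example (illustration only): in the 4-heap, HEAP0 is 3,
 * heap [0..2] stay unused and the root lives at heap [3]; its children are
 * heap [4..7] (DHEAP * (k - HEAP0) + HEAP0 + 1 with k = 3), the children of
 * heap [4] are heap [8..11], and the parent of heap [k] is
 * heap [((k - HEAP0 - 1) / DHEAP) + HEAP0].  In the 2-heap, HEAP0 is 1 and
 * the classic rules apply: children at 2 * k and 2 * k + 1, parent at k >> 1. */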
931    
932 root 1.8 /*****************************************************************************/
933    
934 root 1.7 typedef struct
935     {
936 root 1.68 WL head;
937 root 1.207 EV_ATOMIC_T gotsig;
938 root 1.7 } ANSIG;
939    
940     static ANSIG *signals;
941 root 1.4 static int signalmax;
942 root 1.1
943 root 1.207 static EV_ATOMIC_T gotsig;
944 root 1.7
945 root 1.140 void inline_size
946 root 1.7 signals_init (ANSIG *base, int count)
947 root 1.1 {
948     while (count--)
949 root 1.7 {
950     base->head = 0;
951     base->gotsig = 0;
952 root 1.33
953 root 1.7 ++base;
954     }
955     }
956    
957 root 1.207 /*****************************************************************************/
958    
959     void inline_speed
960     fd_intern (int fd)
961     {
962     #ifdef _WIN32
963     int arg = 1;
964     ioctlsocket (_get_osfhandle (fd), FIONBIO, &arg);
965     #else
966     fcntl (fd, F_SETFD, FD_CLOEXEC);
967     fcntl (fd, F_SETFL, O_NONBLOCK);
968     #endif
969     }
970    
971     static void noinline
972     evpipe_init (EV_P)
973     {
974     if (!ev_is_active (&pipeev))
975     {
976 root 1.220 #if EV_USE_EVENTFD
977     if ((evfd = eventfd (0, 0)) >= 0)
978     {
979     evpipe [0] = -1;
980     fd_intern (evfd);
981     ev_io_set (&pipeev, evfd, EV_READ);
982     }
983     else
984     #endif
985     {
986     while (pipe (evpipe))
987     syserr ("(libev) error creating signal/async pipe");
988 root 1.207
989 root 1.220 fd_intern (evpipe [0]);
990     fd_intern (evpipe [1]);
991     ev_io_set (&pipeev, evpipe [0], EV_READ);
992     }
993 root 1.207
994     ev_io_start (EV_A_ &pipeev);
995 root 1.210 ev_unref (EV_A); /* watcher should not keep loop alive */
996 root 1.207 }
997     }
998    
999     void inline_size
1000 root 1.214 evpipe_write (EV_P_ EV_ATOMIC_T *flag)
1001 root 1.207 {
1002 root 1.214 if (!*flag)
1003 root 1.207 {
1004 ayin 1.215 int old_errno = errno; /* save errno because write might clobber it */
1005 root 1.214
1006     *flag = 1;
1007 root 1.220
1008     #if EV_USE_EVENTFD
1009     if (evfd >= 0)
1010     {
1011     uint64_t counter = 1;
1012     write (evfd, &counter, sizeof (uint64_t));
1013     }
1014     else
1015     #endif
1016     write (evpipe [1], &old_errno, 1);
1017 root 1.214
1018 root 1.207 errno = old_errno;
1019     }
1020     }
1021    
1022     static void
1023     pipecb (EV_P_ ev_io *iow, int revents)
1024     {
1025 root 1.220 #if EV_USE_EVENTFD
1026     if (evfd >= 0)
1027     {
1028 root 1.232 uint64_t counter;
1029 root 1.220 read (evfd, &counter, sizeof (uint64_t));
1030     }
1031     else
1032     #endif
1033     {
1034     char dummy;
1035     read (evpipe [0], &dummy, 1);
1036     }
1037 root 1.207
1038 root 1.211 if (gotsig && ev_is_default_loop (EV_A))
1039 root 1.207 {
1040     int signum;
1041     gotsig = 0;
1042    
1043     for (signum = signalmax; signum--; )
1044     if (signals [signum].gotsig)
1045     ev_feed_signal_event (EV_A_ signum + 1);
1046     }
1047    
1048 root 1.209 #if EV_ASYNC_ENABLE
1049 root 1.207 if (gotasync)
1050     {
1051     int i;
1052     gotasync = 0;
1053    
1054     for (i = asynccnt; i--; )
1055     if (asyncs [i]->sent)
1056     {
1057     asyncs [i]->sent = 0;
1058     ev_feed_event (EV_A_ asyncs [i], EV_ASYNC);
1059     }
1060     }
1061 root 1.209 #endif
1062 root 1.207 }
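/* Usage sketch (illustration only, not part of ev.c): the pipe/eventfd above
 * is what makes ev_async_send usable from other threads and signal handlers -
 * it only sets flags and wakes the loop via evpipe_write.  Assuming the
 * default EV_MULTIPLICITY build: */
static ev_async wakeup_watcher;

static void
wakeup_cb (EV_P_ ev_async *w, int revents)
{
  /* runs in the loop thread after some other thread called ev_async_send */
}

static void
wakeup_example (struct ev_loop *loop)
{
  ev_async_init (&wakeup_watcher, wakeup_cb);
  ev_async_start (loop, &wakeup_watcher);
  /* from any other thread: ev_async_send (loop, &wakeup_watcher); */
}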
1063    
1064     /*****************************************************************************/
1065    
1066 root 1.7 static void
1067 root 1.218 ev_sighandler (int signum)
1068 root 1.7 {
1069 root 1.207 #if EV_MULTIPLICITY
1070     struct ev_loop *loop = &default_loop_struct;
1071     #endif
1072    
1073 root 1.103 #if _WIN32
1074 root 1.218 signal (signum, ev_sighandler);
1075 root 1.67 #endif
1076    
1077 root 1.7 signals [signum - 1].gotsig = 1;
1078 root 1.214 evpipe_write (EV_A_ &gotsig);
1079 root 1.7 }
1080    
1081 root 1.140 void noinline
1082 root 1.79 ev_feed_signal_event (EV_P_ int signum)
1083     {
1084 root 1.80 WL w;
1085    
1086 root 1.79 #if EV_MULTIPLICITY
1087 root 1.116 assert (("feeding signal events is only supported in the default loop", loop == ev_default_loop_ptr));
1088 root 1.79 #endif
1089    
1090     --signum;
1091    
1092     if (signum < 0 || signum >= signalmax)
1093     return;
1094    
1095     signals [signum].gotsig = 0;
1096    
1097     for (w = signals [signum].head; w; w = w->next)
1098     ev_feed_event (EV_A_ (W)w, EV_SIGNAL);
1099     }
1100    
1101 root 1.8 /*****************************************************************************/
1102    
1103 root 1.182 static WL childs [EV_PID_HASHSIZE];
1104 root 1.71
1105 root 1.103 #ifndef _WIN32
1106 root 1.45
1107 root 1.136 static ev_signal childev;
1108 root 1.59
1109 root 1.206 #ifndef WIFCONTINUED
1110     # define WIFCONTINUED(status) 0
1111     #endif
1112    
1113 root 1.140 void inline_speed
1114 root 1.216 child_reap (EV_P_ int chain, int pid, int status)
1115 root 1.47 {
1116 root 1.136 ev_child *w;
1117 root 1.206 int traced = WIFSTOPPED (status) || WIFCONTINUED (status);
1118 root 1.47
1119 root 1.149 for (w = (ev_child *)childs [chain & (EV_PID_HASHSIZE - 1)]; w; w = (ev_child *)((WL)w)->next)
1120 root 1.206 {
1121     if ((w->pid == pid || !w->pid)
1122     && (!traced || (w->flags & 1)))
1123     {
1124 root 1.216 ev_set_priority (w, EV_MAXPRI); /* need to do it *now*, this *must* be the same prio as the signal watcher itself */
1125 root 1.206 w->rpid = pid;
1126     w->rstatus = status;
1127     ev_feed_event (EV_A_ (W)w, EV_CHILD);
1128     }
1129     }
1130 root 1.47 }
1131    
1132 root 1.142 #ifndef WCONTINUED
1133     # define WCONTINUED 0
1134     #endif
1135    
1136 root 1.47 static void
1137 root 1.136 childcb (EV_P_ ev_signal *sw, int revents)
1138 root 1.22 {
1139     int pid, status;
1140    
1141 root 1.142 /* some systems define WCONTINUED but then fail to support it (linux 2.4) */
1142     if (0 >= (pid = waitpid (-1, &status, WNOHANG | WUNTRACED | WCONTINUED)))
1143     if (!WCONTINUED
1144     || errno != EINVAL
1145     || 0 >= (pid = waitpid (-1, &status, WNOHANG | WUNTRACED)))
1146     return;
1147    
1148 root 1.216 /* make sure we are called again until all children have been reaped */
1149 root 1.142 /* we need to do it this way so that the callback gets called before we continue */
1150     ev_feed_event (EV_A_ (W)sw, EV_SIGNAL);
1151 root 1.47
1152 root 1.216 child_reap (EV_A_ pid, pid, status);
1153 root 1.149 if (EV_PID_HASHSIZE > 1)
1154 root 1.216 child_reap (EV_A_ 0, pid, status); /* this might trigger a watcher twice, but feed_event catches that */
1155 root 1.22 }
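/* Usage sketch (illustration only, not part of ev.c): waiting for one child in
 * the default loop.  Assumes the four-argument ev_child_init of this libev
 * version, whose last argument requests stop/continue tracing when nonzero. */
static ev_child child_watcher;

static void
child_cb (EV_P_ ev_child *w, int revents)
{
  /* w->rpid and w->rstatus carry the values reaped by childcb above */
  ev_child_stop (EV_A_ w);
}

/* after fork (), in the parent, default loop only:
 *   ev_child_init (&child_watcher, child_cb, pid, 0);
 *   ev_child_start (ev_default_loop (0), &child_watcher);
 */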
1156    
1157 root 1.45 #endif
1158    
1159 root 1.22 /*****************************************************************************/
1160    
1161 root 1.118 #if EV_USE_PORT
1162     # include "ev_port.c"
1163     #endif
1164 root 1.44 #if EV_USE_KQUEUE
1165     # include "ev_kqueue.c"
1166     #endif
1167 root 1.29 #if EV_USE_EPOLL
1168 root 1.1 # include "ev_epoll.c"
1169     #endif
1170 root 1.59 #if EV_USE_POLL
1171 root 1.41 # include "ev_poll.c"
1172     #endif
1173 root 1.29 #if EV_USE_SELECT
1174 root 1.1 # include "ev_select.c"
1175     #endif
1176    
1177 root 1.24 int
1178     ev_version_major (void)
1179     {
1180     return EV_VERSION_MAJOR;
1181     }
1182    
1183     int
1184     ev_version_minor (void)
1185     {
1186     return EV_VERSION_MINOR;
1187     }
1188    
1189 root 1.49 /* return true if we are running with elevated privileges and should ignore env variables */
1190 root 1.140 int inline_size
1191 root 1.51 enable_secure (void)
1192 root 1.41 {
1193 root 1.103 #ifdef _WIN32
1194 root 1.49 return 0;
1195     #else
1196 root 1.41 return getuid () != geteuid ()
1197     || getgid () != getegid ();
1198 root 1.49 #endif
1199 root 1.41 }
1200    
1201 root 1.111 unsigned int
1202 root 1.129 ev_supported_backends (void)
1203     {
1204 root 1.130 unsigned int flags = 0;
1205 root 1.129
1206     if (EV_USE_PORT ) flags |= EVBACKEND_PORT;
1207     if (EV_USE_KQUEUE) flags |= EVBACKEND_KQUEUE;
1208     if (EV_USE_EPOLL ) flags |= EVBACKEND_EPOLL;
1209     if (EV_USE_POLL ) flags |= EVBACKEND_POLL;
1210     if (EV_USE_SELECT) flags |= EVBACKEND_SELECT;
1211    
1212     return flags;
1213     }
1214    
1215     unsigned int
1216 root 1.130 ev_recommended_backends (void)
1217 root 1.1 {
1218 root 1.131 unsigned int flags = ev_supported_backends ();
1219 root 1.129
1220     #ifndef __NetBSD__
1221     /* kqueue is borked on everything but netbsd apparently */
1222     /* it usually doesn't work correctly on anything but sockets and pipes */
1223     flags &= ~EVBACKEND_KQUEUE;
1224     #endif
1225     #ifdef __APPLE__
1226     /* flags &= ~EVBACKEND_KQUEUE; for documentation */
1227     flags &= ~EVBACKEND_POLL;
1228     #endif
1229    
1230     return flags;
1231 root 1.51 }
1232    
1233 root 1.130 unsigned int
1234 root 1.134 ev_embeddable_backends (void)
1235     {
1236 root 1.196 int flags = EVBACKEND_EPOLL | EVBACKEND_KQUEUE | EVBACKEND_PORT;
1237    
1238 root 1.192 /* epoll embeddability broken on all linux versions up to at least 2.6.23 */
1239 root 1.196 /* please fix it and tell me how to detect the fix */
1240     flags &= ~EVBACKEND_EPOLL;
1241    
1242     return flags;
1243 root 1.134 }
1244    
1245     unsigned int
1246 root 1.130 ev_backend (EV_P)
1247     {
1248     return backend;
1249     }
1250    
1251 root 1.162 unsigned int
1252     ev_loop_count (EV_P)
1253     {
1254     return loop_count;
1255     }
1256    
1257 root 1.193 void
1258     ev_set_io_collect_interval (EV_P_ ev_tstamp interval)
1259     {
1260     io_blocktime = interval;
1261     }
1262    
1263     void
1264     ev_set_timeout_collect_interval (EV_P_ ev_tstamp interval)
1265     {
1266     timeout_blocktime = interval;
1267     }
1268    
1269 root 1.151 static void noinline
1270 root 1.108 loop_init (EV_P_ unsigned int flags)
1271 root 1.51 {
1272 root 1.130 if (!backend)
1273 root 1.23 {
1274 root 1.29 #if EV_USE_MONOTONIC
1275 root 1.23 {
1276     struct timespec ts;
1277     if (!clock_gettime (CLOCK_MONOTONIC, &ts))
1278     have_monotonic = 1;
1279     }
1280 root 1.1 #endif
1281    
1282 root 1.209 ev_rt_now = ev_time ();
1283     mn_now = get_clock ();
1284     now_floor = mn_now;
1285     rtmn_diff = ev_rt_now - mn_now;
1286 root 1.1
1287 root 1.193 io_blocktime = 0.;
1288     timeout_blocktime = 0.;
1289 root 1.209 backend = 0;
1290     backend_fd = -1;
1291     gotasync = 0;
1292     #if EV_USE_INOTIFY
1293     fs_fd = -2;
1294     #endif
1295 root 1.193
1296 root 1.158 /* pid check not overridable via env */
1297     #ifndef _WIN32
1298     if (flags & EVFLAG_FORKCHECK)
1299     curpid = getpid ();
1300     #endif
1301    
1302 root 1.128 if (!(flags & EVFLAG_NOENV)
1303     && !enable_secure ()
1304     && getenv ("LIBEV_FLAGS"))
1305 root 1.108 flags = atoi (getenv ("LIBEV_FLAGS"));
1306    
1307 root 1.225 if (!(flags & 0x0000ffffU))
1308 root 1.129 flags |= ev_recommended_backends ();
1309 root 1.41
1310 root 1.118 #if EV_USE_PORT
1311 root 1.130 if (!backend && (flags & EVBACKEND_PORT )) backend = port_init (EV_A_ flags);
1312 root 1.118 #endif
1313 root 1.44 #if EV_USE_KQUEUE
1314 root 1.130 if (!backend && (flags & EVBACKEND_KQUEUE)) backend = kqueue_init (EV_A_ flags);
1315 root 1.44 #endif
1316 root 1.29 #if EV_USE_EPOLL
1317 root 1.130 if (!backend && (flags & EVBACKEND_EPOLL )) backend = epoll_init (EV_A_ flags);
1318 root 1.41 #endif
1319 root 1.59 #if EV_USE_POLL
1320 root 1.130 if (!backend && (flags & EVBACKEND_POLL )) backend = poll_init (EV_A_ flags);
1321 root 1.1 #endif
1322 root 1.29 #if EV_USE_SELECT
1323 root 1.130 if (!backend && (flags & EVBACKEND_SELECT)) backend = select_init (EV_A_ flags);
1324 root 1.1 #endif
1325 root 1.70
1326 root 1.207 ev_init (&pipeev, pipecb);
1327     ev_set_priority (&pipeev, EV_MAXPRI);
1328 root 1.56 }
1329     }
1330    
1331 root 1.151 static void noinline
1332 root 1.56 loop_destroy (EV_P)
1333     {
1334 root 1.65 int i;
1335    
1336 root 1.207 if (ev_is_active (&pipeev))
1337     {
1338     ev_ref (EV_A); /* signal watcher */
1339     ev_io_stop (EV_A_ &pipeev);
1340    
1341 root 1.220 #if EV_USE_EVENTFD
1342     if (evfd >= 0)
1343     close (evfd);
1344     #endif
1345    
1346     if (evpipe [0] >= 0)
1347     {
1348     close (evpipe [0]);
1349     close (evpipe [1]);
1350     }
1351 root 1.207 }
1352    
1353 root 1.152 #if EV_USE_INOTIFY
1354     if (fs_fd >= 0)
1355     close (fs_fd);
1356     #endif
1357    
1358     if (backend_fd >= 0)
1359     close (backend_fd);
1360    
1361 root 1.118 #if EV_USE_PORT
1362 root 1.130 if (backend == EVBACKEND_PORT ) port_destroy (EV_A);
1363 root 1.118 #endif
1364 root 1.56 #if EV_USE_KQUEUE
1365 root 1.130 if (backend == EVBACKEND_KQUEUE) kqueue_destroy (EV_A);
1366 root 1.56 #endif
1367     #if EV_USE_EPOLL
1368 root 1.130 if (backend == EVBACKEND_EPOLL ) epoll_destroy (EV_A);
1369 root 1.56 #endif
1370 root 1.59 #if EV_USE_POLL
1371 root 1.130 if (backend == EVBACKEND_POLL ) poll_destroy (EV_A);
1372 root 1.56 #endif
1373     #if EV_USE_SELECT
1374 root 1.130 if (backend == EVBACKEND_SELECT) select_destroy (EV_A);
1375 root 1.56 #endif
1376 root 1.1
1377 root 1.65 for (i = NUMPRI; i--; )
1378 root 1.164 {
1379     array_free (pending, [i]);
1380     #if EV_IDLE_ENABLE
1381     array_free (idle, [i]);
1382     #endif
1383     }
1384 root 1.65
1385 root 1.186 ev_free (anfds); anfdmax = 0;
1386    
1387 root 1.71 /* have to use the microsoft-never-gets-it-right macro */
1388 root 1.164 array_free (fdchange, EMPTY);
1389     array_free (timer, EMPTY);
1390 root 1.140 #if EV_PERIODIC_ENABLE
1391 root 1.164 array_free (periodic, EMPTY);
1392 root 1.93 #endif
1393 root 1.187 #if EV_FORK_ENABLE
1394     array_free (fork, EMPTY);
1395     #endif
1396 root 1.164 array_free (prepare, EMPTY);
1397     array_free (check, EMPTY);
1398 root 1.209 #if EV_ASYNC_ENABLE
1399     array_free (async, EMPTY);
1400     #endif
1401 root 1.65
1402 root 1.130 backend = 0;
1403 root 1.56 }
1404 root 1.22
1405 root 1.226 #if EV_USE_INOTIFY
1406 root 1.154 void inline_size infy_fork (EV_P);
1407 root 1.226 #endif
1408 root 1.154
1409 root 1.151 void inline_size
1410 root 1.56 loop_fork (EV_P)
1411     {
1412 root 1.118 #if EV_USE_PORT
1413 root 1.130 if (backend == EVBACKEND_PORT ) port_fork (EV_A);
1414 root 1.56 #endif
1415     #if EV_USE_KQUEUE
1416 root 1.130 if (backend == EVBACKEND_KQUEUE) kqueue_fork (EV_A);
1417 root 1.45 #endif
1418 root 1.118 #if EV_USE_EPOLL
1419 root 1.130 if (backend == EVBACKEND_EPOLL ) epoll_fork (EV_A);
1420 root 1.118 #endif
1421 root 1.154 #if EV_USE_INOTIFY
1422     infy_fork (EV_A);
1423     #endif
1424 root 1.70
1425 root 1.207 if (ev_is_active (&pipeev))
1426 root 1.70 {
1427 root 1.207 /* this "locks" the handlers against writing to the pipe */
1428 root 1.212 /* while we modify the fd vars */
1429     gotsig = 1;
1430     #if EV_ASYNC_ENABLE
1431     gotasync = 1;
1432     #endif
1433 root 1.70
1434     ev_ref (EV_A);
1435 root 1.207 ev_io_stop (EV_A_ &pipeev);
1436 root 1.220
1437     #if EV_USE_EVENTFD
1438     if (evfd >= 0)
1439     close (evfd);
1440     #endif
1441    
1442     if (evpipe [0] >= 0)
1443     {
1444     close (evpipe [0]);
1445     close (evpipe [1]);
1446     }
1447 root 1.207
1448     evpipe_init (EV_A);
1449 root 1.208 /* now iterate over everything, in case we missed something */
1450     pipecb (EV_A_ &pipeev, EV_READ);
1451 root 1.70 }
1452    
1453     postfork = 0;
1454 root 1.1 }
1455    
1456 root 1.55 #if EV_MULTIPLICITY
1457 root 1.54 struct ev_loop *
1458 root 1.108 ev_loop_new (unsigned int flags)
1459 root 1.54 {
1460 root 1.69 struct ev_loop *loop = (struct ev_loop *)ev_malloc (sizeof (struct ev_loop));
1461    
1462     memset (loop, 0, sizeof (struct ev_loop));
1463 root 1.54
1464 root 1.108 loop_init (EV_A_ flags);
1465 root 1.56
1466 root 1.130 if (ev_backend (EV_A))
1467 root 1.55 return loop;
1468 root 1.54
1469 root 1.55 return 0;
1470 root 1.54 }
1471    
1472     void
1473 root 1.56 ev_loop_destroy (EV_P)
1474 root 1.54 {
1475 root 1.56 loop_destroy (EV_A);
1476 root 1.69 ev_free (loop);
1477 root 1.54 }
1478    
1479 root 1.56 void
1480     ev_loop_fork (EV_P)
1481     {
1482 root 1.205 postfork = 1; /* must be in line with ev_default_fork */
1483 root 1.56 }
1484     #endif
1485    
1486     #if EV_MULTIPLICITY
1487     struct ev_loop *
1488 root 1.125 ev_default_loop_init (unsigned int flags)
1489 root 1.54 #else
1490     int
1491 root 1.116 ev_default_loop (unsigned int flags)
1492 root 1.56 #endif
1493 root 1.54 {
1494 root 1.116 if (!ev_default_loop_ptr)
1495 root 1.56 {
1496     #if EV_MULTIPLICITY
1497 root 1.116 struct ev_loop *loop = ev_default_loop_ptr = &default_loop_struct;
1498 root 1.56 #else
1499 ayin 1.117 ev_default_loop_ptr = 1;
1500 root 1.54 #endif
1501    
1502 root 1.110 loop_init (EV_A_ flags);
1503 root 1.56
1504 root 1.130 if (ev_backend (EV_A))
1505 root 1.56 {
1506 root 1.103 #ifndef _WIN32
1507 root 1.56 ev_signal_init (&childev, childcb, SIGCHLD);
1508     ev_set_priority (&childev, EV_MAXPRI);
1509     ev_signal_start (EV_A_ &childev);
1510     ev_unref (EV_A); /* child watcher should not keep loop alive */
1511     #endif
1512     }
1513     else
1514 root 1.116 ev_default_loop_ptr = 0;
1515 root 1.56 }
1516 root 1.8
1517 root 1.116 return ev_default_loop_ptr;
1518 root 1.1 }
1519    
1520 root 1.24 void
1521 root 1.56 ev_default_destroy (void)
1522 root 1.1 {
1523 root 1.57 #if EV_MULTIPLICITY
1524 root 1.116 struct ev_loop *loop = ev_default_loop_ptr;
1525 root 1.57 #endif
1526 root 1.56
1527 root 1.103 #ifndef _WIN32
1528 root 1.56 ev_ref (EV_A); /* child watcher */
1529     ev_signal_stop (EV_A_ &childev);
1530 root 1.71 #endif
1531 root 1.56
1532     loop_destroy (EV_A);
1533 root 1.1 }
1534    
1535 root 1.24 void
1536 root 1.60 ev_default_fork (void)
1537 root 1.1 {
1538 root 1.60 #if EV_MULTIPLICITY
1539 root 1.116 struct ev_loop *loop = ev_default_loop_ptr;
1540 root 1.60 #endif
1541    
1542 root 1.130 if (backend)
1543 root 1.205 postfork = 1; /* must be in line with ev_loop_fork */
1544 root 1.1 }
1545    
1546 root 1.8 /*****************************************************************************/
1547    
1548 root 1.168 void
1549     ev_invoke (EV_P_ void *w, int revents)
1550     {
1551     EV_CB_INVOKE ((W)w, revents);
1552     }
1553    
1554 root 1.140 void inline_speed
1555 root 1.51 call_pending (EV_P)
1556 root 1.1 {
1557 root 1.42 int pri;
1558    
1559     for (pri = NUMPRI; pri--; )
1560     while (pendingcnt [pri])
1561     {
1562     ANPENDING *p = pendings [pri] + --pendingcnt [pri];
1563 root 1.1
1564 root 1.122 if (expect_true (p->w))
1565 root 1.42 {
1566 root 1.151 /*assert (("non-pending watcher on pending list", p->w->pending));*/
1567 root 1.139
1568 root 1.42 p->w->pending = 0;
1569 root 1.82 EV_CB_INVOKE (p->w, p->events);
1570 root 1.42 }
1571     }
1572 root 1.1 }
1573    
1574 root 1.234 #if EV_IDLE_ENABLE
1575     void inline_size
1576     idle_reify (EV_P)
1577     {
1578     if (expect_false (idleall))
1579     {
1580     int pri;
1581    
1582     for (pri = NUMPRI; pri--; )
1583     {
1584     if (pendingcnt [pri])
1585     break;
1586    
1587     if (idlecnt [pri])
1588     {
1589     queue_events (EV_A_ (W *)idles [pri], idlecnt [pri], EV_IDLE);
1590     break;
1591     }
1592     }
1593     }
1594     }
1595     #endif
1596    
1597 root 1.140 void inline_size
1598 root 1.51 timers_reify (EV_P)
1599 root 1.1 {
1600 root 1.241 while (timercnt && ANHE_at (timers [HEAP0]) <= mn_now)
1601 root 1.1 {
1602 root 1.241 ev_timer *w = (ev_timer *)ANHE_w (timers [HEAP0]);
1603 root 1.1
1604 root 1.202 /*assert (("inactive timer on timer heap detected", ev_is_active (w)));*/
1605 root 1.61
1606 root 1.4 /* first reschedule or stop timer */
1607 root 1.1 if (w->repeat)
1608     {
1609 root 1.33 assert (("negative ev_timer repeat value found while processing timers", w->repeat > 0.));
1610 root 1.90
1611 root 1.228 ev_at (w) += w->repeat;
1612     if (ev_at (w) < mn_now)
1613     ev_at (w) = mn_now;
1614 root 1.90
1615 root 1.242 ANHE_at_set (timers [HEAP0]);
1616 root 1.235 downheap (timers, timercnt, HEAP0);
1617 root 1.12 }
1618     else
1619 root 1.51 ev_timer_stop (EV_A_ w); /* nonrepeating: stop timer */
1620 root 1.30
1621 root 1.78 ev_feed_event (EV_A_ (W)w, EV_TIMEOUT);
1622 root 1.12 }
1623     }
1624 root 1.4
1625 root 1.140 #if EV_PERIODIC_ENABLE
1626     void inline_size
1627 root 1.51 periodics_reify (EV_P)
1628 root 1.12 {
1629 root 1.241 while (periodiccnt && ANHE_at (periodics [HEAP0]) <= ev_rt_now)
1630 root 1.12 {
1631 root 1.241 ev_periodic *w = (ev_periodic *)ANHE_w (periodics [HEAP0]);
1632 root 1.1
1633 root 1.151 /*assert (("inactive timer on periodic heap detected", ev_is_active (w)));*/
1634 root 1.61
1635 root 1.12 /* first reschedule or stop timer */
1636 root 1.77 if (w->reschedule_cb)
1637     {
1638 root 1.228 ev_at (w) = w->reschedule_cb (w, ev_rt_now + TIME_EPSILON);
1639     assert (("ev_periodic reschedule callback returned time in the past", ev_at (w) > ev_rt_now));
1640 root 1.242 ANHE_at_set (periodics [HEAP0]);
1641     downheap (periodics, periodiccnt, HEAP0);
1642 root 1.77 }
1643     else if (w->interval)
1644 root 1.12 {
1645 root 1.228 ev_at (w) = w->offset + ceil ((ev_rt_now - w->offset) / w->interval) * w->interval;
1646     if (ev_at (w) - ev_rt_now <= TIME_EPSILON) ev_at (w) += w->interval;
1647     assert (("ev_periodic timeout in the past detected while processing timers, negative interval?", ev_at (w) > ev_rt_now));
1648 root 1.242 ANHE_at_set (periodics [HEAP0]);
1649 root 1.235 downheap (periodics, periodiccnt, HEAP0);
1650 root 1.1 }
1651     else
1652 root 1.51 ev_periodic_stop (EV_A_ w); /* nonrepeating: stop timer */
1653 root 1.12
1654 root 1.78 ev_feed_event (EV_A_ (W)w, EV_PERIODIC);
1655 root 1.12 }
1656     }
1657    
1658 root 1.140 static void noinline
1659 root 1.54 periodics_reschedule (EV_P)
1660 root 1.12 {
1661     int i;
1662    
1663 root 1.13 /* adjust periodics after time jump */
1664 root 1.241 for (i = HEAP0; i < periodiccnt + HEAP0; ++i)
1665 root 1.12 {
1666 root 1.241 ev_periodic *w = (ev_periodic *)ANHE_w (periodics [i]);
1667 root 1.12
1668 root 1.77 if (w->reschedule_cb)
1669 root 1.228 ev_at (w) = w->reschedule_cb (w, ev_rt_now);
1670 root 1.77 else if (w->interval)
1671 root 1.228 ev_at (w) = w->offset + ceil ((ev_rt_now - w->offset) / w->interval) * w->interval;
1672 root 1.242
1673     ANHE_at_set (periodics [i]);
1674 root 1.77 }
1675 root 1.12
1676 root 1.241 /* now rebuild the heap: fine for the 2-heap, inefficient for the 4-heap, but correct */
1677 root 1.235 for (i = periodiccnt >> 1; i--; )
1678     downheap (periodics, periodiccnt, i + HEAP0);
1679 root 1.1 }
1680 root 1.93 #endif
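/* Worked example for the interval case above (illustration only): with
 * offset = 0. and interval = 3600. a periodic fires on every full hour of
 * wallclock time:
 *   ev_at (w) = 0. + ceil ((ev_rt_now - 0.) / 3600.) * 3600.
 * e.g. ev_rt_now = 10123.4 gives ceil (2.81...) * 3600. = 10800., and after a
 * time jump periodics_reschedule recomputes the same expression from the new
 * ev_rt_now, which is what keeps the "on the hour" semantics intact. */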
1681 root 1.1
1682 root 1.178 void inline_speed
1683     time_update (EV_P_ ev_tstamp max_block)
1684 root 1.4 {
1685     int i;
1686 root 1.12
1687 root 1.40 #if EV_USE_MONOTONIC
1688     if (expect_true (have_monotonic))
1689     {
1690 root 1.178 ev_tstamp odiff = rtmn_diff;
1691    
1692     mn_now = get_clock ();
1693    
1694     /* only fetch the realtime clock every 0.5*MIN_TIMEJUMP seconds */
1695     /* interpolate in the meantime */
1696     if (expect_true (mn_now - now_floor < MIN_TIMEJUMP * .5))
1697 root 1.40 {
1698 root 1.178 ev_rt_now = rtmn_diff + mn_now;
1699     return;
1700     }
1701    
1702     now_floor = mn_now;
1703     ev_rt_now = ev_time ();
1704 root 1.4
1705 root 1.178 /* loop a few times, before making important decisions.
1706     * on the choice of "4": one iteration isn't enough,
1707     * in case we get preempted during the calls to
1708     * ev_time and get_clock. a second call is almost guaranteed
1709     * to succeed in that case, though. and looping a few more times
1710     * doesn't hurt either as we only do this on time-jumps or
1711     * in the unlikely event of having been preempted here.
1712     */
1713     for (i = 4; --i; )
1714     {
1715     rtmn_diff = ev_rt_now - mn_now;
1716 root 1.4
1717 root 1.234 if (expect_true (fabs (odiff - rtmn_diff) < MIN_TIMEJUMP))
1718 root 1.178 return; /* all is well */
1719 root 1.4
1720 root 1.178 ev_rt_now = ev_time ();
1721     mn_now = get_clock ();
1722     now_floor = mn_now;
1723     }
1724 root 1.4
1725 root 1.140 # if EV_PERIODIC_ENABLE
1726 root 1.178 periodics_reschedule (EV_A);
1727 root 1.93 # endif
1728 root 1.178 /* no timer adjustment, as the monotonic clock doesn't jump */
1729     /* timers_reschedule (EV_A_ rtmn_diff - odiff) */
1730 root 1.4 }
1731     else
1732 root 1.40 #endif
1733 root 1.4 {
1734 root 1.85 ev_rt_now = ev_time ();
1735 root 1.40
1736 root 1.178 if (expect_false (mn_now > ev_rt_now || ev_rt_now > mn_now + max_block + MIN_TIMEJUMP))
1737 root 1.13 {
1738 root 1.140 #if EV_PERIODIC_ENABLE
1739 root 1.54 periodics_reschedule (EV_A);
1740 root 1.93 #endif
1741 root 1.157 /* adjust timers. this is easy, as the offset is the same for all of them */
1742 root 1.241 for (i = 0; i < timercnt; ++i)
1743     {
1744     ANHE *he = timers + i + HEAP0;
1745     ANHE_w (*he)->at += ev_rt_now - mn_now;
1746     ANHE_at_set (*he);
1747     }
1748 root 1.13 }
1749 root 1.4
1750 root 1.85 mn_now = ev_rt_now;
1751 root 1.4 }
1752     }
1753    
1754 root 1.51 void
1755     ev_ref (EV_P)
1756     {
1757     ++activecnt;
1758     }
1759 root 1.1
1760 root 1.51 void
1761     ev_unref (EV_P)
1762     {
1763     --activecnt;
1764     }
1765    
1766     static int loop_done;
1767    
1768     void
1769     ev_loop (EV_P_ int flags)
1770 root 1.1 {
1771 root 1.219 loop_done = EVUNLOOP_CANCEL;
1772 root 1.1
1773 root 1.158 call_pending (EV_A); /* in case we recurse, ensure ordering stays nice and clean */
1774    
1775 root 1.161 do
1776 root 1.9 {
1777 root 1.158 #ifndef _WIN32
1778     if (expect_false (curpid)) /* penalise the forking check even more */
1779     if (expect_false (getpid () != curpid))
1780     {
1781     curpid = getpid ();
1782     postfork = 1;
1783     }
1784     #endif
1785    
1786 root 1.157 #if EV_FORK_ENABLE
1787     /* we might have forked, so queue fork handlers */
1788     if (expect_false (postfork))
1789     if (forkcnt)
1790     {
1791     queue_events (EV_A_ (W *)forks, forkcnt, EV_FORK);
1792     call_pending (EV_A);
1793     }
1794     #endif
1795 root 1.147
1796 root 1.170 /* queue prepare watchers (and execute them) */
1797 root 1.40 if (expect_false (preparecnt))
1798 root 1.20 {
1799 root 1.51 queue_events (EV_A_ (W *)prepares, preparecnt, EV_PREPARE);
1800     call_pending (EV_A);
1801 root 1.20 }
1802 root 1.9
1803 root 1.159 if (expect_false (!activecnt))
1804     break;
1805    
1806 root 1.70 /* we might have forked, so reify kernel state if necessary */
1807     if (expect_false (postfork))
1808     loop_fork (EV_A);
1809    
1810 root 1.1 /* update fd-related kernel structures */
1811 root 1.51 fd_reify (EV_A);
1812 root 1.1
1813     /* calculate blocking time */
1814 root 1.135 {
1815 root 1.193 ev_tstamp waittime = 0.;
1816     ev_tstamp sleeptime = 0.;
1817 root 1.12
1818 root 1.193 if (expect_true (!(flags & EVLOOP_NONBLOCK || idleall || !activecnt)))
1819 root 1.135 {
1820     /* update time to cancel out callback processing overhead */
1821 root 1.178 time_update (EV_A_ 1e100);
1822 root 1.135
1823 root 1.193 waittime = MAX_BLOCKTIME;
1824 root 1.135
1825     if (timercnt)
1826     {
1827 root 1.241 ev_tstamp to = ANHE_at (timers [HEAP0]) - mn_now + backend_fudge;
1828 root 1.193 if (waittime > to) waittime = to;
1829 root 1.135 }
1830 root 1.4
1831 root 1.140 #if EV_PERIODIC_ENABLE
1832 root 1.135 if (periodiccnt)
1833     {
1834 root 1.241 ev_tstamp to = ANHE_at (periodics [HEAP0]) - ev_rt_now + backend_fudge;
1835 root 1.193 if (waittime > to) waittime = to;
1836 root 1.135 }
1837 root 1.93 #endif
1838 root 1.4
1839 root 1.193 if (expect_false (waittime < timeout_blocktime))
1840     waittime = timeout_blocktime;
1841    
1842     sleeptime = waittime - backend_fudge;
1843    
1844     if (expect_true (sleeptime > io_blocktime))
1845     sleeptime = io_blocktime;
1846    
1847     if (sleeptime)
1848     {
1849     ev_sleep (sleeptime);
1850     waittime -= sleeptime;
1851     }
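              /* an illustrative run through the numbers above (hypothetical values, not from
                 the original source): with the nearest timer 0.5s away, io_blocktime (as set
                 via ev_set_io_collect_interval) at 0.01 and timeout_blocktime at 0., the loop
                 first sleeps 0.01s so more I/O can accumulate, then polls with the remaining
                 ~0.49s as the kernel-level timeout. */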
1852 root 1.135 }
1853 root 1.1
1854 root 1.162 ++loop_count;
1855 root 1.193 backend_poll (EV_A_ waittime);
1856 root 1.178
1857     /* update ev_rt_now, do magic */
1858 root 1.193 time_update (EV_A_ waittime + sleeptime);
1859 root 1.135 }
1860 root 1.1
1861 root 1.9 /* queue pending timers and reschedule them */
1862 root 1.51 timers_reify (EV_A); /* relative timers called last */
1863 root 1.140 #if EV_PERIODIC_ENABLE
1864 root 1.51 periodics_reify (EV_A); /* absolute timers called first */
1865 root 1.93 #endif
1866 root 1.1
1867 root 1.164 #if EV_IDLE_ENABLE
1868 root 1.137 /* queue idle watchers unless other events are pending */
1869 root 1.164 idle_reify (EV_A);
1870     #endif
1871 root 1.9
1872 root 1.20 /* queue check watchers, to be executed first */
1873 root 1.123 if (expect_false (checkcnt))
1874 root 1.51 queue_events (EV_A_ (W *)checks, checkcnt, EV_CHECK);
1875 root 1.9
1876 root 1.51 call_pending (EV_A);
1877 root 1.1 }
1878 root 1.219 while (expect_true (
1879     activecnt
1880     && !loop_done
1881     && !(flags & (EVLOOP_ONESHOT | EVLOOP_NONBLOCK))
1882     ));
1883 root 1.13
1884 root 1.135 if (loop_done == EVUNLOOP_ONE)
1885     loop_done = EVUNLOOP_CANCEL;
1886 root 1.51 }
1887    
1888     void
1889     ev_unloop (EV_P_ int how)
1890     {
1891     loop_done = how;
1892 root 1.1 }
1893    
1894 root 1.8 /*****************************************************************************/
1895    
1896 root 1.140 void inline_size
1897 root 1.10 wlist_add (WL *head, WL elem)
1898 root 1.1 {
1899     elem->next = *head;
1900     *head = elem;
1901     }
1902    
1903 root 1.140 void inline_size
1904 root 1.10 wlist_del (WL *head, WL elem)
1905 root 1.1 {
1906     while (*head)
1907     {
1908     if (*head == elem)
1909     {
1910     *head = elem->next;
1911     return;
1912     }
1913    
1914     head = &(*head)->next;
1915     }
1916     }
1917    
1918 root 1.140 void inline_speed
1919 root 1.166 clear_pending (EV_P_ W w)
1920 root 1.16 {
1921     if (w->pending)
1922     {
1923 root 1.42 pendings [ABSPRI (w)][w->pending - 1].w = 0;
1924 root 1.16 w->pending = 0;
1925     }
1926     }
1927    
1928 root 1.167 int
1929     ev_clear_pending (EV_P_ void *w)
1930 root 1.166 {
1931     W w_ = (W)w;
1932     int pending = w_->pending;
1933    
1934 root 1.172 if (expect_true (pending))
1935     {
1936     ANPENDING *p = pendings [ABSPRI (w_)] + pending - 1;
1937     w_->pending = 0;
1938     p->w = 0;
1939     return p->events;
1940     }
1941     else
1942 root 1.167 return 0;
1943 root 1.166 }
1944    
1945 root 1.164 void inline_size
1946     pri_adjust (EV_P_ W w)
1947     {
1948     int pri = w->priority;
1949     pri = pri < EV_MINPRI ? EV_MINPRI : pri;
1950     pri = pri > EV_MAXPRI ? EV_MAXPRI : pri;
1951     w->priority = pri;
1952     }
1953    
1954 root 1.140 void inline_speed
1955 root 1.51 ev_start (EV_P_ W w, int active)
1956 root 1.1 {
1957 root 1.164 pri_adjust (EV_A_ w);
1958 root 1.1 w->active = active;
1959 root 1.51 ev_ref (EV_A);
1960 root 1.1 }
1961    
1962 root 1.140 void inline_size
1963 root 1.51 ev_stop (EV_P_ W w)
1964 root 1.1 {
1965 root 1.51 ev_unref (EV_A);
1966 root 1.1 w->active = 0;
1967     }
1968    
1969 root 1.8 /*****************************************************************************/
1970    
1971 root 1.171 void noinline
1972 root 1.136 ev_io_start (EV_P_ ev_io *w)
1973 root 1.1 {
1974 root 1.37 int fd = w->fd;
1975    
1976 root 1.123 if (expect_false (ev_is_active (w)))
1977 root 1.1 return;
1978    
1979 root 1.33 assert (("ev_io_start called with negative fd", fd >= 0));
1980    
1981 root 1.51 ev_start (EV_A_ (W)w, 1);
1982 root 1.74 array_needsize (ANFD, anfds, anfdmax, fd + 1, anfds_init);
1983 root 1.182 wlist_add (&anfds[fd].head, (WL)w);
1984 root 1.1
1985 root 1.184 fd_change (EV_A_ fd, w->events & EV_IOFDSET | 1);
1986     w->events &= ~EV_IOFDSET;
1987 root 1.1 }
1988    
1989 root 1.171 void noinline
1990 root 1.136 ev_io_stop (EV_P_ ev_io *w)
1991 root 1.1 {
1992 root 1.166 clear_pending (EV_A_ (W)w);
1993 root 1.123 if (expect_false (!ev_is_active (w)))
1994 root 1.1 return;
1995    
1996 root 1.242 assert (("ev_io_stop called with illegal fd (must stay constant after start!)", w->fd >= 0 && w->fd < anfdmax));
1997 root 1.89
1998 root 1.182 wlist_del (&anfds[w->fd].head, (WL)w);
1999 root 1.51 ev_stop (EV_A_ (W)w);
2000 root 1.1
2001 root 1.184 fd_change (EV_A_ w->fd, 1);
2002 root 1.1 }
2003    
2004 root 1.171 void noinline
2005 root 1.136 ev_timer_start (EV_P_ ev_timer *w)
2006 root 1.1 {
2007 root 1.123 if (expect_false (ev_is_active (w)))
2008 root 1.1 return;
2009    
2010 root 1.228 ev_at (w) += mn_now;
2011 root 1.12
2012 root 1.33 assert (("ev_timer_start called with negative timer repeat value", w->repeat >= 0.));
2013 root 1.13
2014 root 1.235 ev_start (EV_A_ (W)w, ++timercnt + HEAP0 - 1);
2015 root 1.241 array_needsize (ANHE, timers, timermax, ev_active (w) + 1, EMPTY2);
2016     ANHE_w (timers [ev_active (w)]) = (WT)w;
2017     ANHE_at_set (timers [ev_active (w)]);
2018 root 1.235 upheap (timers, ev_active (w));
2019 root 1.62
2020 root 1.242 /*assert (("internal timer heap corruption", timers [ev_active (w)] == (WT)w));*/
2021 root 1.12 }
2022    
2023 root 1.171 void noinline
2024 root 1.136 ev_timer_stop (EV_P_ ev_timer *w)
2025 root 1.12 {
2026 root 1.166 clear_pending (EV_A_ (W)w);
2027 root 1.123 if (expect_false (!ev_is_active (w)))
2028 root 1.12 return;
2029    
2030 root 1.230 {
2031     int active = ev_active (w);
2032 root 1.62
2033 root 1.241 assert (("internal timer heap corruption", ANHE_w (timers [active]) == (WT)w));
2034 root 1.151
2035 root 1.235 if (expect_true (active < timercnt + HEAP0 - 1))
2036 root 1.151 {
2037 root 1.235 timers [active] = timers [timercnt + HEAP0 - 1];
2038 root 1.181 adjustheap (timers, timercnt, active);
2039 root 1.151 }
2040 root 1.228
2041     --timercnt;
2042 root 1.151 }
2043 root 1.4
2044 root 1.228 ev_at (w) -= mn_now;
2045 root 1.14
2046 root 1.51 ev_stop (EV_A_ (W)w);
2047 root 1.12 }
2048 root 1.4
2049 root 1.171 void noinline
2050 root 1.136 ev_timer_again (EV_P_ ev_timer *w)
2051 root 1.14 {
2052     if (ev_is_active (w))
2053     {
2054     if (w->repeat)
2055 root 1.99 {
2056 root 1.228 ev_at (w) = mn_now + w->repeat;
2057 root 1.241 ANHE_at_set (timers [ev_active (w)]);
2058 root 1.230 adjustheap (timers, timercnt, ev_active (w));
2059 root 1.99 }
2060 root 1.14 else
2061 root 1.51 ev_timer_stop (EV_A_ w);
2062 root 1.14 }
2063     else if (w->repeat)
2064 root 1.112 {
2065 root 1.229 ev_at (w) = w->repeat;
2066 root 1.112 ev_timer_start (EV_A_ w);
2067     }
2068 root 1.14 }
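/* an illustrative usage sketch (not part of the original source), assuming EV_MULTIPLICITY
   is disabled and a user-supplied callback timeout_cb exists: the usual inactivity-timeout
   pattern configures only the repeat value and relies on ev_timer_again to (re)arm:

     ev_timer timeout_watcher;
     ev_timer_init (&timeout_watcher, timeout_cb, 0., 60.);
     ev_timer_again (&timeout_watcher);   - arms the watcher; it fires after 60s of silence
     ev_timer_again (&timeout_watcher);   - called again on activity, pushes the deadline back
*/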
2069    
2070 root 1.140 #if EV_PERIODIC_ENABLE
2071 root 1.171 void noinline
2072 root 1.136 ev_periodic_start (EV_P_ ev_periodic *w)
2073 root 1.12 {
2074 root 1.123 if (expect_false (ev_is_active (w)))
2075 root 1.12 return;
2076 root 1.1
2077 root 1.77 if (w->reschedule_cb)
2078 root 1.228 ev_at (w) = w->reschedule_cb (w, ev_rt_now);
2079 root 1.77 else if (w->interval)
2080     {
2081     assert (("ev_periodic_start called with negative interval value", w->interval >= 0.));
2082     /* this formula differs from the one in periodic_reify because we do not always round up */
2083 root 1.228 ev_at (w) = w->offset + ceil ((ev_rt_now - w->offset) / w->interval) * w->interval;
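      /* a worked illustration with hypothetical values: offset = 0., interval = 60. and
         ev_rt_now = 130. give ceil (130. / 60.) * 60. = 180., the next multiple of the
         interval at or after now; with ev_rt_now exactly 120. the result stays 120.,
         i.e. this variant does not force a step past the current time. */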
2084 root 1.77 }
2085 root 1.173 else
2086 root 1.228 ev_at (w) = w->offset;
2087 root 1.12
2088 root 1.235 ev_start (EV_A_ (W)w, ++periodiccnt + HEAP0 - 1);
2089 root 1.241 array_needsize (ANHE, periodics, periodicmax, ev_active (w) + 1, EMPTY2);
2090     ANHE_w (periodics [ev_active (w)]) = (WT)w;
2091 root 1.235 upheap (periodics, ev_active (w));
2092 root 1.62
2093 root 1.241 /*assert (("internal periodic heap corruption", ANHE_w (periodics [ev_active (w)]) == (WT)w));*/
2094 root 1.1 }
2095    
2096 root 1.171 void noinline
2097 root 1.136 ev_periodic_stop (EV_P_ ev_periodic *w)
2098 root 1.1 {
2099 root 1.166 clear_pending (EV_A_ (W)w);
2100 root 1.123 if (expect_false (!ev_is_active (w)))
2101 root 1.1 return;
2102    
2103 root 1.230 {
2104     int active = ev_active (w);
2105 root 1.62
2106 root 1.241 assert (("internal periodic heap corruption", ANHE_w (periodics [active]) == (WT)w));
2107 root 1.151
2108 root 1.235 if (expect_true (active < periodiccnt + HEAP0 - 1))
2109 root 1.151 {
2110 root 1.235 periodics [active] = periodics [periodiccnt + HEAP0 - 1];
2111 root 1.181 adjustheap (periodics, periodiccnt, active);
2112 root 1.151 }
2113 root 1.228
2114     --periodiccnt;
2115 root 1.151 }
2116 root 1.2
2117 root 1.51 ev_stop (EV_A_ (W)w);
2118 root 1.1 }
2119    
2120 root 1.171 void noinline
2121 root 1.136 ev_periodic_again (EV_P_ ev_periodic *w)
2122 root 1.77 {
2123 root 1.84 /* TODO: use adjustheap and recalculation */
2124 root 1.77 ev_periodic_stop (EV_A_ w);
2125     ev_periodic_start (EV_A_ w);
2126     }
2127 root 1.93 #endif
2128 root 1.77
2129 root 1.56 #ifndef SA_RESTART
2130     # define SA_RESTART 0
2131     #endif
2132    
2133 root 1.171 void noinline
2134 root 1.136 ev_signal_start (EV_P_ ev_signal *w)
2135 root 1.56 {
2136     #if EV_MULTIPLICITY
2137 root 1.116 assert (("signal watchers are only supported in the default loop", loop == ev_default_loop_ptr));
2138 root 1.56 #endif
2139 root 1.123 if (expect_false (ev_is_active (w)))
2140 root 1.56 return;
2141    
2142     assert (("ev_signal_start called with illegal signal number", w->signum > 0));
2143    
2144 root 1.207 evpipe_init (EV_A);
2145    
2146 root 1.180 {
2147     #ifndef _WIN32
2148     sigset_t full, prev;
2149     sigfillset (&full);
2150     sigprocmask (SIG_SETMASK, &full, &prev);
2151     #endif
2152    
2153     array_needsize (ANSIG, signals, signalmax, w->signum, signals_init);
2154    
2155     #ifndef _WIN32
2156     sigprocmask (SIG_SETMASK, &prev, 0);
2157     #endif
2158     }
2159    
2160 root 1.56 ev_start (EV_A_ (W)w, 1);
2161 root 1.182 wlist_add (&signals [w->signum - 1].head, (WL)w);
2162 root 1.56
2163 root 1.63 if (!((WL)w)->next)
2164 root 1.56 {
2165 root 1.103 #if _WIN32
2166 root 1.218 signal (w->signum, ev_sighandler);
2167 root 1.67 #else
2168 root 1.56 struct sigaction sa;
2169 root 1.218 sa.sa_handler = ev_sighandler;
2170 root 1.56 sigfillset (&sa.sa_mask);
2171     sa.sa_flags = SA_RESTART; /* if restarting works we save one iteration */
2172     sigaction (w->signum, &sa, 0);
2173 root 1.67 #endif
2174 root 1.56 }
2175     }
2176    
2177 root 1.171 void noinline
2178 root 1.136 ev_signal_stop (EV_P_ ev_signal *w)
2179 root 1.56 {
2180 root 1.166 clear_pending (EV_A_ (W)w);
2181 root 1.123 if (expect_false (!ev_is_active (w)))
2182 root 1.56 return;
2183    
2184 root 1.182 wlist_del (&signals [w->signum - 1].head, (WL)w);
2185 root 1.56 ev_stop (EV_A_ (W)w);
2186    
2187     if (!signals [w->signum - 1].head)
2188     signal (w->signum, SIG_DFL);
2189     }
2190    
2191 root 1.28 void
2192 root 1.136 ev_child_start (EV_P_ ev_child *w)
2193 root 1.22 {
2194 root 1.56 #if EV_MULTIPLICITY
2195 root 1.116 assert (("child watchers are only supported in the default loop", loop == ev_default_loop_ptr));
2196 root 1.56 #endif
2197 root 1.123 if (expect_false (ev_is_active (w)))
2198 root 1.22 return;
2199    
2200 root 1.51 ev_start (EV_A_ (W)w, 1);
2201 root 1.182 wlist_add (&childs [w->pid & (EV_PID_HASHSIZE - 1)], (WL)w);
2202 root 1.22 }
2203    
2204 root 1.28 void
2205 root 1.136 ev_child_stop (EV_P_ ev_child *w)
2206 root 1.22 {
2207 root 1.166 clear_pending (EV_A_ (W)w);
2208 root 1.123 if (expect_false (!ev_is_active (w)))
2209 root 1.22 return;
2210    
2211 root 1.182 wlist_del (&childs [w->pid & (EV_PID_HASHSIZE - 1)], (WL)w);
2212 root 1.51 ev_stop (EV_A_ (W)w);
2213 root 1.22 }
2214    
2215 root 1.140 #if EV_STAT_ENABLE
2216    
2217     # ifdef _WIN32
2218 root 1.146 # undef lstat
2219     # define lstat(a,b) _stati64 (a,b)
2220 root 1.140 # endif
2221    
2222 root 1.143 #define DEF_STAT_INTERVAL 5.0074891
2223     #define MIN_STAT_INTERVAL 0.1074891
2224    
2225 root 1.157 static void noinline stat_timer_cb (EV_P_ ev_timer *w_, int revents);
2226 root 1.152
2227     #if EV_USE_INOTIFY
2228 root 1.153 # define EV_INOTIFY_BUFSIZE 8192
2229 root 1.152
2230     static void noinline
2231     infy_add (EV_P_ ev_stat *w)
2232     {
2233     w->wd = inotify_add_watch (fs_fd, w->path, IN_ATTRIB | IN_DELETE_SELF | IN_MOVE_SELF | IN_MODIFY | IN_DONT_FOLLOW | IN_MASK_ADD);
2234    
2235     if (w->wd < 0)
2236     {
2237     ev_timer_start (EV_A_ &w->timer); /* this is not race-free, so we still need to recheck periodically */
2238    
2239     /* monitor some parent directory for speedup hints */
2240 root 1.233 /* note that exceeding the hardcoded limit is not a correctness issue, */
2241     /* but an efficiency issue only */
2242 root 1.153 if ((errno == ENOENT || errno == EACCES) && strlen (w->path) < 4096)
2243 root 1.152 {
2244 root 1.153 char path [4096];
2245 root 1.152 strcpy (path, w->path);
2246    
2247     do
2248     {
2249     int mask = IN_MASK_ADD | IN_DELETE_SELF | IN_MOVE_SELF
2250     | (errno == EACCES ? IN_ATTRIB : IN_CREATE | IN_MOVED_TO);
2251    
2252     char *pend = strrchr (path, '/');
2253    
2254     if (!pend)
2255     break; /* whoops, no '/', complain to your admin */
2256    
2257     *pend = 0;
2258 root 1.153 w->wd = inotify_add_watch (fs_fd, path, mask);
2259 root 1.152 }
2260     while (w->wd < 0 && (errno == ENOENT || errno == EACCES));
2261     }
2262     }
2263     else
2264     ev_timer_stop (EV_A_ &w->timer); /* we can watch this in a race-free way */
2265    
2266     if (w->wd >= 0)
2267     wlist_add (&fs_hash [w->wd & (EV_INOTIFY_HASHSIZE - 1)].head, (WL)w);
2268     }
2269    
2270     static void noinline
2271     infy_del (EV_P_ ev_stat *w)
2272     {
2273     int slot;
2274     int wd = w->wd;
2275    
2276     if (wd < 0)
2277     return;
2278    
2279     w->wd = -2;
2280     slot = wd & (EV_INOTIFY_HASHSIZE - 1);
2281     wlist_del (&fs_hash [slot].head, (WL)w);
2282    
2283     /* remove this watcher; if others are watching the same wd, they will rearm */
2284     inotify_rm_watch (fs_fd, wd);
2285     }
2286    
2287     static void noinline
2288     infy_wd (EV_P_ int slot, int wd, struct inotify_event *ev)
2289     {
2290     if (slot < 0)
2291     /* overflow, need to check for all hash slots */
2292     for (slot = 0; slot < EV_INOTIFY_HASHSIZE; ++slot)
2293     infy_wd (EV_A_ slot, wd, ev);
2294     else
2295     {
2296     WL w_;
2297    
2298     for (w_ = fs_hash [slot & (EV_INOTIFY_HASHSIZE - 1)].head; w_; )
2299     {
2300     ev_stat *w = (ev_stat *)w_;
2301     w_ = w_->next; /* lets us remove this watcher and all before it */
2302    
2303     if (w->wd == wd || wd == -1)
2304     {
2305     if (ev->mask & (IN_IGNORED | IN_UNMOUNT | IN_DELETE_SELF))
2306     {
2307     w->wd = -1;
2308     infy_add (EV_A_ w); /* re-add, no matter what */
2309     }
2310    
2311 root 1.153 stat_timer_cb (EV_A_ &w->timer, 0);
2312 root 1.152 }
2313     }
2314     }
2315     }
2316    
2317     static void
2318     infy_cb (EV_P_ ev_io *w, int revents)
2319     {
2320     char buf [EV_INOTIFY_BUFSIZE];
2321     struct inotify_event *ev = (struct inotify_event *)buf;
2322     int ofs;
2323     int len = read (fs_fd, buf, sizeof (buf));
2324    
2325     for (ofs = 0; ofs < len; ofs += sizeof (struct inotify_event) + ev->len)
2326     ev = (struct inotify_event *)(buf + ofs), infy_wd (EV_A_ ev->wd, ev->wd, ev); /* point ev at each event in turn, not just the first */
2327     }
2328    
2329     void inline_size
2330     infy_init (EV_P)
2331     {
2332     if (fs_fd != -2)
2333     return;
2334    
2335     fs_fd = inotify_init ();
2336    
2337     if (fs_fd >= 0)
2338     {
2339     ev_io_init (&fs_w, infy_cb, fs_fd, EV_READ);
2340     ev_set_priority (&fs_w, EV_MAXPRI);
2341     ev_io_start (EV_A_ &fs_w);
2342     }
2343     }
2344    
2345 root 1.154 void inline_size
2346     infy_fork (EV_P)
2347     {
2348     int slot;
2349    
2350     if (fs_fd < 0)
2351     return;
2352    
2353     close (fs_fd);
2354     fs_fd = inotify_init ();
2355    
2356     for (slot = 0; slot < EV_INOTIFY_HASHSIZE; ++slot)
2357     {
2358     WL w_ = fs_hash [slot].head;
2359     fs_hash [slot].head = 0;
2360    
2361     while (w_)
2362     {
2363     ev_stat *w = (ev_stat *)w_;
2364     w_ = w_->next; /* lets us add this watcher */
2365    
2366     w->wd = -1;
2367    
2368     if (fs_fd >= 0)
2369     infy_add (EV_A_ w); /* re-add, no matter what */
2370     else
2371     ev_timer_start (EV_A_ &w->timer);
2372     }
2373    
2374     }
2375     }
2376    
2377 root 1.152 #endif
2378    
2379 root 1.140 void
2380     ev_stat_stat (EV_P_ ev_stat *w)
2381     {
2382     if (lstat (w->path, &w->attr) < 0)
2383     w->attr.st_nlink = 0;
2384     else if (!w->attr.st_nlink)
2385     w->attr.st_nlink = 1;
2386     }
2387    
2388 root 1.157 static void noinline
2389 root 1.140 stat_timer_cb (EV_P_ ev_timer *w_, int revents)
2390     {
2391     ev_stat *w = (ev_stat *)(((char *)w_) - offsetof (ev_stat, timer));
2392    
2393     /* we copy this here each time so that */
2394     /* prev has the old value when the callback gets invoked */
2395     w->prev = w->attr;
2396     ev_stat_stat (EV_A_ w);
2397    
2398 root 1.156 /* memcmp doesn't work on netbsd: struct stat may contain padding/unused fields, so compare members individually */
2399     if (
2400     w->prev.st_dev != w->attr.st_dev
2401     || w->prev.st_ino != w->attr.st_ino
2402     || w->prev.st_mode != w->attr.st_mode
2403     || w->prev.st_nlink != w->attr.st_nlink
2404     || w->prev.st_uid != w->attr.st_uid
2405     || w->prev.st_gid != w->attr.st_gid
2406     || w->prev.st_rdev != w->attr.st_rdev
2407     || w->prev.st_size != w->attr.st_size
2408     || w->prev.st_atime != w->attr.st_atime
2409     || w->prev.st_mtime != w->attr.st_mtime
2410     || w->prev.st_ctime != w->attr.st_ctime
2411     ) {
2412 root 1.152 #if EV_USE_INOTIFY
2413     infy_del (EV_A_ w);
2414     infy_add (EV_A_ w);
2415     ev_stat_stat (EV_A_ w); /* avoid race... */
2416     #endif
2417    
2418     ev_feed_event (EV_A_ w, EV_STAT);
2419     }
2420 root 1.140 }
2421    
2422     void
2423     ev_stat_start (EV_P_ ev_stat *w)
2424     {
2425     if (expect_false (ev_is_active (w)))
2426     return;
2427    
2428     /* since we use memcmp, we need to clear any padding data etc. */
2429     memset (&w->prev, 0, sizeof (ev_statdata));
2430     memset (&w->attr, 0, sizeof (ev_statdata));
2431    
2432     ev_stat_stat (EV_A_ w);
2433    
2434 root 1.143 if (w->interval < MIN_STAT_INTERVAL)
2435     w->interval = w->interval ? MIN_STAT_INTERVAL : DEF_STAT_INTERVAL;
2436    
2437 root 1.140 ev_timer_init (&w->timer, stat_timer_cb, w->interval, w->interval);
2438     ev_set_priority (&w->timer, ev_priority (w));
2439 root 1.152
2440     #if EV_USE_INOTIFY
2441     infy_init (EV_A);
2442    
2443     if (fs_fd >= 0)
2444     infy_add (EV_A_ w);
2445     else
2446     #endif
2447     ev_timer_start (EV_A_ &w->timer);
2448 root 1.140
2449     ev_start (EV_A_ (W)w, 1);
2450     }
2451    
2452     void
2453     ev_stat_stop (EV_P_ ev_stat *w)
2454     {
2455 root 1.166 clear_pending (EV_A_ (W)w);
2456 root 1.140 if (expect_false (!ev_is_active (w)))
2457     return;
2458    
2459 root 1.152 #if EV_USE_INOTIFY
2460     infy_del (EV_A_ w);
2461     #endif
2462 root 1.140 ev_timer_stop (EV_A_ &w->timer);
2463    
2464 root 1.134 ev_stop (EV_A_ (W)w);
2465     }
2466     #endif
2467    
2468 root 1.164 #if EV_IDLE_ENABLE
2469 root 1.144 void
2470     ev_idle_start (EV_P_ ev_idle *w)
2471     {
2472     if (expect_false (ev_is_active (w)))
2473     return;
2474    
2475 root 1.164 pri_adjust (EV_A_ (W)w);
2476    
2477     {
2478     int active = ++idlecnt [ABSPRI (w)];
2479    
2480     ++idleall;
2481     ev_start (EV_A_ (W)w, active);
2482    
2483     array_needsize (ev_idle *, idles [ABSPRI (w)], idlemax [ABSPRI (w)], active, EMPTY2);
2484     idles [ABSPRI (w)][active - 1] = w;
2485     }
2486 root 1.144 }
2487    
2488     void
2489     ev_idle_stop (EV_P_ ev_idle *w)
2490     {
2491 root 1.166 clear_pending (EV_A_ (W)w);
2492 root 1.144 if (expect_false (!ev_is_active (w)))
2493     return;
2494    
2495     {
2496 root 1.230 int active = ev_active (w);
2497 root 1.164
2498     idles [ABSPRI (w)][active - 1] = idles [ABSPRI (w)][--idlecnt [ABSPRI (w)]];
2499 root 1.230 ev_active (idles [ABSPRI (w)][active - 1]) = active;
2500 root 1.164
2501     ev_stop (EV_A_ (W)w);
2502     --idleall;
2503 root 1.144 }
2504     }
2505 root 1.164 #endif
2506 root 1.144
2507     void
2508     ev_prepare_start (EV_P_ ev_prepare *w)
2509     {
2510     if (expect_false (ev_is_active (w)))
2511     return;
2512    
2513     ev_start (EV_A_ (W)w, ++preparecnt);
2514     array_needsize (ev_prepare *, prepares, preparemax, preparecnt, EMPTY2);
2515     prepares [preparecnt - 1] = w;
2516     }
2517    
2518     void
2519     ev_prepare_stop (EV_P_ ev_prepare *w)
2520     {
2521 root 1.166 clear_pending (EV_A_ (W)w);
2522 root 1.144 if (expect_false (!ev_is_active (w)))
2523     return;
2524    
2525     {
2526 root 1.230 int active = ev_active (w);
2527    
2528 root 1.144 prepares [active - 1] = prepares [--preparecnt];
2529 root 1.230 ev_active (prepares [active - 1]) = active;
2530 root 1.144 }
2531    
2532     ev_stop (EV_A_ (W)w);
2533     }
2534    
2535     void
2536     ev_check_start (EV_P_ ev_check *w)
2537     {
2538     if (expect_false (ev_is_active (w)))
2539     return;
2540    
2541     ev_start (EV_A_ (W)w, ++checkcnt);
2542     array_needsize (ev_check *, checks, checkmax, checkcnt, EMPTY2);
2543     checks [checkcnt - 1] = w;
2544     }
2545    
2546     void
2547     ev_check_stop (EV_P_ ev_check *w)
2548     {
2549 root 1.166 clear_pending (EV_A_ (W)w);
2550 root 1.144 if (expect_false (!ev_is_active (w)))
2551     return;
2552    
2553     {
2554 root 1.230 int active = ev_active (w);
2555    
2556 root 1.144 checks [active - 1] = checks [--checkcnt];
2557 root 1.230 ev_active (checks [active - 1]) = active;
2558 root 1.144 }
2559    
2560     ev_stop (EV_A_ (W)w);
2561     }
2562    
2563     #if EV_EMBED_ENABLE
2564     void noinline
2565     ev_embed_sweep (EV_P_ ev_embed *w)
2566     {
2567 root 1.188 ev_loop (w->other, EVLOOP_NONBLOCK);
2568 root 1.144 }
2569    
2570     static void
2571 root 1.189 embed_io_cb (EV_P_ ev_io *io, int revents)
2572 root 1.144 {
2573     ev_embed *w = (ev_embed *)(((char *)io) - offsetof (ev_embed, io));
2574    
2575     if (ev_cb (w))
2576     ev_feed_event (EV_A_ (W)w, EV_EMBED);
2577     else
2578 root 1.195 ev_loop (w->other, EVLOOP_NONBLOCK);
2579 root 1.144 }
2580    
2581 root 1.189 static void
2582     embed_prepare_cb (EV_P_ ev_prepare *prepare, int revents)
2583     {
2584     ev_embed *w = (ev_embed *)(((char *)prepare) - offsetof (ev_embed, prepare));
2585    
2586 root 1.195 {
2587     struct ev_loop *loop = w->other;
2588    
2589     while (fdchangecnt)
2590     {
2591     fd_reify (EV_A);
2592     ev_loop (EV_A_ EVLOOP_NONBLOCK);
2593     }
2594     }
2595     }
2596    
2597     #if 0
2598     static void
2599     embed_idle_cb (EV_P_ ev_idle *idle, int revents)
2600     {
2601     ev_idle_stop (EV_A_ idle);
2602 root 1.189 }
2603 root 1.195 #endif
2604 root 1.189
2605 root 1.144 void
2606     ev_embed_start (EV_P_ ev_embed *w)
2607     {
2608     if (expect_false (ev_is_active (w)))
2609     return;
2610    
2611     {
2612 root 1.188 struct ev_loop *loop = w->other;
2613 root 1.144 assert (("loop to be embedded is not embeddable", backend & ev_embeddable_backends ()));
2614 root 1.191 ev_io_init (&w->io, embed_io_cb, backend_fd, EV_READ);
2615 root 1.144 }
2616    
2617     ev_set_priority (&w->io, ev_priority (w));
2618     ev_io_start (EV_A_ &w->io);
2619    
2620 root 1.189 ev_prepare_init (&w->prepare, embed_prepare_cb);
2621     ev_set_priority (&w->prepare, EV_MINPRI);
2622     ev_prepare_start (EV_A_ &w->prepare);
2623    
2624 root 1.195 /*ev_idle_init (&w->idle, embed_idle_cb);*/
2625    
2626 root 1.144 ev_start (EV_A_ (W)w, 1);
2627     }
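/* an illustrative usage sketch (not part of the original source), assuming EV_MULTIPLICITY
   is enabled and an embeddable backend such as kqueue is available:

     struct ev_loop *loop_hi = ev_default_init (0);
     struct ev_loop *loop_lo = ev_loop_new (EVBACKEND_KQUEUE);
     ev_embed embed;
     ev_embed_init (&embed, 0, loop_lo);   - a 0 callback makes embed_io_cb sweep automatically
     ev_embed_start (loop_hi, &embed);

   a real caller would first verify ev_backend (loop_lo) & ev_embeddable_backends (). */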
2628    
2629     void
2630     ev_embed_stop (EV_P_ ev_embed *w)
2631     {
2632 root 1.166 clear_pending (EV_A_ (W)w);
2633 root 1.144 if (expect_false (!ev_is_active (w)))
2634     return;
2635    
2636     ev_io_stop (EV_A_ &w->io);
2637 root 1.189 ev_prepare_stop (EV_A_ &w->prepare);
2638 root 1.144
2639     ev_stop (EV_A_ (W)w);
2640     }
2641     #endif
2642    
2643 root 1.147 #if EV_FORK_ENABLE
2644     void
2645     ev_fork_start (EV_P_ ev_fork *w)
2646     {
2647     if (expect_false (ev_is_active (w)))
2648     return;
2649    
2650     ev_start (EV_A_ (W)w, ++forkcnt);
2651     array_needsize (ev_fork *, forks, forkmax, forkcnt, EMPTY2);
2652     forks [forkcnt - 1] = w;
2653     }
2654    
2655     void
2656     ev_fork_stop (EV_P_ ev_fork *w)
2657     {
2658 root 1.166 clear_pending (EV_A_ (W)w);
2659 root 1.147 if (expect_false (!ev_is_active (w)))
2660     return;
2661    
2662     {
2663 root 1.230 int active = ev_active (w);
2664    
2665 root 1.147 forks [active - 1] = forks [--forkcnt];
2666 root 1.230 ev_active (forks [active - 1]) = active;
2667 root 1.147 }
2668    
2669     ev_stop (EV_A_ (W)w);
2670     }
2671     #endif
2672    
2673 root 1.207 #if EV_ASYNC_ENABLE
2674     void
2675     ev_async_start (EV_P_ ev_async *w)
2676     {
2677     if (expect_false (ev_is_active (w)))
2678     return;
2679    
2680     evpipe_init (EV_A);
2681    
2682     ev_start (EV_A_ (W)w, ++asynccnt);
2683     array_needsize (ev_async *, asyncs, asyncmax, asynccnt, EMPTY2);
2684     asyncs [asynccnt - 1] = w;
2685     }
2686    
2687     void
2688     ev_async_stop (EV_P_ ev_async *w)
2689     {
2690     clear_pending (EV_A_ (W)w);
2691     if (expect_false (!ev_is_active (w)))
2692     return;
2693    
2694     {
2695 root 1.230 int active = ev_active (w);
2696    
2697 root 1.207 asyncs [active - 1] = asyncs [--asynccnt];
2698 root 1.230 ev_active (asyncs [active - 1]) = active;
2699 root 1.207 }
2700    
2701     ev_stop (EV_A_ (W)w);
2702     }
2703    
2704     void
2705     ev_async_send (EV_P_ ev_async *w)
2706     {
2707     w->sent = 1;
2708 root 1.214 evpipe_write (EV_A_ &gotasync);
2709 root 1.207 }
2710     #endif
2711    
2712 root 1.1 /*****************************************************************************/
2713 root 1.10
2714 root 1.16 struct ev_once
2715     {
2716 root 1.136 ev_io io;
2717     ev_timer to;
2718 root 1.16 void (*cb)(int revents, void *arg);
2719     void *arg;
2720     };
2721    
2722     static void
2723 root 1.51 once_cb (EV_P_ struct ev_once *once, int revents)
2724 root 1.16 {
2725     void (*cb)(int revents, void *arg) = once->cb;
2726     void *arg = once->arg;
2727    
2728 root 1.51 ev_io_stop (EV_A_ &once->io);
2729     ev_timer_stop (EV_A_ &once->to);
2730 root 1.69 ev_free (once);
2731 root 1.16
2732     cb (revents, arg);
2733     }
2734    
2735     static void
2736 root 1.136 once_cb_io (EV_P_ ev_io *w, int revents)
2737 root 1.16 {
2738 root 1.51 once_cb (EV_A_ (struct ev_once *)(((char *)w) - offsetof (struct ev_once, io)), revents);
2739 root 1.16 }
2740    
2741     static void
2742 root 1.136 once_cb_to (EV_P_ ev_timer *w, int revents)
2743 root 1.16 {
2744 root 1.51 once_cb (EV_A_ (struct ev_once *)(((char *)w) - offsetof (struct ev_once, to)), revents);
2745 root 1.16 }
2746    
2747     void
2748 root 1.51 ev_once (EV_P_ int fd, int events, ev_tstamp timeout, void (*cb)(int revents, void *arg), void *arg)
2749 root 1.16 {
2750 root 1.74 struct ev_once *once = (struct ev_once *)ev_malloc (sizeof (struct ev_once));
2751 root 1.16
2752 root 1.123 if (expect_false (!once))
2753 root 1.16 {
2754 root 1.123 cb (EV_ERROR | EV_READ | EV_WRITE | EV_TIMEOUT, arg);
2755     return;
2756     }
2757    
2758     once->cb = cb;
2759     once->arg = arg;
2760 root 1.16
2761 root 1.123 ev_init (&once->io, once_cb_io);
2762     if (fd >= 0)
2763     {
2764     ev_io_set (&once->io, fd, events);
2765     ev_io_start (EV_A_ &once->io);
2766     }
2767 root 1.16
2768 root 1.123 ev_init (&once->to, once_cb_to);
2769     if (timeout >= 0.)
2770     {
2771     ev_timer_set (&once->to, timeout, 0.);
2772     ev_timer_start (EV_A_ &once->to);
2773 root 1.16 }
2774     }
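/* an illustrative usage sketch (not part of the original source), assuming EV_MULTIPLICITY
   is disabled and sock_fd names a connected descriptor in user code:

     static void once_done (int revents, void *arg)
     {
       if (revents & EV_TIMEOUT)
         puts ("timed out");
       else if (revents & EV_READ)
         puts ("readable");
     }

     ev_once (sock_fd, EV_READ, 10., once_done, 0);

   note that once_cb above stops both internal watchers and frees the ev_once allocation
   before invoking the user callback, so once_done may safely call ev_once again. */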
2775    
2776 root 1.188 #if EV_MULTIPLICITY
2777     #include "ev_wrap.h"
2778     #endif
2779    
2780 root 1.87 #ifdef __cplusplus
2781     }
2782     #endif
2783