/cvs/libev/ev.c

Comparing libev/ev.c (file contents):
Revision 1.29 by root, Thu Nov 1 08:10:03 2007 UTC vs.
Revision 1.140 by root, Mon Nov 26 19:49:36 2007 UTC

1/* 1/*
2 * libev event processing core, watcher management
3 *
2 * Copyright (c) 2007 Marc Alexander Lehmann <libev@schmorp.de> 4 * Copyright (c) 2007 Marc Alexander Lehmann <libev@schmorp.de>
3 * All rights reserved. 5 * All rights reserved.
4 * 6 *
5 * Redistribution and use in source and binary forms, with or without 7 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions are 8 * modification, are permitted provided that the following conditions are
24 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 26 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
25 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 27 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
26 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 28 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
27 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 29 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
28 */ 30 */
31
32#ifdef __cplusplus
33extern "C" {
34#endif
35
36#ifndef EV_STANDALONE
29#if EV_USE_CONFIG_H 37# ifdef EV_CONFIG_H
38# include EV_CONFIG_H
39# else
30# include "config.h" 40# include "config.h"
41# endif
42
43# if HAVE_CLOCK_GETTIME
44# ifndef EV_USE_MONOTONIC
45# define EV_USE_MONOTONIC 1
46# endif
47# ifndef EV_USE_REALTIME
48# define EV_USE_REALTIME 1
49# endif
50# else
51# ifndef EV_USE_MONOTONIC
52# define EV_USE_MONOTONIC 0
53# endif
54# ifndef EV_USE_REALTIME
55# define EV_USE_REALTIME 0
56# endif
57# endif
58
59# ifndef EV_USE_SELECT
60# if HAVE_SELECT && HAVE_SYS_SELECT_H
61# define EV_USE_SELECT 1
62# else
63# define EV_USE_SELECT 0
64# endif
65# endif
66
67# ifndef EV_USE_POLL
68# if HAVE_POLL && HAVE_POLL_H
69# define EV_USE_POLL 1
70# else
71# define EV_USE_POLL 0
72# endif
73# endif
74
75# ifndef EV_USE_EPOLL
76# if HAVE_EPOLL_CTL && HAVE_SYS_EPOLL_H
77# define EV_USE_EPOLL 1
78# else
79# define EV_USE_EPOLL 0
80# endif
81# endif
82
83# ifndef EV_USE_KQUEUE
84# if HAVE_KQUEUE && HAVE_SYS_EVENT_H && HAVE_SYS_QUEUE_H
85# define EV_USE_KQUEUE 1
86# else
87# define EV_USE_KQUEUE 0
88# endif
89# endif
90
91# ifndef EV_USE_PORT
92# if HAVE_PORT_H && HAVE_PORT_CREATE
93# define EV_USE_PORT 1
94# else
95# define EV_USE_PORT 0
96# endif
97# endif
98
31#endif 99#endif
32 100
33#include <math.h> 101#include <math.h>
34#include <stdlib.h> 102#include <stdlib.h>
35#include <unistd.h>
36#include <fcntl.h> 103#include <fcntl.h>
37#include <signal.h>
38#include <stddef.h> 104#include <stddef.h>
39 105
40#include <stdio.h> 106#include <stdio.h>
41 107
42#include <assert.h> 108#include <assert.h>
43#include <errno.h> 109#include <errno.h>
44#include <sys/types.h> 110#include <sys/types.h>
45#include <sys/wait.h>
46#include <sys/time.h>
47#include <time.h> 111#include <time.h>
48 112
113#include <signal.h>
114
115#ifndef _WIN32
116# include <sys/time.h>
117# include <sys/wait.h>
118# include <unistd.h>
119#else
120# define WIN32_LEAN_AND_MEAN
121# include <windows.h>
122# ifndef EV_SELECT_IS_WINSOCKET
123# define EV_SELECT_IS_WINSOCKET 1
124# endif
125#endif
126
127/**/
128
49#ifndef EV_USE_MONOTONIC 129#ifndef EV_USE_MONOTONIC
50# ifdef CLOCK_MONOTONIC
51# define EV_USE_MONOTONIC 1 130# define EV_USE_MONOTONIC 0
52# endif 131#endif
132
133#ifndef EV_USE_REALTIME
134# define EV_USE_REALTIME 0
53#endif 135#endif
54 136
55#ifndef EV_USE_SELECT 137#ifndef EV_USE_SELECT
56# define EV_USE_SELECT 1 138# define EV_USE_SELECT 1
57#endif 139#endif
58 140
141#ifndef EV_USE_POLL
142# ifdef _WIN32
143# define EV_USE_POLL 0
144# else
145# define EV_USE_POLL 1
146# endif
147#endif
148
59#ifndef EV_USE_EPOLL 149#ifndef EV_USE_EPOLL
60# define EV_USE_EPOLL 0 150# define EV_USE_EPOLL 0
61#endif 151#endif
62 152
153#ifndef EV_USE_KQUEUE
154# define EV_USE_KQUEUE 0
155#endif
156
157#ifndef EV_USE_PORT
158# define EV_USE_PORT 0
159#endif
160
161/**/
162
163#ifndef CLOCK_MONOTONIC
164# undef EV_USE_MONOTONIC
165# define EV_USE_MONOTONIC 0
166#endif
167
168#ifndef CLOCK_REALTIME
63#ifndef EV_USE_REALTIME 169# undef EV_USE_REALTIME
64# define EV_USE_REALTIME 1 /* posix requirement, but might be slower */ 170# define EV_USE_REALTIME 0
65#endif 171#endif
172
173#if EV_SELECT_IS_WINSOCKET
174# include <winsock.h>
175#endif
176
177/**/
66 178
67#define MIN_TIMEJUMP 1. /* minimum timejump that gets detected (if monotonic clock available) */ 179#define MIN_TIMEJUMP 1. /* minimum timejump that gets detected (if monotonic clock available) */
68#define MAX_BLOCKTIME 59.731 180#define MAX_BLOCKTIME 59.743 /* never wait longer than this time (to detect time jumps) */
69#define PID_HASHSIZE 16 /* size of pid hahs table, must be power of two */ 181#define PID_HASHSIZE 16 /* size of pid hash table, must be power of two */
182/*#define CLEANUP_INTERVAL (MAX_BLOCKTIME * 5.) /* how often to try to free memory and re-check fds */
70 183
184#ifdef EV_H
185# include EV_H
186#else
71#include "ev.h" 187# include "ev.h"
188#endif
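/* Illustrative embedding sketch (not part of either revision shown): with
   EV_STANDALONE defined, the config.h/EV_CONFIG_H probing above is skipped,
   so the EV_USE_* knobs must be chosen by hand. Hypothetical wrapper file: */

/* myapp_ev.c -- application-provided wrapper, names are examples only */
#define EV_STANDALONE 1         /* do not look for config.h */
#define EV_USE_SELECT 1         /* portable fallback backend */
#define EV_USE_EPOLL  0         /* assumption: target system lacks epoll */
#include "ev.c"                 /* compile the libev core into the app */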
72 189
190#if __GNUC__ >= 3
191# define expect(expr,value) __builtin_expect ((expr),(value))
192# define inline_size static inline /* inline for codesize */
193# if EV_MINIMAL
194# define noinline __attribute__ ((noinline))
195# define inline_speed static noinline
196# else
197# define noinline
198# define inline_speed static inline
199# endif
200#else
201# define expect(expr,value) (expr)
202# define inline_speed static
203# define inline_minimal static
204# define noinline
205#endif
206
207#define expect_false(expr) expect ((expr) != 0, 0)
208#define expect_true(expr) expect ((expr) != 0, 1)
209
210#define NUMPRI (EV_MAXPRI - EV_MINPRI + 1)
211#define ABSPRI(w) ((w)->priority - EV_MINPRI)
212
213#define EMPTY0 /* required for microsofts broken pseudo-c compiler */
214#define EMPTY2(a,b) /* used to suppress some warnings */
215
73typedef struct ev_watcher *W; 216typedef ev_watcher *W;
74typedef struct ev_watcher_list *WL; 217typedef ev_watcher_list *WL;
75typedef struct ev_watcher_time *WT; 218typedef ev_watcher_time *WT;
76 219
77static ev_tstamp now, diff; /* monotonic clock */ 220static int have_monotonic; /* did clock_gettime (CLOCK_MONOTONIC) work? */
78ev_tstamp ev_now;
79int ev_method;
80 221
81static int have_monotonic; /* runtime */ 222#ifdef _WIN32
82 223# include "ev_win32.c"
83static ev_tstamp method_fudge; /* stupid epoll-returns-early bug */ 224#endif
84static void (*method_modify)(int fd, int oev, int nev);
85static void (*method_poll)(ev_tstamp timeout);
86 225
87/*****************************************************************************/ 226/*****************************************************************************/
88 227
89ev_tstamp 228static void (*syserr_cb)(const char *msg);
229
230void ev_set_syserr_cb (void (*cb)(const char *msg))
231{
232 syserr_cb = cb;
233}
234
235static void
236syserr (const char *msg)
237{
238 if (!msg)
239 msg = "(libev) system error";
240
241 if (syserr_cb)
242 syserr_cb (msg);
243 else
244 {
245 perror (msg);
246 abort ();
247 }
248}
249
250static void *(*alloc)(void *ptr, long size);
251
252void ev_set_allocator (void *(*cb)(void *ptr, long size))
253{
254 alloc = cb;
255}
256
257static void *
258ev_realloc (void *ptr, long size)
259{
260 ptr = alloc ? alloc (ptr, size) : realloc (ptr, size);
261
262 if (!ptr && size)
263 {
264 fprintf (stderr, "libev: cannot allocate %ld bytes, aborting.", size);
265 abort ();
266 }
267
268 return ptr;
269}
270
271#define ev_malloc(size) ev_realloc (0, (size))
272#define ev_free(ptr) ev_realloc ((ptr), 0)
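/* Hedged usage sketch for the allocator hook above (application-side code,
   not part of ev.c): retry on allocation failure instead of aborting. */
#include <stdlib.h>
#include <unistd.h>

static void *
persistent_realloc (void *ptr, long size)
{
  for (;;)
    {
      void *newptr = realloc (ptr, size);   /* size == 0 frees, as ev_realloc expects */

      if (newptr || !size)
        return newptr;

      sleep (1);                            /* out of memory: wait and retry */
    }
}

/* installed once, before any loop is created: ev_set_allocator (persistent_realloc); */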
273
274/*****************************************************************************/
275
276typedef struct
277{
278 WL head;
279 unsigned char events;
280 unsigned char reify;
281#if EV_SELECT_IS_WINSOCKET
282 SOCKET handle;
283#endif
284} ANFD;
285
286typedef struct
287{
288 W w;
289 int events;
290} ANPENDING;
291
292#if EV_MULTIPLICITY
293
294 struct ev_loop
295 {
296 ev_tstamp ev_rt_now;
297 #define ev_rt_now ((loop)->ev_rt_now)
298 #define VAR(name,decl) decl;
299 #include "ev_vars.h"
300 #undef VAR
301 };
302 #include "ev_wrap.h"
303
304 static struct ev_loop default_loop_struct;
305 struct ev_loop *ev_default_loop_ptr;
306
307#else
308
309 ev_tstamp ev_rt_now;
310 #define VAR(name,decl) static decl;
311 #include "ev_vars.h"
312 #undef VAR
313
314 static int ev_default_loop_ptr;
315
316#endif
317
318/*****************************************************************************/
319
320ev_tstamp noinline
90ev_time (void) 321ev_time (void)
91{ 322{
92#if EV_USE_REALTIME 323#if EV_USE_REALTIME
93 struct timespec ts; 324 struct timespec ts;
94 clock_gettime (CLOCK_REALTIME, &ts); 325 clock_gettime (CLOCK_REALTIME, &ts);
98 gettimeofday (&tv, 0); 329 gettimeofday (&tv, 0);
99 return tv.tv_sec + tv.tv_usec * 1e-6; 330 return tv.tv_sec + tv.tv_usec * 1e-6;
100#endif 331#endif
101} 332}
102 333
103static ev_tstamp 334ev_tstamp inline_size
104get_clock (void) 335get_clock (void)
105{ 336{
106#if EV_USE_MONOTONIC 337#if EV_USE_MONOTONIC
107 if (have_monotonic) 338 if (expect_true (have_monotonic))
108 { 339 {
109 struct timespec ts; 340 struct timespec ts;
110 clock_gettime (CLOCK_MONOTONIC, &ts); 341 clock_gettime (CLOCK_MONOTONIC, &ts);
111 return ts.tv_sec + ts.tv_nsec * 1e-9; 342 return ts.tv_sec + ts.tv_nsec * 1e-9;
112 } 343 }
113#endif 344#endif
114 345
115 return ev_time (); 346 return ev_time ();
116} 347}
117 348
349#if EV_MULTIPLICITY
350ev_tstamp
351ev_now (EV_P)
352{
353 return ev_rt_now;
354}
355#endif
356
118#define array_nextsize(n) (((n) << 1) | 4 & ~3) 357#define array_roundsize(type,n) (((n) | 4) & ~3)
119#define array_prevsize(n) (((n) >> 1) | 4 & ~3)
120 358
121#define array_needsize(base,cur,cnt,init) \ 359#define array_needsize(type,base,cur,cnt,init) \
122 if ((cnt) > cur) \ 360 if (expect_false ((cnt) > cur)) \
123 { \ 361 { \
124 int newcnt = cur; \ 362 int newcnt = cur; \
125 do \ 363 do \
126 { \ 364 { \
127 newcnt = array_nextsize (newcnt); \ 365 newcnt = array_roundsize (type, newcnt << 1); \
128 } \ 366 } \
129 while ((cnt) > newcnt); \ 367 while ((cnt) > newcnt); \
130 \ 368 \
131 base = realloc (base, sizeof (*base) * (newcnt)); \ 369 base = (type *)ev_realloc (base, sizeof (type) * (newcnt));\
132 init (base + cur, newcnt - cur); \ 370 init (base + cur, newcnt - cur); \
133 cur = newcnt; \ 371 cur = newcnt; \
134 } 372 }
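/* Worked example (illustrative): array_roundsize (type, n) is ((n) | 4) & ~3,
   i.e. at least 4 and always a multiple of 4, so growing an array one element
   at a time yields 4, 12, 28, 60, 124, ... slots; init () is run only on the
   newcnt - cur newly added slots. */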
373
374#define array_slim(type,stem) \
375 if (stem ## max < array_roundsize (stem ## cnt >> 2)) \
376 { \
377 stem ## max = array_roundsize (stem ## cnt >> 1); \
378 base = (type *)ev_realloc (base, sizeof (type) * (stem ## max));\
379 fprintf (stderr, "slimmed down " # stem " to %d\n", stem ## max);/*D*/\
380 }
381
382#define array_free(stem, idx) \
383 ev_free (stem ## s idx); stem ## cnt idx = stem ## max idx = 0;
135 384
136/*****************************************************************************/ 385/*****************************************************************************/
137 386
138typedef struct 387void inline_size
139{
140 struct ev_io *head;
141 int events;
142} ANFD;
143
144static ANFD *anfds;
145static int anfdmax;
146
147static void
148anfds_init (ANFD *base, int count) 388anfds_init (ANFD *base, int count)
149{ 389{
150 while (count--) 390 while (count--)
151 { 391 {
152 base->head = 0; 392 base->head = 0;
153 base->events = EV_NONE; 393 base->events = EV_NONE;
394 base->reify = 0;
395
154 ++base; 396 ++base;
155 } 397 }
156} 398}
157 399
158typedef struct 400void noinline
401ev_feed_event (EV_P_ void *w, int revents)
159{ 402{
160 W w; 403 W w_ = (W)w;
161 int events;
162} ANPENDING;
163 404
164static ANPENDING *pendings; 405 if (expect_false (w_->pending))
165static int pendingmax, pendingcnt; 406 {
407 pendings [ABSPRI (w_)][w_->pending - 1].events |= revents;
408 return;
409 }
410
411 w_->pending = ++pendingcnt [ABSPRI (w_)];
412 array_needsize (ANPENDING, pendings [ABSPRI (w_)], pendingmax [ABSPRI (w_)], pendingcnt [ABSPRI (w_)], EMPTY2);
413 pendings [ABSPRI (w_)][w_->pending - 1].w = w_;
414 pendings [ABSPRI (w_)][w_->pending - 1].events = revents;
415}
166 416
167static void 417static void
168event (W w, int events)
169{
170 if (w->active)
171 {
172 w->pending = ++pendingcnt;
173 array_needsize (pendings, pendingmax, pendingcnt, );
174 pendings [pendingcnt - 1].w = w;
175 pendings [pendingcnt - 1].events = events;
176 }
177}
178
179static void
180queue_events (W *events, int eventcnt, int type) 418queue_events (EV_P_ W *events, int eventcnt, int type)
181{ 419{
182 int i; 420 int i;
183 421
184 for (i = 0; i < eventcnt; ++i) 422 for (i = 0; i < eventcnt; ++i)
185 event (events [i], type); 423 ev_feed_event (EV_A_ events [i], type);
186} 424}
187 425
188static void 426void inline_speed
189fd_event (int fd, int events) 427fd_event (EV_P_ int fd, int revents)
190{ 428{
191 ANFD *anfd = anfds + fd; 429 ANFD *anfd = anfds + fd;
192 struct ev_io *w; 430 ev_io *w;
193 431
194 for (w = anfd->head; w; w = w->next) 432 for (w = (ev_io *)anfd->head; w; w = (ev_io *)((WL)w)->next)
195 { 433 {
196 int ev = w->events & events; 434 int ev = w->events & revents;
197 435
198 if (ev) 436 if (ev)
199 event ((W)w, ev); 437 ev_feed_event (EV_A_ (W)w, ev);
200 } 438 }
439}
440
441void
442ev_feed_fd_event (EV_P_ int fd, int revents)
443{
444 fd_event (EV_A_ fd, revents);
201} 445}
202 446
203/*****************************************************************************/ 447/*****************************************************************************/
204 448
205static int *fdchanges; 449void inline_size
206static int fdchangemax, fdchangecnt; 450fd_reify (EV_P)
207
208static void
209fd_reify (void)
210{ 451{
211 int i; 452 int i;
212 453
213 for (i = 0; i < fdchangecnt; ++i) 454 for (i = 0; i < fdchangecnt; ++i)
214 { 455 {
215 int fd = fdchanges [i]; 456 int fd = fdchanges [i];
216 ANFD *anfd = anfds + fd; 457 ANFD *anfd = anfds + fd;
217 struct ev_io *w; 458 ev_io *w;
218 459
219 int events = 0; 460 int events = 0;
220 461
221 for (w = anfd->head; w; w = w->next) 462 for (w = (ev_io *)anfd->head; w; w = (ev_io *)((WL)w)->next)
222 events |= w->events; 463 events |= w->events;
223 464
224 anfd->events &= ~EV_REIFY; 465#if EV_SELECT_IS_WINSOCKET
225 466 if (events)
226 if (anfd->events != events)
227 { 467 {
228 method_modify (fd, anfd->events, events); 468 unsigned long argp;
229 anfd->events = events; 469 anfd->handle = _get_osfhandle (fd);
470 assert (("libev only supports socket fds in this configuration", ioctlsocket (anfd->handle, FIONREAD, &argp) == 0));
230 } 471 }
472#endif
473
474 anfd->reify = 0;
475
476 backend_modify (EV_A_ fd, anfd->events, events);
477 anfd->events = events;
231 } 478 }
232 479
233 fdchangecnt = 0; 480 fdchangecnt = 0;
234} 481}
235 482
236static void 483void inline_size
237fd_change (int fd) 484fd_change (EV_P_ int fd)
238{ 485{
239 if (anfds [fd].events & EV_REIFY || fdchangecnt < 0) 486 if (expect_false (anfds [fd].reify))
240 return; 487 return;
241 488
242 anfds [fd].events |= EV_REIFY; 489 anfds [fd].reify = 1;
243 490
244 ++fdchangecnt; 491 ++fdchangecnt;
245 array_needsize (fdchanges, fdchangemax, fdchangecnt, ); 492 array_needsize (int, fdchanges, fdchangemax, fdchangecnt, EMPTY2);
246 fdchanges [fdchangecnt - 1] = fd; 493 fdchanges [fdchangecnt - 1] = fd;
247} 494}
248 495
496void inline_speed
497fd_kill (EV_P_ int fd)
498{
499 ev_io *w;
500
501 while ((w = (ev_io *)anfds [fd].head))
502 {
503 ev_io_stop (EV_A_ w);
504 ev_feed_event (EV_A_ (W)w, EV_ERROR | EV_READ | EV_WRITE);
505 }
506}
507
508int inline_size
509fd_valid (int fd)
510{
511#ifdef _WIN32
512 return _get_osfhandle (fd) != -1;
513#else
514 return fcntl (fd, F_GETFD) != -1;
515#endif
516}
517
249/* called on EBADF to verify fds */ 518/* called on EBADF to verify fds */
250static void 519static void noinline
251fd_recheck (void) 520fd_ebadf (EV_P)
252{ 521{
253 int fd; 522 int fd;
254 523
255 for (fd = 0; fd < anfdmax; ++fd) 524 for (fd = 0; fd < anfdmax; ++fd)
256 if (anfds [fd].events) 525 if (anfds [fd].events)
 257 if (fcntl (fd, F_GETFD) == -1 && errno == EBADF) 526 if (!fd_valid (fd) && errno == EBADF)
258 while (anfds [fd].head) 527 fd_kill (EV_A_ fd);
528}
529
530/* called on ENOMEM in select/poll to kill some fds and retry */
531static void noinline
532fd_enomem (EV_P)
533{
534 int fd;
535
536 for (fd = anfdmax; fd--; )
537 if (anfds [fd].events)
259 { 538 {
260 event ((W)anfds [fd].head, EV_ERROR | EV_READ | EV_WRITE | EV_TIMEOUT); 539 fd_kill (EV_A_ fd);
261 ev_io_stop (anfds [fd].head); 540 return;
262 } 541 }
542}
543
544/* usually called after fork if backend needs to re-arm all fds from scratch */
545static void noinline
546fd_rearm_all (EV_P)
547{
548 int fd;
549
550 /* this should be highly optimised to not do anything but set a flag */
551 for (fd = 0; fd < anfdmax; ++fd)
552 if (anfds [fd].events)
553 {
554 anfds [fd].events = 0;
555 fd_change (EV_A_ fd);
556 }
263} 557}
264 558
265/*****************************************************************************/ 559/*****************************************************************************/
266 560
267static struct ev_timer **timers; 561void inline_speed
268static int timermax, timercnt;
269
270static struct ev_periodic **periodics;
271static int periodicmax, periodiccnt;
272
273static void
274upheap (WT *timers, int k) 562upheap (WT *heap, int k)
275{ 563{
276 WT w = timers [k]; 564 WT w = heap [k];
277 565
278 while (k && timers [k >> 1]->at > w->at) 566 while (k && heap [k >> 1]->at > w->at)
279 { 567 {
280 timers [k] = timers [k >> 1]; 568 heap [k] = heap [k >> 1];
281 timers [k]->active = k + 1; 569 ((W)heap [k])->active = k + 1;
282 k >>= 1; 570 k >>= 1;
283 } 571 }
284 572
285 timers [k] = w; 573 heap [k] = w;
286 timers [k]->active = k + 1; 574 ((W)heap [k])->active = k + 1;
287 575
288} 576}
289 577
290static void 578void inline_speed
291downheap (WT *timers, int N, int k) 579downheap (WT *heap, int N, int k)
292{ 580{
293 WT w = timers [k]; 581 WT w = heap [k];
294 582
295 while (k < (N >> 1)) 583 while (k < (N >> 1))
296 { 584 {
297 int j = k << 1; 585 int j = k << 1;
298 586
299 if (j + 1 < N && timers [j]->at > timers [j + 1]->at) 587 if (j + 1 < N && heap [j]->at > heap [j + 1]->at)
300 ++j; 588 ++j;
301 589
302 if (w->at <= timers [j]->at) 590 if (w->at <= heap [j]->at)
303 break; 591 break;
304 592
305 timers [k] = timers [j]; 593 heap [k] = heap [j];
306 timers [k]->active = k + 1; 594 ((W)heap [k])->active = k + 1;
307 k = j; 595 k = j;
308 } 596 }
309 597
310 timers [k] = w; 598 heap [k] = w;
311 timers [k]->active = k + 1; 599 ((W)heap [k])->active = k + 1;
600}
601
602void inline_size
603adjustheap (WT *heap, int N, int k)
604{
605 upheap (heap, k);
606 downheap (heap, N, k);
312} 607}
313 608
314/*****************************************************************************/ 609/*****************************************************************************/
315 610
316typedef struct 611typedef struct
317{ 612{
318 struct ev_signal *head; 613 WL head;
319 sig_atomic_t gotsig; 614 sig_atomic_t volatile gotsig;
320} ANSIG; 615} ANSIG;
321 616
322static ANSIG *signals; 617static ANSIG *signals;
323static int signalmax; 618static int signalmax;
324 619
325static int sigpipe [2]; 620static int sigpipe [2];
326static sig_atomic_t gotsig; 621static sig_atomic_t volatile gotsig;
327static struct ev_io sigev; 622static ev_io sigev;
328 623
329static void 624void inline_size
330signals_init (ANSIG *base, int count) 625signals_init (ANSIG *base, int count)
331{ 626{
332 while (count--) 627 while (count--)
333 { 628 {
334 base->head = 0; 629 base->head = 0;
335 base->gotsig = 0; 630 base->gotsig = 0;
631
336 ++base; 632 ++base;
337 } 633 }
338} 634}
339 635
340static void 636static void
341sighandler (int signum) 637sighandler (int signum)
342{ 638{
639#if _WIN32
640 signal (signum, sighandler);
641#endif
642
343 signals [signum - 1].gotsig = 1; 643 signals [signum - 1].gotsig = 1;
344 644
345 if (!gotsig) 645 if (!gotsig)
346 { 646 {
647 int old_errno = errno;
347 gotsig = 1; 648 gotsig = 1;
348 write (sigpipe [1], &gotsig, 1); 649 write (sigpipe [1], &signum, 1);
650 errno = old_errno;
349 } 651 }
652}
653
654void noinline
655ev_feed_signal_event (EV_P_ int signum)
656{
657 WL w;
658
659#if EV_MULTIPLICITY
660 assert (("feeding signal events is only supported in the default loop", loop == ev_default_loop_ptr));
661#endif
662
663 --signum;
664
665 if (signum < 0 || signum >= signalmax)
666 return;
667
668 signals [signum].gotsig = 0;
669
670 for (w = signals [signum].head; w; w = w->next)
671 ev_feed_event (EV_A_ (W)w, EV_SIGNAL);
350} 672}
351 673
352static void 674static void
353sigcb (struct ev_io *iow, int revents) 675sigcb (EV_P_ ev_io *iow, int revents)
354{ 676{
355 struct ev_signal *w;
356 int sig; 677 int signum;
357 678
679 read (sigpipe [0], &revents, 1);
358 gotsig = 0; 680 gotsig = 0;
359 read (sigpipe [0], &revents, 1);
360 681
361 for (sig = signalmax; sig--; ) 682 for (signum = signalmax; signum--; )
362 if (signals [sig].gotsig) 683 if (signals [signum].gotsig)
363 { 684 ev_feed_signal_event (EV_A_ signum + 1);
364 signals [sig].gotsig = 0;
365
366 for (w = signals [sig].head; w; w = w->next)
367 event ((W)w, EV_SIGNAL);
368 }
369} 685}
370 686
371static void 687void inline_size
372siginit (void) 688fd_intern (int fd)
373{ 689{
690#ifdef _WIN32
691 int arg = 1;
692 ioctlsocket (_get_osfhandle (fd), FIONBIO, &arg);
693#else
374 fcntl (sigpipe [0], F_SETFD, FD_CLOEXEC); 694 fcntl (fd, F_SETFD, FD_CLOEXEC);
375 fcntl (sigpipe [1], F_SETFD, FD_CLOEXEC);
376
377 /* rather than sort out wether we really need nb, set it */
378 fcntl (sigpipe [0], F_SETFL, O_NONBLOCK); 695 fcntl (fd, F_SETFL, O_NONBLOCK);
379 fcntl (sigpipe [1], F_SETFL, O_NONBLOCK); 696#endif
697}
698
699static void noinline
700siginit (EV_P)
701{
702 fd_intern (sigpipe [0]);
703 fd_intern (sigpipe [1]);
380 704
381 ev_io_set (&sigev, sigpipe [0], EV_READ); 705 ev_io_set (&sigev, sigpipe [0], EV_READ);
382 ev_io_start (&sigev); 706 ev_io_start (EV_A_ &sigev);
707 ev_unref (EV_A); /* child watcher should not keep loop alive */
383} 708}
384 709
385/*****************************************************************************/ 710/*****************************************************************************/
386 711
387static struct ev_idle **idles;
388static int idlemax, idlecnt;
389
390static struct ev_prepare **prepares;
391static int preparemax, preparecnt;
392
393static struct ev_check **checks;
394static int checkmax, checkcnt;
395
396/*****************************************************************************/
397
398static struct ev_child *childs [PID_HASHSIZE]; 712static ev_child *childs [PID_HASHSIZE];
713
714#ifndef _WIN32
715
399static struct ev_signal childev; 716static ev_signal childev;
400 717
401#ifndef WCONTINUED 718#ifndef WCONTINUED
402# define WCONTINUED 0 719# define WCONTINUED 0
403#endif 720#endif
404 721
722void inline_speed
723child_reap (EV_P_ ev_signal *sw, int chain, int pid, int status)
724{
725 ev_child *w;
726
727 for (w = (ev_child *)childs [chain & (PID_HASHSIZE - 1)]; w; w = (ev_child *)((WL)w)->next)
728 if (w->pid == pid || !w->pid)
729 {
730 ev_priority (w) = ev_priority (sw); /* need to do it *now* */
731 w->rpid = pid;
732 w->rstatus = status;
733 ev_feed_event (EV_A_ (W)w, EV_CHILD);
734 }
735}
736
405static void 737static void
406childcb (struct ev_signal *sw, int revents) 738childcb (EV_P_ ev_signal *sw, int revents)
407{ 739{
408 struct ev_child *w;
409 int pid, status; 740 int pid, status;
410 741
411 while ((pid = waitpid (-1, &status, WNOHANG | WUNTRACED | WCONTINUED)) != -1) 742 if (0 < (pid = waitpid (-1, &status, WNOHANG | WUNTRACED | WCONTINUED)))
412 for (w = childs [pid & (PID_HASHSIZE - 1)]; w; w = w->next) 743 {
413 if (w->pid == pid || w->pid == -1) 744 /* make sure we are called again until all childs have been reaped */
414 { 745 /* we need to do it this way so that the callback gets called before we continue */
415 w->status = status; 746 ev_feed_event (EV_A_ (W)sw, EV_SIGNAL);
416 event ((W)w, EV_CHILD); 747
417 } 748 child_reap (EV_A_ sw, pid, pid, status);
749 child_reap (EV_A_ sw, 0, pid, status); /* this might trigger a watcher twice, but feed_event catches that */
750 }
418} 751}
752
753#endif
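/* Hedged usage sketch for the child-watcher path above (application code,
   hypothetical names; child watchers only work in the default loop, as the
   assert in ev_child_start below enforces). */
static ev_child cw;

static void
child_cb (EV_P_ ev_child *w, int revents)
{
  /* child_reap filled in the waitpid () results before feeding EV_CHILD */
  fprintf (stderr, "pid %d exited, status %x\n", w->rpid, w->rstatus);
  ev_child_stop (EV_A_ w);
}

/* in the parent, after fork ():
     ev_child_init (&cw, child_cb, pid);    pid to watch, or 0 for any child
     ev_child_start (EV_A_ &cw);                                             */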
419 754
420/*****************************************************************************/ 755/*****************************************************************************/
421 756
757#if EV_USE_PORT
758# include "ev_port.c"
759#endif
760#if EV_USE_KQUEUE
761# include "ev_kqueue.c"
762#endif
422#if EV_USE_EPOLL 763#if EV_USE_EPOLL
423# include "ev_epoll.c" 764# include "ev_epoll.c"
424#endif 765#endif
766#if EV_USE_POLL
767# include "ev_poll.c"
768#endif
425#if EV_USE_SELECT 769#if EV_USE_SELECT
426# include "ev_select.c" 770# include "ev_select.c"
427#endif 771#endif
428 772
429int 773int
436ev_version_minor (void) 780ev_version_minor (void)
437{ 781{
438 return EV_VERSION_MINOR; 782 return EV_VERSION_MINOR;
439} 783}
440 784
441int ev_init (int flags) 785/* return true if we are running with elevated privileges and should ignore env variables */
786int inline_size
787enable_secure (void)
442{ 788{
443 if (!ev_method) 789#ifdef _WIN32
790 return 0;
791#else
792 return getuid () != geteuid ()
793 || getgid () != getegid ();
794#endif
795}
796
797unsigned int
798ev_supported_backends (void)
799{
800 unsigned int flags = 0;
801
802 if (EV_USE_PORT ) flags |= EVBACKEND_PORT;
803 if (EV_USE_KQUEUE) flags |= EVBACKEND_KQUEUE;
804 if (EV_USE_EPOLL ) flags |= EVBACKEND_EPOLL;
805 if (EV_USE_POLL ) flags |= EVBACKEND_POLL;
806 if (EV_USE_SELECT) flags |= EVBACKEND_SELECT;
807
808 return flags;
809}
810
811unsigned int
812ev_recommended_backends (void)
813{
814 unsigned int flags = ev_supported_backends ();
815
816#ifndef __NetBSD__
817 /* kqueue is borked on everything but netbsd apparently */
818 /* it usually doesn't work correctly on anything but sockets and pipes */
819 flags &= ~EVBACKEND_KQUEUE;
820#endif
821#ifdef __APPLE__
822 // flags &= ~EVBACKEND_KQUEUE; for documentation
823 flags &= ~EVBACKEND_POLL;
824#endif
825
826 return flags;
827}
828
829unsigned int
830ev_embeddable_backends (void)
831{
832 return EVBACKEND_EPOLL
833 | EVBACKEND_KQUEUE
834 | EVBACKEND_PORT;
835}
836
837unsigned int
838ev_backend (EV_P)
839{
840 return backend;
841}
842
843static void
844loop_init (EV_P_ unsigned int flags)
845{
846 if (!backend)
444 { 847 {
445#if EV_USE_MONOTONIC 848#if EV_USE_MONOTONIC
446 { 849 {
447 struct timespec ts; 850 struct timespec ts;
448 if (!clock_gettime (CLOCK_MONOTONIC, &ts)) 851 if (!clock_gettime (CLOCK_MONOTONIC, &ts))
449 have_monotonic = 1; 852 have_monotonic = 1;
450 } 853 }
451#endif 854#endif
452 855
453 ev_now = ev_time (); 856 ev_rt_now = ev_time ();
454 now = get_clock (); 857 mn_now = get_clock ();
858 now_floor = mn_now;
455 diff = ev_now - now; 859 rtmn_diff = ev_rt_now - mn_now;
456 860
457 if (pipe (sigpipe)) 861 if (!(flags & EVFLAG_NOENV)
458 return 0; 862 && !enable_secure ()
863 && getenv ("LIBEV_FLAGS"))
864 flags = atoi (getenv ("LIBEV_FLAGS"));
459 865
460 ev_method = EVMETHOD_NONE; 866 if (!(flags & 0x0000ffffUL))
867 flags |= ev_recommended_backends ();
868
869 backend = 0;
870#if EV_USE_PORT
871 if (!backend && (flags & EVBACKEND_PORT )) backend = port_init (EV_A_ flags);
872#endif
873#if EV_USE_KQUEUE
874 if (!backend && (flags & EVBACKEND_KQUEUE)) backend = kqueue_init (EV_A_ flags);
875#endif
461#if EV_USE_EPOLL 876#if EV_USE_EPOLL
462 if (ev_method == EVMETHOD_NONE) epoll_init (flags); 877 if (!backend && (flags & EVBACKEND_EPOLL )) backend = epoll_init (EV_A_ flags);
878#endif
879#if EV_USE_POLL
880 if (!backend && (flags & EVBACKEND_POLL )) backend = poll_init (EV_A_ flags);
463#endif 881#endif
464#if EV_USE_SELECT 882#if EV_USE_SELECT
465 if (ev_method == EVMETHOD_NONE) select_init (flags); 883 if (!backend && (flags & EVBACKEND_SELECT)) backend = select_init (EV_A_ flags);
466#endif 884#endif
467 885
468 if (ev_method) 886 ev_init (&sigev, sigcb);
887 ev_set_priority (&sigev, EV_MAXPRI);
888 }
889}
890
891static void
892loop_destroy (EV_P)
893{
894 int i;
895
896#if EV_USE_PORT
897 if (backend == EVBACKEND_PORT ) port_destroy (EV_A);
898#endif
899#if EV_USE_KQUEUE
900 if (backend == EVBACKEND_KQUEUE) kqueue_destroy (EV_A);
901#endif
902#if EV_USE_EPOLL
903 if (backend == EVBACKEND_EPOLL ) epoll_destroy (EV_A);
904#endif
905#if EV_USE_POLL
906 if (backend == EVBACKEND_POLL ) poll_destroy (EV_A);
907#endif
908#if EV_USE_SELECT
909 if (backend == EVBACKEND_SELECT) select_destroy (EV_A);
910#endif
911
912 for (i = NUMPRI; i--; )
913 array_free (pending, [i]);
914
915 /* have to use the microsoft-never-gets-it-right macro */
916 array_free (fdchange, EMPTY0);
917 array_free (timer, EMPTY0);
918#if EV_PERIODIC_ENABLE
919 array_free (periodic, EMPTY0);
920#endif
921 array_free (idle, EMPTY0);
922 array_free (prepare, EMPTY0);
923 array_free (check, EMPTY0);
924
925 backend = 0;
926}
927
928static void
929loop_fork (EV_P)
930{
931#if EV_USE_PORT
932 if (backend == EVBACKEND_PORT ) port_fork (EV_A);
933#endif
934#if EV_USE_KQUEUE
935 if (backend == EVBACKEND_KQUEUE) kqueue_fork (EV_A);
936#endif
937#if EV_USE_EPOLL
938 if (backend == EVBACKEND_EPOLL ) epoll_fork (EV_A);
939#endif
940
941 if (ev_is_active (&sigev))
942 {
943 /* default loop */
944
945 ev_ref (EV_A);
946 ev_io_stop (EV_A_ &sigev);
947 close (sigpipe [0]);
948 close (sigpipe [1]);
949
950 while (pipe (sigpipe))
951 syserr ("(libev) error creating pipe");
952
953 siginit (EV_A);
954 }
955
956 postfork = 0;
957}
958
959#if EV_MULTIPLICITY
960struct ev_loop *
961ev_loop_new (unsigned int flags)
962{
963 struct ev_loop *loop = (struct ev_loop *)ev_malloc (sizeof (struct ev_loop));
964
965 memset (loop, 0, sizeof (struct ev_loop));
966
967 loop_init (EV_A_ flags);
968
969 if (ev_backend (EV_A))
970 return loop;
971
972 return 0;
973}
974
975void
976ev_loop_destroy (EV_P)
977{
978 loop_destroy (EV_A);
979 ev_free (loop);
980}
981
982void
983ev_loop_fork (EV_P)
984{
985 postfork = 1;
986}
987
988#endif
989
990#if EV_MULTIPLICITY
991struct ev_loop *
992ev_default_loop_init (unsigned int flags)
993#else
994int
995ev_default_loop (unsigned int flags)
996#endif
997{
998 if (sigpipe [0] == sigpipe [1])
999 if (pipe (sigpipe))
1000 return 0;
1001
1002 if (!ev_default_loop_ptr)
1003 {
1004#if EV_MULTIPLICITY
1005 struct ev_loop *loop = ev_default_loop_ptr = &default_loop_struct;
1006#else
1007 ev_default_loop_ptr = 1;
1008#endif
1009
1010 loop_init (EV_A_ flags);
1011
1012 if (ev_backend (EV_A))
469 { 1013 {
470 ev_watcher_init (&sigev, sigcb);
471 siginit (); 1014 siginit (EV_A);
472 1015
1016#ifndef _WIN32
473 ev_signal_init (&childev, childcb, SIGCHLD); 1017 ev_signal_init (&childev, childcb, SIGCHLD);
1018 ev_set_priority (&childev, EV_MAXPRI);
474 ev_signal_start (&childev); 1019 ev_signal_start (EV_A_ &childev);
1020 ev_unref (EV_A); /* child watcher should not keep loop alive */
1021#endif
475 } 1022 }
1023 else
1024 ev_default_loop_ptr = 0;
476 } 1025 }
477 1026
478 return ev_method; 1027 return ev_default_loop_ptr;
1028}
1029
1030void
1031ev_default_destroy (void)
1032{
1033#if EV_MULTIPLICITY
1034 struct ev_loop *loop = ev_default_loop_ptr;
1035#endif
1036
1037#ifndef _WIN32
1038 ev_ref (EV_A); /* child watcher */
1039 ev_signal_stop (EV_A_ &childev);
1040#endif
1041
1042 ev_ref (EV_A); /* signal watcher */
1043 ev_io_stop (EV_A_ &sigev);
1044
1045 close (sigpipe [0]); sigpipe [0] = 0;
1046 close (sigpipe [1]); sigpipe [1] = 0;
1047
1048 loop_destroy (EV_A);
1049}
1050
1051void
1052ev_default_fork (void)
1053{
1054#if EV_MULTIPLICITY
1055 struct ev_loop *loop = ev_default_loop_ptr;
1056#endif
1057
1058 if (backend)
1059 postfork = 1;
479} 1060}
480 1061
481/*****************************************************************************/ 1062/*****************************************************************************/
482 1063
483void 1064int inline_size
484ev_prefork (void) 1065any_pending (EV_P)
485{ 1066{
486 /* nop */ 1067 int pri;
487}
488 1068
489void 1069 for (pri = NUMPRI; pri--; )
490ev_postfork_parent (void) 1070 if (pendingcnt [pri])
491{ 1071 return 1;
492 /* nop */
493}
494 1072
495void 1073 return 0;
496ev_postfork_child (void)
497{
498#if EV_USE_EPOLL
499 if (ev_method == EVMETHOD_EPOLL)
500 epoll_postfork_child ();
501#endif
502
503 ev_io_stop (&sigev);
504 close (sigpipe [0]);
505 close (sigpipe [1]);
506 pipe (sigpipe);
507 siginit ();
508} 1074}
509 1075
510/*****************************************************************************/ 1076void inline_speed
511
512static void
513call_pending (void) 1077call_pending (EV_P)
514{ 1078{
1079 int pri;
1080
1081 for (pri = NUMPRI; pri--; )
515 while (pendingcnt) 1082 while (pendingcnt [pri])
516 { 1083 {
517 ANPENDING *p = pendings + --pendingcnt; 1084 ANPENDING *p = pendings [pri] + --pendingcnt [pri];
518 1085
519 if (p->w) 1086 if (expect_true (p->w))
520 { 1087 {
1088 assert (("non-pending watcher on pending list", p->w->pending));
1089
521 p->w->pending = 0; 1090 p->w->pending = 0;
522 p->w->cb (p->w, p->events); 1091 EV_CB_INVOKE (p->w, p->events);
523 } 1092 }
524 } 1093 }
525} 1094}
526 1095
527static void 1096void inline_size
528timers_reify (void) 1097timers_reify (EV_P)
529{ 1098{
530 while (timercnt && timers [0]->at <= now) 1099 while (timercnt && ((WT)timers [0])->at <= mn_now)
531 { 1100 {
532 struct ev_timer *w = timers [0]; 1101 ev_timer *w = timers [0];
533 1102
534 event ((W)w, EV_TIMEOUT); 1103 assert (("inactive timer on timer heap detected", ev_is_active (w)));
535 1104
536 /* first reschedule or stop timer */ 1105 /* first reschedule or stop timer */
537 if (w->repeat) 1106 if (w->repeat)
538 { 1107 {
1108 assert (("negative ev_timer repeat value found while processing timers", w->repeat > 0.));
1109
539 w->at = now + w->repeat; 1110 ((WT)w)->at += w->repeat;
540 assert (("timer timeout in the past, negative repeat?", w->at > now)); 1111 if (((WT)w)->at < mn_now)
1112 ((WT)w)->at = mn_now;
1113
541 downheap ((WT *)timers, timercnt, 0); 1114 downheap ((WT *)timers, timercnt, 0);
542 } 1115 }
543 else 1116 else
544 ev_timer_stop (w); /* nonrepeating: stop timer */ 1117 ev_timer_stop (EV_A_ w); /* nonrepeating: stop timer */
545 }
546}
547 1118
548static void 1119 ev_feed_event (EV_A_ (W)w, EV_TIMEOUT);
1120 }
1121}
1122
1123#if EV_PERIODIC_ENABLE
1124void inline_size
549periodics_reify (void) 1125periodics_reify (EV_P)
550{ 1126{
551 while (periodiccnt && periodics [0]->at <= ev_now) 1127 while (periodiccnt && ((WT)periodics [0])->at <= ev_rt_now)
552 { 1128 {
553 struct ev_periodic *w = periodics [0]; 1129 ev_periodic *w = periodics [0];
1130
1131 assert (("inactive timer on periodic heap detected", ev_is_active (w)));
554 1132
555 /* first reschedule or stop timer */ 1133 /* first reschedule or stop timer */
556 if (w->interval) 1134 if (w->reschedule_cb)
557 { 1135 {
1136 ((WT)w)->at = w->reschedule_cb (w, ev_rt_now + 0.0001);
1137 assert (("ev_periodic reschedule callback returned time in the past", ((WT)w)->at > ev_rt_now));
1138 downheap ((WT *)periodics, periodiccnt, 0);
1139 }
1140 else if (w->interval)
1141 {
558 w->at += floor ((ev_now - w->at) / w->interval + 1.) * w->interval; 1142 ((WT)w)->at += floor ((ev_rt_now - ((WT)w)->at) / w->interval + 1.) * w->interval;
559 assert (("periodic timeout in the past, negative interval?", w->at > ev_now)); 1143 assert (("ev_periodic timeout in the past detected while processing timers, negative interval?", ((WT)w)->at > ev_rt_now));
560 downheap ((WT *)periodics, periodiccnt, 0); 1144 downheap ((WT *)periodics, periodiccnt, 0);
561 } 1145 }
562 else 1146 else
563 ev_periodic_stop (w); /* nonrepeating: stop timer */ 1147 ev_periodic_stop (EV_A_ w); /* nonrepeating: stop timer */
564 1148
565 event ((W)w, EV_TIMEOUT); 1149 ev_feed_event (EV_A_ (W)w, EV_PERIODIC);
566 } 1150 }
567} 1151}
568 1152
569static void 1153static void noinline
570periodics_reschedule (ev_tstamp diff) 1154periodics_reschedule (EV_P)
571{ 1155{
572 int i; 1156 int i;
573 1157
574 /* adjust periodics after time jump */ 1158 /* adjust periodics after time jump */
575 for (i = 0; i < periodiccnt; ++i) 1159 for (i = 0; i < periodiccnt; ++i)
576 { 1160 {
577 struct ev_periodic *w = periodics [i]; 1161 ev_periodic *w = periodics [i];
578 1162
1163 if (w->reschedule_cb)
1164 ((WT)w)->at = w->reschedule_cb (w, ev_rt_now);
579 if (w->interval) 1165 else if (w->interval)
1166 ((WT)w)->at += ceil ((ev_rt_now - ((WT)w)->at) / w->interval) * w->interval;
1167 }
1168
1169 /* now rebuild the heap */
1170 for (i = periodiccnt >> 1; i--; )
1171 downheap ((WT *)periodics, periodiccnt, i);
1172}
1173#endif
1174
1175int inline_size
1176time_update_monotonic (EV_P)
1177{
1178 mn_now = get_clock ();
1179
1180 if (expect_true (mn_now - now_floor < MIN_TIMEJUMP * .5))
1181 {
1182 ev_rt_now = rtmn_diff + mn_now;
1183 return 0;
1184 }
1185 else
1186 {
1187 now_floor = mn_now;
1188 ev_rt_now = ev_time ();
1189 return 1;
1190 }
1191}
1192
1193void inline_size
1194time_update (EV_P)
1195{
1196 int i;
1197
1198#if EV_USE_MONOTONIC
1199 if (expect_true (have_monotonic))
1200 {
1201 if (time_update_monotonic (EV_A))
580 { 1202 {
581 ev_tstamp diff = ceil ((ev_now - w->at) / w->interval) * w->interval; 1203 ev_tstamp odiff = rtmn_diff;
582 1204
583 if (fabs (diff) >= 1e-4) 1205 /* loop a few times, before making important decisions.
1206 * on the choice of "4": one iteration isn't enough,
1207 * in case we get preempted during the calls to
 1208 * ev_time and get_clock. a second call is almost guaranteed
1209 * to succeed in that case, though. and looping a few more times
1210 * doesn't hurt either as we only do this on time-jumps or
1211 * in the unlikely event of getting preempted here.
1212 */
1213 for (i = 4; --i; )
584 { 1214 {
585 ev_periodic_stop (w); 1215 rtmn_diff = ev_rt_now - mn_now;
586 ev_periodic_start (w);
587 1216
588 i = 0; /* restart loop, inefficient, but time jumps should be rare */ 1217 if (fabs (odiff - rtmn_diff) < MIN_TIMEJUMP)
1218 return; /* all is well */
1219
1220 ev_rt_now = ev_time ();
1221 mn_now = get_clock ();
1222 now_floor = mn_now;
589 } 1223 }
1224
1225# if EV_PERIODIC_ENABLE
1226 periodics_reschedule (EV_A);
1227# endif
1228 /* no timer adjustment, as the monotonic clock doesn't jump */
1229 /* timers_reschedule (EV_A_ rtmn_diff - odiff) */
590 } 1230 }
591 } 1231 }
592} 1232 else
593 1233#endif
594static void 1234 {
595time_update (void)
596{
597 int i;
598
599 ev_now = ev_time (); 1235 ev_rt_now = ev_time ();
600 1236
601 if (have_monotonic) 1237 if (expect_false (mn_now > ev_rt_now || mn_now < ev_rt_now - MAX_BLOCKTIME - MIN_TIMEJUMP))
602 {
603 ev_tstamp odiff = diff;
604
605 for (i = 4; --i; ) /* loop a few times, before making important decisions */
606 { 1238 {
607 now = get_clock (); 1239#if EV_PERIODIC_ENABLE
608 diff = ev_now - now;
609
610 if (fabs (odiff - diff) < MIN_TIMEJUMP)
611 return; /* all is well */
612
613 ev_now = ev_time ();
614 }
615
616 periodics_reschedule (diff - odiff);
617 /* no timer adjustment, as the monotonic clock doesn't jump */
618 }
619 else
620 {
621 if (now > ev_now || now < ev_now - MAX_BLOCKTIME - MIN_TIMEJUMP)
622 {
623 periodics_reschedule (ev_now - now); 1240 periodics_reschedule (EV_A);
1241#endif
624 1242
625 /* adjust timers. this is easy, as the offset is the same for all */ 1243 /* adjust timers. this is easy, as the offset is the same for all */
626 for (i = 0; i < timercnt; ++i) 1244 for (i = 0; i < timercnt; ++i)
627 timers [i]->at += diff; 1245 ((WT)timers [i])->at += ev_rt_now - mn_now;
628 } 1246 }
629 1247
630 now = ev_now; 1248 mn_now = ev_rt_now;
631 } 1249 }
632} 1250}
633 1251
634int ev_loop_done; 1252void
1253ev_ref (EV_P)
1254{
1255 ++activecnt;
1256}
635 1257
1258void
1259ev_unref (EV_P)
1260{
1261 --activecnt;
1262}
1263
1264static int loop_done;
1265
1266void
636void ev_loop (int flags) 1267ev_loop (EV_P_ int flags)
637{ 1268{
638 double block;
639 ev_loop_done = flags & (EVLOOP_ONESHOT | EVLOOP_NONBLOCK) ? 1 : 0; 1269 loop_done = flags & (EVLOOP_ONESHOT | EVLOOP_NONBLOCK)
1270 ? EVUNLOOP_ONE
1271 : EVUNLOOP_CANCEL;
640 1272
641 do 1273 while (activecnt)
642 { 1274 {
643 /* queue check watchers (and execute them) */ 1275 /* queue check watchers (and execute them) */
644 if (preparecnt) 1276 if (expect_false (preparecnt))
645 { 1277 {
646 queue_events ((W *)prepares, preparecnt, EV_PREPARE); 1278 queue_events (EV_A_ (W *)prepares, preparecnt, EV_PREPARE);
647 call_pending (); 1279 call_pending (EV_A);
648 } 1280 }
649 1281
1282 /* we might have forked, so reify kernel state if necessary */
1283 if (expect_false (postfork))
1284 loop_fork (EV_A);
1285
650 /* update fd-related kernel structures */ 1286 /* update fd-related kernel structures */
651 fd_reify (); 1287 fd_reify (EV_A);
652 1288
653 /* calculate blocking time */ 1289 /* calculate blocking time */
1290 {
1291 double block;
654 1292
655 /* we only need this for !monotonic clockor timers, but as we basically
656 always have timers, we just calculate it always */
657 ev_now = ev_time ();
658
659 if (flags & EVLOOP_NONBLOCK || idlecnt) 1293 if (flags & EVLOOP_NONBLOCK || idlecnt)
660 block = 0.; 1294 block = 0.; /* do not block at all */
661 else 1295 else
662 { 1296 {
1297 /* update time to cancel out callback processing overhead */
1298#if EV_USE_MONOTONIC
1299 if (expect_true (have_monotonic))
1300 time_update_monotonic (EV_A);
1301 else
1302#endif
1303 {
1304 ev_rt_now = ev_time ();
1305 mn_now = ev_rt_now;
1306 }
1307
663 block = MAX_BLOCKTIME; 1308 block = MAX_BLOCKTIME;
664 1309
665 if (timercnt) 1310 if (timercnt)
666 { 1311 {
667 ev_tstamp to = timers [0]->at - (have_monotonic ? get_clock () : ev_now) + method_fudge; 1312 ev_tstamp to = ((WT)timers [0])->at - mn_now + backend_fudge;
668 if (block > to) block = to; 1313 if (block > to) block = to;
669 } 1314 }
670 1315
1316#if EV_PERIODIC_ENABLE
671 if (periodiccnt) 1317 if (periodiccnt)
672 { 1318 {
673 ev_tstamp to = periodics [0]->at - ev_now + method_fudge; 1319 ev_tstamp to = ((WT)periodics [0])->at - ev_rt_now + backend_fudge;
674 if (block > to) block = to; 1320 if (block > to) block = to;
675 } 1321 }
1322#endif
676 1323
677 if (block < 0.) block = 0.; 1324 if (expect_false (block < 0.)) block = 0.;
678 } 1325 }
679 1326
680 method_poll (block); 1327 backend_poll (EV_A_ block);
1328 }
681 1329
682 /* update ev_now, do magic */ 1330 /* update ev_rt_now, do magic */
683 time_update (); 1331 time_update (EV_A);
684 1332
685 /* queue pending timers and reschedule them */ 1333 /* queue pending timers and reschedule them */
686 timers_reify (); /* relative timers called last */ 1334 timers_reify (EV_A); /* relative timers called last */
1335#if EV_PERIODIC_ENABLE
687 periodics_reify (); /* absolute timers called first */ 1336 periodics_reify (EV_A); /* absolute timers called first */
1337#endif
688 1338
689 /* queue idle watchers unless io or timers are pending */ 1339 /* queue idle watchers unless other events are pending */
690 if (!pendingcnt) 1340 if (idlecnt && !any_pending (EV_A))
691 queue_events ((W *)idles, idlecnt, EV_IDLE); 1341 queue_events (EV_A_ (W *)idles, idlecnt, EV_IDLE);
692 1342
693 /* queue check watchers, to be executed first */ 1343 /* queue check watchers, to be executed first */
694 if (checkcnt) 1344 if (expect_false (checkcnt))
695 queue_events ((W *)checks, checkcnt, EV_CHECK); 1345 queue_events (EV_A_ (W *)checks, checkcnt, EV_CHECK);
696 1346
697 call_pending (); 1347 call_pending (EV_A);
698 }
699 while (!ev_loop_done);
700 1348
701 if (ev_loop_done != 2) 1349 if (expect_false (loop_done))
1350 break;
1351 }
1352
1353 if (loop_done == EVUNLOOP_ONE)
1354 loop_done = EVUNLOOP_CANCEL;
1355}
1356
1357void
1358ev_unloop (EV_P_ int how)
1359{
702 ev_loop_done = 0; 1360 loop_done = how;
703} 1361}
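/* Minimal end-to-end sketch of the loop API above (application code; assumes
   an EV_MULTIPLICITY build, where watcher callbacks receive the loop first). */
#include "ev.h"
#include <stdio.h>

static ev_io stdin_watcher;

static void
stdin_cb (EV_P_ ev_io *w, int revents)
{
  puts ("stdin is readable");
  ev_unloop (EV_A_ EVUNLOOP_ONE);   /* leave this ev_loop invocation */
}

int
main (void)
{
  struct ev_loop *loop = ev_default_loop (0);   /* 0: recommended backends */

  ev_io_init (&stdin_watcher, stdin_cb, 0 /* stdin */, EV_READ);
  ev_io_start (loop, &stdin_watcher);

  ev_loop (loop, 0);   /* runs until ev_unloop or no active watchers remain */
  return 0;
}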
704 1362
705/*****************************************************************************/ 1363/*****************************************************************************/
706 1364
707static void 1365void inline_size
708wlist_add (WL *head, WL elem) 1366wlist_add (WL *head, WL elem)
709{ 1367{
710 elem->next = *head; 1368 elem->next = *head;
711 *head = elem; 1369 *head = elem;
712} 1370}
713 1371
714static void 1372void inline_size
715wlist_del (WL *head, WL elem) 1373wlist_del (WL *head, WL elem)
716{ 1374{
717 while (*head) 1375 while (*head)
718 { 1376 {
719 if (*head == elem) 1377 if (*head == elem)
724 1382
725 head = &(*head)->next; 1383 head = &(*head)->next;
726 } 1384 }
727} 1385}
728 1386
729static void 1387void inline_speed
730ev_clear (W w) 1388ev_clear_pending (EV_P_ W w)
731{ 1389{
732 if (w->pending) 1390 if (w->pending)
733 { 1391 {
734 pendings [w->pending - 1].w = 0; 1392 pendings [ABSPRI (w)][w->pending - 1].w = 0;
735 w->pending = 0; 1393 w->pending = 0;
736 } 1394 }
737} 1395}
738 1396
739static void 1397void inline_speed
740ev_start (W w, int active) 1398ev_start (EV_P_ W w, int active)
741{ 1399{
1400 if (w->priority < EV_MINPRI) w->priority = EV_MINPRI;
1401 if (w->priority > EV_MAXPRI) w->priority = EV_MAXPRI;
1402
742 w->active = active; 1403 w->active = active;
1404 ev_ref (EV_A);
743} 1405}
744 1406
745static void 1407void inline_size
746ev_stop (W w) 1408ev_stop (EV_P_ W w)
747{ 1409{
1410 ev_unref (EV_A);
748 w->active = 0; 1411 w->active = 0;
749} 1412}
750 1413
751/*****************************************************************************/ 1414/*****************************************************************************/
752 1415
753void 1416void
754ev_io_start (struct ev_io *w) 1417ev_io_start (EV_P_ ev_io *w)
755{ 1418{
756 if (ev_is_active (w))
757 return;
758
759 int fd = w->fd; 1419 int fd = w->fd;
760 1420
1421 if (expect_false (ev_is_active (w)))
1422 return;
1423
1424 assert (("ev_io_start called with negative fd", fd >= 0));
1425
761 ev_start ((W)w, 1); 1426 ev_start (EV_A_ (W)w, 1);
762 array_needsize (anfds, anfdmax, fd + 1, anfds_init); 1427 array_needsize (ANFD, anfds, anfdmax, fd + 1, anfds_init);
763 wlist_add ((WL *)&anfds[fd].head, (WL)w); 1428 wlist_add ((WL *)&anfds[fd].head, (WL)w);
764 1429
765 fd_change (fd); 1430 fd_change (EV_A_ fd);
766} 1431}
767 1432
768void 1433void
769ev_io_stop (struct ev_io *w) 1434ev_io_stop (EV_P_ ev_io *w)
770{ 1435{
771 ev_clear ((W)w); 1436 ev_clear_pending (EV_A_ (W)w);
772 if (!ev_is_active (w)) 1437 if (expect_false (!ev_is_active (w)))
773 return; 1438 return;
1439
1440 assert (("ev_io_start called with illegal fd (must stay constant after start!)", w->fd >= 0 && w->fd < anfdmax));
774 1441
775 wlist_del ((WL *)&anfds[w->fd].head, (WL)w); 1442 wlist_del ((WL *)&anfds[w->fd].head, (WL)w);
776 ev_stop ((W)w); 1443 ev_stop (EV_A_ (W)w);
777 1444
778 fd_change (w->fd); 1445 fd_change (EV_A_ w->fd);
779} 1446}
780 1447
781void 1448void
782ev_timer_start (struct ev_timer *w) 1449ev_timer_start (EV_P_ ev_timer *w)
783{ 1450{
784 if (ev_is_active (w)) 1451 if (expect_false (ev_is_active (w)))
785 return; 1452 return;
786 1453
787 w->at += now; 1454 ((WT)w)->at += mn_now;
788 1455
789 assert (("timer repeat value less than zero not allowed", w->repeat >= 0.)); 1456 assert (("ev_timer_start called with negative timer repeat value", w->repeat >= 0.));
790 1457
791 ev_start ((W)w, ++timercnt); 1458 ev_start (EV_A_ (W)w, ++timercnt);
792 array_needsize (timers, timermax, timercnt, ); 1459 array_needsize (ev_timer *, timers, timermax, timercnt, EMPTY2);
793 timers [timercnt - 1] = w; 1460 timers [timercnt - 1] = w;
794 upheap ((WT *)timers, timercnt - 1); 1461 upheap ((WT *)timers, timercnt - 1);
795}
796 1462
1463 assert (("internal timer heap corruption", timers [((W)w)->active - 1] == w));
1464}
1465
797void 1466void
798ev_timer_stop (struct ev_timer *w) 1467ev_timer_stop (EV_P_ ev_timer *w)
799{ 1468{
800 ev_clear ((W)w); 1469 ev_clear_pending (EV_A_ (W)w);
801 if (!ev_is_active (w)) 1470 if (expect_false (!ev_is_active (w)))
802 return; 1471 return;
803 1472
1473 assert (("internal timer heap corruption", timers [((W)w)->active - 1] == w));
1474
804 if (w->active < timercnt--) 1475 if (expect_true (((W)w)->active < timercnt--))
805 { 1476 {
806 timers [w->active - 1] = timers [timercnt]; 1477 timers [((W)w)->active - 1] = timers [timercnt];
807 downheap ((WT *)timers, timercnt, w->active - 1); 1478 adjustheap ((WT *)timers, timercnt, ((W)w)->active - 1);
808 } 1479 }
809 1480
810 w->at = w->repeat; 1481 ((WT)w)->at -= mn_now;
811 1482
812 ev_stop ((W)w); 1483 ev_stop (EV_A_ (W)w);
813} 1484}
814 1485
815void 1486void
816ev_timer_again (struct ev_timer *w) 1487ev_timer_again (EV_P_ ev_timer *w)
817{ 1488{
818 if (ev_is_active (w)) 1489 if (ev_is_active (w))
819 { 1490 {
820 if (w->repeat) 1491 if (w->repeat)
821 { 1492 {
822 w->at = now + w->repeat; 1493 ((WT)w)->at = mn_now + w->repeat;
823 downheap ((WT *)timers, timercnt, w->active - 1); 1494 adjustheap ((WT *)timers, timercnt, ((W)w)->active - 1);
824 } 1495 }
825 else 1496 else
826 ev_timer_stop (w); 1497 ev_timer_stop (EV_A_ w);
827 } 1498 }
828 else if (w->repeat) 1499 else if (w->repeat)
1500 {
1501 w->at = w->repeat;
829 ev_timer_start (w); 1502 ev_timer_start (EV_A_ w);
1503 }
830} 1504}
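/* Hedged sketch of the pattern ev_timer_again is meant for (application code,
   illustrative names): an inactivity timeout pushed back on each activity,
   without the stop/set/start dance. */
static ev_timer timeout_watcher;   /* set up once with:
                                      ev_timer_init (&timeout_watcher, timeout_cb, 0., 60.) */

static void
timeout_cb (EV_P_ ev_timer *w, int revents)
{
  /* 60 seconds passed without activity */
}

static void
activity_seen (EV_P)
{
  /* push the deadline back; starts the timer if it is not yet active */
  ev_timer_again (EV_A_ &timeout_watcher);
}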
831 1505
1506#if EV_PERIODIC_ENABLE
832void 1507void
833ev_periodic_start (struct ev_periodic *w) 1508ev_periodic_start (EV_P_ ev_periodic *w)
834{ 1509{
835 if (ev_is_active (w)) 1510 if (expect_false (ev_is_active (w)))
836 return; 1511 return;
837 1512
838 assert (("periodic interval value less than zero not allowed", w->interval >= 0.)); 1513 if (w->reschedule_cb)
839 1514 ((WT)w)->at = w->reschedule_cb (w, ev_rt_now);
1515 else if (w->interval)
1516 {
1517 assert (("ev_periodic_start called with negative interval value", w->interval >= 0.));
840 /* this formula differs from the one in periodic_reify because we do not always round up */ 1518 /* this formula differs from the one in periodic_reify because we do not always round up */
841 if (w->interval)
842 w->at += ceil ((ev_now - w->at) / w->interval) * w->interval; 1519 ((WT)w)->at += ceil ((ev_rt_now - ((WT)w)->at) / w->interval) * w->interval;
1520 }
843 1521
844 ev_start ((W)w, ++periodiccnt); 1522 ev_start (EV_A_ (W)w, ++periodiccnt);
845 array_needsize (periodics, periodicmax, periodiccnt, ); 1523 array_needsize (ev_periodic *, periodics, periodicmax, periodiccnt, EMPTY2);
846 periodics [periodiccnt - 1] = w; 1524 periodics [periodiccnt - 1] = w;
847 upheap ((WT *)periodics, periodiccnt - 1); 1525 upheap ((WT *)periodics, periodiccnt - 1);
848}
849 1526
1527 assert (("internal periodic heap corruption", periodics [((W)w)->active - 1] == w));
1528}
1529
850void 1530void
851ev_periodic_stop (struct ev_periodic *w) 1531ev_periodic_stop (EV_P_ ev_periodic *w)
852{ 1532{
853 ev_clear ((W)w); 1533 ev_clear_pending (EV_A_ (W)w);
854 if (!ev_is_active (w)) 1534 if (expect_false (!ev_is_active (w)))
855 return; 1535 return;
856 1536
1537 assert (("internal periodic heap corruption", periodics [((W)w)->active - 1] == w));
1538
857 if (w->active < periodiccnt--) 1539 if (expect_true (((W)w)->active < periodiccnt--))
858 { 1540 {
859 periodics [w->active - 1] = periodics [periodiccnt]; 1541 periodics [((W)w)->active - 1] = periodics [periodiccnt];
860 downheap ((WT *)periodics, periodiccnt, w->active - 1); 1542 adjustheap ((WT *)periodics, periodiccnt, ((W)w)->active - 1);
861 } 1543 }
862 1544
863 ev_stop ((W)w); 1545 ev_stop (EV_A_ (W)w);
864} 1546}
865 1547
866void 1548void
1549ev_periodic_again (EV_P_ ev_periodic *w)
1550{
1551 /* TODO: use adjustheap and recalculation */
1552 ev_periodic_stop (EV_A_ w);
1553 ev_periodic_start (EV_A_ w);
1554}
1555#endif
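/* Hedged sketch of the reschedule_cb hook used above (application code,
   hypothetical; the returned time must not lie in the past, as the assert
   in periodics_reify checks). The 5-argument init form is an assumption. */
static ev_tstamp
hourly_reschedule (ev_periodic *w, ev_tstamp now)
{
  return now + 3600.;   /* next trigger one hour later, unaffected by clock jumps */
}

/* illustrative setup:
     ev_periodic_init (&hourly, periodic_cb, 0., 0., hourly_reschedule);
     ev_periodic_start (EV_A_ &hourly);                                  */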
1556
1557void
1558ev_idle_start (EV_P_ ev_idle *w)
1559{
1560 if (expect_false (ev_is_active (w)))
1561 return;
1562
1563 ev_start (EV_A_ (W)w, ++idlecnt);
1564 array_needsize (ev_idle *, idles, idlemax, idlecnt, EMPTY2);
1565 idles [idlecnt - 1] = w;
1566}
1567
1568void
1569ev_idle_stop (EV_P_ ev_idle *w)
1570{
1571 ev_clear_pending (EV_A_ (W)w);
1572 if (expect_false (!ev_is_active (w)))
1573 return;
1574
1575 {
1576 int active = ((W)w)->active;
1577 idles [active - 1] = idles [--idlecnt];
1578 ((W)idles [active - 1])->active = active;
1579 }
1580
1581 ev_stop (EV_A_ (W)w);
1582}
1583
1584void
1585ev_prepare_start (EV_P_ ev_prepare *w)
1586{
1587 if (expect_false (ev_is_active (w)))
1588 return;
1589
1590 ev_start (EV_A_ (W)w, ++preparecnt);
1591 array_needsize (ev_prepare *, prepares, preparemax, preparecnt, EMPTY2);
1592 prepares [preparecnt - 1] = w;
1593}
1594
1595void
1596ev_prepare_stop (EV_P_ ev_prepare *w)
1597{
1598 ev_clear_pending (EV_A_ (W)w);
1599 if (expect_false (!ev_is_active (w)))
1600 return;
1601
1602 {
1603 int active = ((W)w)->active;
1604 prepares [active - 1] = prepares [--preparecnt];
1605 ((W)prepares [active - 1])->active = active;
1606 }
1607
1608 ev_stop (EV_A_ (W)w);
1609}
1610
1611void
1612ev_check_start (EV_P_ ev_check *w)
1613{
1614 if (expect_false (ev_is_active (w)))
1615 return;
1616
1617 ev_start (EV_A_ (W)w, ++checkcnt);
1618 array_needsize (ev_check *, checks, checkmax, checkcnt, EMPTY2);
1619 checks [checkcnt - 1] = w;
1620}
1621
1622void
1623ev_check_stop (EV_P_ ev_check *w)
1624{
1625 ev_clear_pending (EV_A_ (W)w);
1626 if (expect_false (!ev_is_active (w)))
1627 return;
1628
1629 {
1630 int active = ((W)w)->active;
1631 checks [active - 1] = checks [--checkcnt];
1632 ((W)checks [active - 1])->active = active;
1633 }
1634
1635 ev_stop (EV_A_ (W)w);
1636}
1637
1638#ifndef SA_RESTART
1639# define SA_RESTART 0
1640#endif
1641
1642void
867ev_signal_start (struct ev_signal *w) 1643ev_signal_start (EV_P_ ev_signal *w)
868{ 1644{
1645#if EV_MULTIPLICITY
1646 assert (("signal watchers are only supported in the default loop", loop == ev_default_loop_ptr));
1647#endif
869 if (ev_is_active (w)) 1648 if (expect_false (ev_is_active (w)))
870 return; 1649 return;
871 1650
1651 assert (("ev_signal_start called with illegal signal number", w->signum > 0));
1652
872 ev_start ((W)w, 1); 1653 ev_start (EV_A_ (W)w, 1);
873 array_needsize (signals, signalmax, w->signum, signals_init); 1654 array_needsize (ANSIG, signals, signalmax, w->signum, signals_init);
874 wlist_add ((WL *)&signals [w->signum - 1].head, (WL)w); 1655 wlist_add ((WL *)&signals [w->signum - 1].head, (WL)w);
875 1656
876 if (!w->next) 1657 if (!((WL)w)->next)
877 { 1658 {
1659#if _WIN32
1660 signal (w->signum, sighandler);
1661#else
878 struct sigaction sa; 1662 struct sigaction sa;
879 sa.sa_handler = sighandler; 1663 sa.sa_handler = sighandler;
880 sigfillset (&sa.sa_mask); 1664 sigfillset (&sa.sa_mask);
881 sa.sa_flags = 0; 1665 sa.sa_flags = SA_RESTART; /* if restarting works we save one iteration */
882 sigaction (w->signum, &sa, 0); 1666 sigaction (w->signum, &sa, 0);
1667#endif
883 } 1668 }
884} 1669}
885 1670
886void 1671void
887ev_signal_stop (struct ev_signal *w) 1672ev_signal_stop (EV_P_ ev_signal *w)
888{ 1673{
889 ev_clear ((W)w); 1674 ev_clear_pending (EV_A_ (W)w);
890 if (!ev_is_active (w)) 1675 if (expect_false (!ev_is_active (w)))
891 return; 1676 return;
892 1677
893 wlist_del ((WL *)&signals [w->signum - 1].head, (WL)w); 1678 wlist_del ((WL *)&signals [w->signum - 1].head, (WL)w);
894 ev_stop ((W)w); 1679 ev_stop (EV_A_ (W)w);
895 1680
896 if (!signals [w->signum - 1].head) 1681 if (!signals [w->signum - 1].head)
897 signal (w->signum, SIG_DFL); 1682 signal (w->signum, SIG_DFL);
898} 1683}
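/* Hedged SIGINT sketch for the signal path above (application code,
   hypothetical names; signal watchers only work in the default loop). */
static ev_signal sigint_watcher;

static void
sigint_cb (EV_P_ ev_signal *w, int revents)
{
  ev_unloop (EV_A_ EVUNLOOP_ALL);   /* break out of all nested ev_loop calls */
}

/* illustrative setup:
     ev_signal_init (&sigint_watcher, sigint_cb, SIGINT);
     ev_signal_start (EV_A_ &sigint_watcher);             */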
899 1684
900void 1685void
901ev_idle_start (struct ev_idle *w)
902{
903 if (ev_is_active (w))
904 return;
905
906 ev_start ((W)w, ++idlecnt);
907 array_needsize (idles, idlemax, idlecnt, );
908 idles [idlecnt - 1] = w;
909}
910
911void
912ev_idle_stop (struct ev_idle *w)
913{
914 ev_clear ((W)w);
915 if (ev_is_active (w))
916 return;
917
918 idles [w->active - 1] = idles [--idlecnt];
919 ev_stop ((W)w);
920}
921
922void
923ev_prepare_start (struct ev_prepare *w)
924{
925 if (ev_is_active (w))
926 return;
927
928 ev_start ((W)w, ++preparecnt);
929 array_needsize (prepares, preparemax, preparecnt, );
930 prepares [preparecnt - 1] = w;
931}
932
933void
934ev_prepare_stop (struct ev_prepare *w)
935{
936 ev_clear ((W)w);
937 if (ev_is_active (w))
938 return;
939
940 prepares [w->active - 1] = prepares [--preparecnt];
941 ev_stop ((W)w);
942}
943
944void
945ev_check_start (struct ev_check *w)
946{
947 if (ev_is_active (w))
948 return;
949
950 ev_start ((W)w, ++checkcnt);
951 array_needsize (checks, checkmax, checkcnt, );
952 checks [checkcnt - 1] = w;
953}
954
955void
956ev_check_stop (struct ev_check *w)
957{
958 ev_clear ((W)w);
959 if (ev_is_active (w))
960 return;
961
962 checks [w->active - 1] = checks [--checkcnt];
963 ev_stop ((W)w);
964}
965
966void
967ev_child_start (struct ev_child *w) 1686ev_child_start (EV_P_ ev_child *w)
968{ 1687{
1688#if EV_MULTIPLICITY
1689 assert (("child watchers are only supported in the default loop", loop == ev_default_loop_ptr));
1690#endif
969 if (ev_is_active (w)) 1691 if (expect_false (ev_is_active (w)))
970 return; 1692 return;
971 1693
972 ev_start ((W)w, 1); 1694 ev_start (EV_A_ (W)w, 1);
973 wlist_add ((WL *)&childs [w->pid & (PID_HASHSIZE - 1)], (WL)w); 1695 wlist_add ((WL *)&childs [w->pid & (PID_HASHSIZE - 1)], (WL)w);
974} 1696}
975 1697
976void 1698void
977ev_child_stop (struct ev_child *w) 1699ev_child_stop (EV_P_ ev_child *w)
978{ 1700{
979 ev_clear ((W)w); 1701 ev_clear_pending (EV_A_ (W)w);
980 if (ev_is_active (w)) 1702 if (expect_false (!ev_is_active (w)))
981 return; 1703 return;
982 1704
983 wlist_del ((WL *)&childs [w->pid & (PID_HASHSIZE - 1)], (WL)w); 1705 wlist_del ((WL *)&childs [w->pid & (PID_HASHSIZE - 1)], (WL)w);
984 ev_stop ((W)w); 1706 ev_stop (EV_A_ (W)w);
985} 1707}
1708
1709#if EV_EMBED_ENABLE
1710void noinline
1711ev_embed_sweep (EV_P_ ev_embed *w)
1712{
1713 ev_loop (w->loop, EVLOOP_NONBLOCK);
1714}
1715
1716static void
1717embed_cb (EV_P_ ev_io *io, int revents)
1718{
1719 ev_embed *w = (ev_embed *)(((char *)io) - offsetof (ev_embed, io));
1720
1721 if (ev_cb (w))
1722 ev_feed_event (EV_A_ (W)w, EV_EMBED);
1723 else
1724 ev_embed_sweep (loop, w);
1725}
1726
1727void
1728ev_embed_start (EV_P_ ev_embed *w)
1729{
1730 if (expect_false (ev_is_active (w)))
1731 return;
1732
1733 {
1734 struct ev_loop *loop = w->loop;
1735 assert (("loop to be embedded is not embeddable", backend & ev_embeddable_backends ()));
1736 ev_io_init (&w->io, embed_cb, backend_fd, EV_READ);
1737 }
1738
1739 ev_set_priority (&w->io, ev_priority (w));
1740 ev_io_start (EV_A_ &w->io);
1741
1742 ev_start (EV_A_ (W)w, 1);
1743}
1744
1745void
1746ev_embed_stop (EV_P_ ev_embed *w)
1747{
1748 ev_clear_pending (EV_A_ (W)w);
1749 if (expect_false (!ev_is_active (w)))
1750 return;
1751
1752 ev_io_stop (EV_A_ &w->io);
1753
1754 ev_stop (EV_A_ (W)w);
1755}
1756#endif
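As a usage reference for the embed watcher above, roughly following the pattern from the libev documentation (an illustrative fragment; the flag arithmetic and the ev_embed_init signature are assumptions about this API generation): create a second loop limited to an embeddable backend and let the default loop sweep it by passing a 0 callback.

    struct ev_loop *loop       = ev_default_loop (0);
    struct ev_loop *loop_embed = 0;
    unsigned int embeddable    = ev_embeddable_backends () & ev_supported_backends ();
    ev_embed embed;

    if (embeddable && (loop_embed = ev_loop_new (embeddable)))
      {
        ev_embed_init (&embed, 0, loop_embed);  /* 0 callback => embed_cb calls ev_embed_sweep itself */
        ev_embed_start (loop, &embed);
      }

    if (!loop_embed)
      loop_embed = loop;  /* nothing embeddable: fall back to the main loop */

    /* register fd watchers on loop_embed, everything else on loop */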
1757
1758#if EV_STAT_ENABLE
1759
1760# ifdef _WIN32
1761# define lstat(a,b) stat(a,b)
1762# endif
1763
1764void
1765ev_stat_stat (EV_P_ ev_stat *w)
1766{
1767 if (lstat (w->path, &w->attr) < 0)
1768 w->attr.st_nlink = 0;
1769 else if (!w->attr.st_nlink)
1770 w->attr.st_nlink = 1;
1771}
1772
1773static void
1774stat_timer_cb (EV_P_ ev_timer *w_, int revents)
1775{
1776 ev_stat *w = (ev_stat *)(((char *)w_) - offsetof (ev_stat, timer));
1777
 1778   /* we copy this here each time so that */
1779 /* prev has the old value when the callback gets invoked */
1780 w->prev = w->attr;
1781 ev_stat_stat (EV_A_ w);
1782
1783 if (memcmp (&w->prev, &w->attr, sizeof (ev_statdata)))
1784 ev_feed_event (EV_A_ w, EV_STAT);
1785}
1786
1787void
1788ev_stat_start (EV_P_ ev_stat *w)
1789{
1790 if (expect_false (ev_is_active (w)))
1791 return;
1792
1793 /* since we use memcmp, we need to clear any padding data etc. */
1794 memset (&w->prev, 0, sizeof (ev_statdata));
1795 memset (&w->attr, 0, sizeof (ev_statdata));
1796
1797 ev_stat_stat (EV_A_ w);
1798
1799 ev_timer_init (&w->timer, stat_timer_cb, w->interval, w->interval);
1800 ev_set_priority (&w->timer, ev_priority (w));
1801 ev_timer_start (EV_A_ &w->timer);
1802
1803 ev_start (EV_A_ (W)w, 1);
1804}
1805
1806void
1807ev_stat_stop (EV_P_ ev_stat *w)
1808{
1809 ev_clear_pending (EV_A_ (W)w);
1810 if (expect_false (!ev_is_active (w)))
1811 return;
1812
1813 ev_timer_stop (EV_A_ &w->timer);
1814
1815 ev_stop (EV_A_ (W)w);
1816}
1817#endif
986 1818
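As a usage reference for the new stat watcher above, a minimal sketch (the path and the 5-second polling interval are arbitrary; EV_MULTIPLICITY is assumed):

    #include <stdio.h>
    #include "ev.h"

    static void
    stat_cb (struct ev_loop *loop, ev_stat *w, int revents)
    {
      if (w->attr.st_nlink)        /* st_nlink is forced to 0 when the path does not exist */
        printf ("%s changed\n", w->path);
      else
        printf ("%s is gone\n", w->path);
    }

    int
    main (void)
    {
      struct ev_loop *loop = ev_default_loop (0);
      ev_stat watcher;

      ev_stat_init (&watcher, stat_cb, "/etc/passwd", 5.);  /* re-stat the path every 5 seconds */
      ev_stat_start (loop, &watcher);

      ev_loop (loop, 0);
      return 0;
    }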
987/*****************************************************************************/ 1819/*****************************************************************************/
988 1820
989struct ev_once 1821struct ev_once
990{ 1822{
991 struct ev_io io; 1823 ev_io io;
992 struct ev_timer to; 1824 ev_timer to;
993 void (*cb)(int revents, void *arg); 1825 void (*cb)(int revents, void *arg);
994 void *arg; 1826 void *arg;
995}; 1827};
996 1828
997static void 1829static void
998once_cb (struct ev_once *once, int revents) 1830once_cb (EV_P_ struct ev_once *once, int revents)
999{ 1831{
1000 void (*cb)(int revents, void *arg) = once->cb; 1832 void (*cb)(int revents, void *arg) = once->cb;
1001 void *arg = once->arg; 1833 void *arg = once->arg;
1002 1834
1003 ev_io_stop (&once->io); 1835 ev_io_stop (EV_A_ &once->io);
1004 ev_timer_stop (&once->to); 1836 ev_timer_stop (EV_A_ &once->to);
1005 free (once); 1837 ev_free (once);
1006 1838
1007 cb (revents, arg); 1839 cb (revents, arg);
1008} 1840}
1009 1841
1010static void 1842static void
1011once_cb_io (struct ev_io *w, int revents) 1843once_cb_io (EV_P_ ev_io *w, int revents)
1012{ 1844{
1013 once_cb ((struct ev_once *)(((char *)w) - offsetof (struct ev_once, io)), revents); 1845 once_cb (EV_A_ (struct ev_once *)(((char *)w) - offsetof (struct ev_once, io)), revents);
1014} 1846}
1015 1847
1016static void 1848static void
1017once_cb_to (struct ev_timer *w, int revents) 1849once_cb_to (EV_P_ ev_timer *w, int revents)
1018{ 1850{
1019 once_cb ((struct ev_once *)(((char *)w) - offsetof (struct ev_once, to)), revents); 1851 once_cb (EV_A_ (struct ev_once *)(((char *)w) - offsetof (struct ev_once, to)), revents);
1020} 1852}
1021 1853
1022void 1854void
1023ev_once (int fd, int events, ev_tstamp timeout, void (*cb)(int revents, void *arg), void *arg) 1855ev_once (EV_P_ int fd, int events, ev_tstamp timeout, void (*cb)(int revents, void *arg), void *arg)
1024{ 1856{
1025 struct ev_once *once = malloc (sizeof (struct ev_once)); 1857 struct ev_once *once = (struct ev_once *)ev_malloc (sizeof (struct ev_once));
1026 1858
1027 if (!once) 1859 if (expect_false (!once))
1860 {
1028 cb (EV_ERROR | EV_READ | EV_WRITE | EV_TIMEOUT, arg); 1861 cb (EV_ERROR | EV_READ | EV_WRITE | EV_TIMEOUT, arg);
1029 else 1862 return;
1030 { 1863 }
1864
1031 once->cb = cb; 1865 once->cb = cb;
1032 once->arg = arg; 1866 once->arg = arg;
1033 1867
1034 ev_watcher_init (&once->io, once_cb_io); 1868 ev_init (&once->io, once_cb_io);
1035 if (fd >= 0) 1869 if (fd >= 0)
1036 { 1870 {
1037 ev_io_set (&once->io, fd, events); 1871 ev_io_set (&once->io, fd, events);
1038 ev_io_start (&once->io); 1872 ev_io_start (EV_A_ &once->io);
1039 } 1873 }
1040 1874
1041 ev_watcher_init (&once->to, once_cb_to); 1875 ev_init (&once->to, once_cb_to);
1042 if (timeout >= 0.) 1876 if (timeout >= 0.)
1043 { 1877 {
1044 ev_timer_set (&once->to, timeout, 0.); 1878 ev_timer_set (&once->to, timeout, 0.);
1045 ev_timer_start (&once->to); 1879 ev_timer_start (EV_A_ &once->to);
1046 }
1047 }
1048}
1049
1050/*****************************************************************************/
1051
1052#if 0
1053
1054struct ev_io wio;
1055
1056static void
1057sin_cb (struct ev_io *w, int revents)
1058{
1059 fprintf (stderr, "sin %d, revents %d\n", w->fd, revents);
1060}
1061
1062static void
1063ocb (struct ev_timer *w, int revents)
1064{
1065 //fprintf (stderr, "timer %f,%f (%x) (%f) d%p\n", w->at, w->repeat, revents, w->at - ev_time (), w->data);
1066 ev_timer_stop (w);
1067 ev_timer_start (w);
1068}
1069
1070static void
1071scb (struct ev_signal *w, int revents)
1072{
1073 fprintf (stderr, "signal %x,%d\n", revents, w->signum);
1074 ev_io_stop (&wio);
1075 ev_io_start (&wio);
1076}
1077
1078static void
1079gcb (struct ev_signal *w, int revents)
1080{
1081 fprintf (stderr, "generic %x\n", revents);
1082
1083}
1084
1085int main (void)
1086{
1087 ev_init (0);
1088
1089 ev_io_init (&wio, sin_cb, 0, EV_READ);
1090 ev_io_start (&wio);
1091
1092 struct ev_timer t[10000];
1093
1094#if 0
1095 int i;
1096 for (i = 0; i < 10000; ++i)
1097 { 1880 }
1098 struct ev_timer *w = t + i;
1099 ev_watcher_init (w, ocb, i);
1100 ev_timer_init_abs (w, ocb, drand48 (), 0.99775533);
1101 ev_timer_start (w);
1102 if (drand48 () < 0.5)
1103 ev_timer_stop (w);
1104 }
1105#endif
1106
1107 struct ev_timer t1;
1108 ev_timer_init (&t1, ocb, 5, 10);
1109 ev_timer_start (&t1);
1110
1111 struct ev_signal sig;
1112 ev_signal_init (&sig, scb, SIGQUIT);
1113 ev_signal_start (&sig);
1114
1115 struct ev_check cw;
1116 ev_check_init (&cw, gcb);
1117 ev_check_start (&cw);
1118
1119 struct ev_idle iw;
1120 ev_idle_init (&iw, gcb);
1121 ev_idle_start (&iw);
1122
1123 ev_loop (0);
1124
1125 return 0;
1126} 1881}
1127 1882
1883#ifdef __cplusplus
1884}
1128#endif 1885#endif
1129 1886
1130
1131
1132
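Finally, a sketch of the loop-aware ev_once above, which combines a one-shot io and timer watcher and frees itself after the first event (the fd, timeout and callback body are illustrative; EV_MULTIPLICITY is assumed):

    #include <stdio.h>
    #include "ev.h"

    static void
    stdin_cb (int revents, void *arg)
    {
      if (revents & EV_READ)
        puts ("stdin is readable");
      else if (revents & EV_TIMEOUT)
        puts ("timed out waiting for stdin");
    }

    int
    main (void)
    {
      struct ev_loop *loop = ev_default_loop (0);

      /* one-shot: wait for fd 0 to become readable or 10 seconds to pass, whichever comes first */
      ev_once (loop, /* fd = */ 0, EV_READ, 10., stdin_cb, 0);

      ev_loop (loop, 0);   /* returns once the internal watchers have stopped themselves */
      return 0;
    }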
