/cvs/libev/ev.c

Comparing libev/ev.c (file contents):
Revision 1.121 by root, Fri Nov 16 10:37:28 2007 UTC vs.
Revision 1.151 by root, Tue Nov 27 19:59:08 2007 UTC

32#ifdef __cplusplus 32#ifdef __cplusplus
33extern "C" { 33extern "C" {
34#endif 34#endif
35 35
36#ifndef EV_STANDALONE 36#ifndef EV_STANDALONE
37# ifdef EV_CONFIG_H
38# include EV_CONFIG_H
39# else
37# include "config.h" 40# include "config.h"
41# endif
38 42
39# if HAVE_CLOCK_GETTIME 43# if HAVE_CLOCK_GETTIME
40# ifndef EV_USE_MONOTONIC 44# ifndef EV_USE_MONOTONIC
41# define EV_USE_MONOTONIC 1 45# define EV_USE_MONOTONIC 1
42# endif 46# endif
43# ifndef EV_USE_REALTIME 47# ifndef EV_USE_REALTIME
44# define EV_USE_REALTIME 1 48# define EV_USE_REALTIME 1
45# endif 49# endif
50# else
51# ifndef EV_USE_MONOTONIC
52# define EV_USE_MONOTONIC 0
53# endif
54# ifndef EV_USE_REALTIME
55# define EV_USE_REALTIME 0
56# endif
46# endif 57# endif
47 58
48# if HAVE_SELECT && HAVE_SYS_SELECT_H && !defined (EV_USE_SELECT) 59# ifndef EV_USE_SELECT
60# if HAVE_SELECT && HAVE_SYS_SELECT_H
49# define EV_USE_SELECT 1 61# define EV_USE_SELECT 1
62# else
63# define EV_USE_SELECT 0
64# endif
50# endif 65# endif
51 66
52# if HAVE_POLL && HAVE_POLL_H && !defined (EV_USE_POLL) 67# ifndef EV_USE_POLL
68# if HAVE_POLL && HAVE_POLL_H
53# define EV_USE_POLL 1 69# define EV_USE_POLL 1
70# else
71# define EV_USE_POLL 0
72# endif
54# endif 73# endif
55 74
56# if HAVE_EPOLL_CTL && HAVE_SYS_EPOLL_H && !defined (EV_USE_EPOLL) 75# ifndef EV_USE_EPOLL
76# if HAVE_EPOLL_CTL && HAVE_SYS_EPOLL_H
57# define EV_USE_EPOLL 1 77# define EV_USE_EPOLL 1
78# else
79# define EV_USE_EPOLL 0
80# endif
58# endif 81# endif
59 82
83# ifndef EV_USE_KQUEUE
60# if HAVE_KQUEUE && HAVE_SYS_EVENT_H && HAVE_SYS_QUEUE_H && !defined (EV_USE_KQUEUE) 84# if HAVE_KQUEUE && HAVE_SYS_EVENT_H && HAVE_SYS_QUEUE_H
61# define EV_USE_KQUEUE 1 85# define EV_USE_KQUEUE 1
86# else
87# define EV_USE_KQUEUE 0
88# endif
62# endif 89# endif
63 90
64# if HAVE_PORT_H && HAVE_PORT_CREATE && !defined (EV_USE_PORT) 91# ifndef EV_USE_PORT
92# if HAVE_PORT_H && HAVE_PORT_CREATE
65# define EV_USE_PORT 1 93# define EV_USE_PORT 1
94# else
95# define EV_USE_PORT 0
96# endif
66# endif 97# endif
67 98
68#endif 99#endif
69 100
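
For embedders, a minimal sketch of how the configuration knobs above can be driven from a wrapper translation unit (the file names myev.c and my_config.h are hypothetical): either define EV_STANDALONE plus the individual EV_USE_* switches before including ev.c, or point the newly added EV_CONFIG_H macro at a custom header instead of autoconf's config.h.

    /* myev.c -- wrapper translation unit that embeds libev */
    #define EV_STANDALONE 1         /* do not read config.h at all */
    #define EV_USE_SELECT 1         /* always build the select () backend */
    #define EV_USE_EPOLL  1         /* build epoll support, used when the kernel has it */
    #define EV_USE_KQUEUE 0         /* leave kqueue out of this build */
    /* or keep autoconf-style configuration, but from a custom header:
       #define EV_CONFIG_H "my_config.h" */
    #include "ev.c"
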
70#include <math.h> 101#include <math.h>
80#include <time.h> 111#include <time.h>
81 112
82#include <signal.h> 113#include <signal.h>
83 114
84#ifndef _WIN32 115#ifndef _WIN32
85# include <unistd.h>
86# include <sys/time.h> 116# include <sys/time.h>
87# include <sys/wait.h> 117# include <sys/wait.h>
118# include <unistd.h>
88#else 119#else
89# define WIN32_LEAN_AND_MEAN 120# define WIN32_LEAN_AND_MEAN
90# include <windows.h> 121# include <windows.h>
91# ifndef EV_SELECT_IS_WINSOCKET 122# ifndef EV_SELECT_IS_WINSOCKET
92# define EV_SELECT_IS_WINSOCKET 1 123# define EV_SELECT_IS_WINSOCKET 1
125 156
126#ifndef EV_USE_PORT 157#ifndef EV_USE_PORT
127# define EV_USE_PORT 0 158# define EV_USE_PORT 0
128#endif 159#endif
129 160
161#ifndef EV_PID_HASHSIZE
162# if EV_MINIMAL
163# define EV_PID_HASHSIZE 1
164# else
165# define EV_PID_HASHSIZE 16
166# endif
167#endif
168
130/**/ 169/**/
131
132/* darwin simply cannot be helped */
133#ifdef __APPLE__
134# undef EV_USE_POLL
135# undef EV_USE_KQUEUE
136#endif
137 170
138#ifndef CLOCK_MONOTONIC 171#ifndef CLOCK_MONOTONIC
139# undef EV_USE_MONOTONIC 172# undef EV_USE_MONOTONIC
140# define EV_USE_MONOTONIC 0 173# define EV_USE_MONOTONIC 0
141#endif 174#endif
151 184
152/**/ 185/**/
153 186
154#define MIN_TIMEJUMP 1. /* minimum timejump that gets detected (if monotonic clock available) */ 187#define MIN_TIMEJUMP 1. /* minimum timejump that gets detected (if monotonic clock available) */
155#define MAX_BLOCKTIME 59.743 /* never wait longer than this time (to detect time jumps) */ 188#define MAX_BLOCKTIME 59.743 /* never wait longer than this time (to detect time jumps) */
156#define PID_HASHSIZE 16 /* size of pid hash table, must be power of two */
157/*#define CLEANUP_INTERVAL (MAX_BLOCKTIME * 5.) /* how often to try to free memory and re-check fds */ 189/*#define CLEANUP_INTERVAL (MAX_BLOCKTIME * 5.) /* how often to try to free memory and re-check fds */
158 190
159#ifdef EV_H 191#ifdef EV_H
160# include EV_H 192# include EV_H
161#else 193#else
162# include "ev.h" 194# include "ev.h"
163#endif 195#endif
164 196
165#if __GNUC__ >= 3 197#if __GNUC__ >= 3
166# define expect(expr,value) __builtin_expect ((expr),(value)) 198# define expect(expr,value) __builtin_expect ((expr),(value))
199# define inline_size static inline /* inline for codesize */
200# if EV_MINIMAL
167# define inline inline 201# define noinline __attribute__ ((noinline))
202# define inline_speed static noinline
203# else
204# define noinline
205# define inline_speed static inline
206# endif
168#else 207#else
169# define expect(expr,value) (expr) 208# define expect(expr,value) (expr)
209# define inline_speed static
170# define inline static 210# define inline_size static
211# define noinline
171#endif 212#endif
172 213
173#define expect_false(expr) expect ((expr) != 0, 0) 214#define expect_false(expr) expect ((expr) != 0, 0)
174#define expect_true(expr) expect ((expr) != 0, 1) 215#define expect_true(expr) expect ((expr) != 0, 1)
175 216
177#define ABSPRI(w) ((w)->priority - EV_MINPRI) 218#define ABSPRI(w) ((w)->priority - EV_MINPRI)
178 219
179#define EMPTY0 /* required for microsofts broken pseudo-c compiler */ 220#define EMPTY0 /* required for microsofts broken pseudo-c compiler */
180#define EMPTY2(a,b) /* used to suppress some warnings */ 221#define EMPTY2(a,b) /* used to suppress some warnings */
181 222
182typedef struct ev_watcher *W; 223typedef ev_watcher *W;
183typedef struct ev_watcher_list *WL; 224typedef ev_watcher_list *WL;
184typedef struct ev_watcher_time *WT; 225typedef ev_watcher_time *WT;
185 226
186static int have_monotonic; /* did clock_gettime (CLOCK_MONOTONIC) work? */ 227static int have_monotonic; /* did clock_gettime (CLOCK_MONOTONIC) work? */
187 228
188#ifdef _WIN32 229#ifdef _WIN32
189# include "ev_win32.c" 230# include "ev_win32.c"
191 232
192/*****************************************************************************/ 233/*****************************************************************************/
193 234
194static void (*syserr_cb)(const char *msg); 235static void (*syserr_cb)(const char *msg);
195 236
237void
196void ev_set_syserr_cb (void (*cb)(const char *msg)) 238ev_set_syserr_cb (void (*cb)(const char *msg))
197{ 239{
198 syserr_cb = cb; 240 syserr_cb = cb;
199} 241}
200 242
201static void 243static void noinline
202syserr (const char *msg) 244syserr (const char *msg)
203{ 245{
204 if (!msg) 246 if (!msg)
205 msg = "(libev) system error"; 247 msg = "(libev) system error";
206 248
211 perror (msg); 253 perror (msg);
212 abort (); 254 abort ();
213 } 255 }
214} 256}
215 257
216static void *(*alloc)(void *ptr, long size); 258static void *(*alloc)(void *ptr, size_t size) = realloc;
217 259
260void
218void ev_set_allocator (void *(*cb)(void *ptr, long size)) 261ev_set_allocator (void *(*cb)(void *ptr, size_t size))
219{ 262{
220 alloc = cb; 263 alloc = cb;
221} 264}
222 265
223static void * 266inline_speed void *
224ev_realloc (void *ptr, long size) 267ev_realloc (void *ptr, size_t size)
225{ 268{
226 ptr = alloc ? alloc (ptr, size) : realloc (ptr, size); 269 ptr = alloc (ptr, size);
227 270
228 if (!ptr && size) 271 if (!ptr && size)
229 { 272 {
230 fprintf (stderr, "libev: cannot allocate %ld bytes, aborting.", size); 273 fprintf (stderr, "libev: cannot allocate %ld bytes, aborting.", (long)size);
231 abort (); 274 abort ();
232 } 275 }
233 276
234 return ptr; 277 return ptr;
235} 278}
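
As a usage-level sketch of the allocator hook above (my_realloc is a hypothetical name; the (void *, size_t) signature follows this revision): libev passes a size of 0 to free memory and a non-zero size to (re)allocate, and a 0 return for a non-zero size makes ev_realloc print and abort as shown.

    #include <stdlib.h>
    #include <ev.h>

    /* drop-in allocator with the same semantics as the realloc default */
    static void *
    my_realloc (void *ptr, size_t size)
    {
      if (size)
        return realloc (ptr, size);

      free (ptr);
      return 0;
    }

    int
    main (void)
    {
      ev_set_allocator (my_realloc);   /* typically done before any other libev call */
      ev_default_loop (0);
      return 0;
    }
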
295 gettimeofday (&tv, 0); 338 gettimeofday (&tv, 0);
296 return tv.tv_sec + tv.tv_usec * 1e-6; 339 return tv.tv_sec + tv.tv_usec * 1e-6;
297#endif 340#endif
298} 341}
299 342
300inline ev_tstamp 343ev_tstamp inline_size
301get_clock (void) 344get_clock (void)
302{ 345{
303#if EV_USE_MONOTONIC 346#if EV_USE_MONOTONIC
304 if (expect_true (have_monotonic)) 347 if (expect_true (have_monotonic))
305 { 348 {
348#define array_free(stem, idx) \ 391#define array_free(stem, idx) \
349 ev_free (stem ## s idx); stem ## cnt idx = stem ## max idx = 0; 392 ev_free (stem ## s idx); stem ## cnt idx = stem ## max idx = 0;
350 393
351/*****************************************************************************/ 394/*****************************************************************************/
352 395
353static void 396void noinline
354anfds_init (ANFD *base, int count)
355{
356 while (count--)
357 {
358 base->head = 0;
359 base->events = EV_NONE;
360 base->reify = 0;
361
362 ++base;
363 }
364}
365
366void
367ev_feed_event (EV_P_ void *w, int revents) 397ev_feed_event (EV_P_ void *w, int revents)
368{ 398{
369 W w_ = (W)w; 399 W w_ = (W)w;
370 400
371 if (w_->pending) 401 if (expect_false (w_->pending))
372 { 402 {
373 pendings [ABSPRI (w_)][w_->pending - 1].events |= revents; 403 pendings [ABSPRI (w_)][w_->pending - 1].events |= revents;
374 return; 404 return;
375 } 405 }
376 406
378 array_needsize (ANPENDING, pendings [ABSPRI (w_)], pendingmax [ABSPRI (w_)], pendingcnt [ABSPRI (w_)], EMPTY2); 408 array_needsize (ANPENDING, pendings [ABSPRI (w_)], pendingmax [ABSPRI (w_)], pendingcnt [ABSPRI (w_)], EMPTY2);
379 pendings [ABSPRI (w_)][w_->pending - 1].w = w_; 409 pendings [ABSPRI (w_)][w_->pending - 1].w = w_;
380 pendings [ABSPRI (w_)][w_->pending - 1].events = revents; 410 pendings [ABSPRI (w_)][w_->pending - 1].events = revents;
381} 411}
382 412
383static void 413void inline_size
384queue_events (EV_P_ W *events, int eventcnt, int type) 414queue_events (EV_P_ W *events, int eventcnt, int type)
385{ 415{
386 int i; 416 int i;
387 417
388 for (i = 0; i < eventcnt; ++i) 418 for (i = 0; i < eventcnt; ++i)
389 ev_feed_event (EV_A_ events [i], type); 419 ev_feed_event (EV_A_ events [i], type);
390} 420}
391 421
392inline void 422/*****************************************************************************/
423
424void inline_size
425anfds_init (ANFD *base, int count)
426{
427 while (count--)
428 {
429 base->head = 0;
430 base->events = EV_NONE;
431 base->reify = 0;
432
433 ++base;
434 }
435}
436
437void inline_speed
393fd_event (EV_P_ int fd, int revents) 438fd_event (EV_P_ int fd, int revents)
394{ 439{
395 ANFD *anfd = anfds + fd; 440 ANFD *anfd = anfds + fd;
396 struct ev_io *w; 441 ev_io *w;
397 442
398 for (w = (struct ev_io *)anfd->head; w; w = (struct ev_io *)((WL)w)->next) 443 for (w = (ev_io *)anfd->head; w; w = (ev_io *)((WL)w)->next)
399 { 444 {
400 int ev = w->events & revents; 445 int ev = w->events & revents;
401 446
402 if (ev) 447 if (ev)
403 ev_feed_event (EV_A_ (W)w, ev); 448 ev_feed_event (EV_A_ (W)w, ev);
408ev_feed_fd_event (EV_P_ int fd, int revents) 453ev_feed_fd_event (EV_P_ int fd, int revents)
409{ 454{
410 fd_event (EV_A_ fd, revents); 455 fd_event (EV_A_ fd, revents);
411} 456}
412 457
413/*****************************************************************************/ 458void inline_size
414
415static void
416fd_reify (EV_P) 459fd_reify (EV_P)
417{ 460{
418 int i; 461 int i;
419 462
420 for (i = 0; i < fdchangecnt; ++i) 463 for (i = 0; i < fdchangecnt; ++i)
421 { 464 {
422 int fd = fdchanges [i]; 465 int fd = fdchanges [i];
423 ANFD *anfd = anfds + fd; 466 ANFD *anfd = anfds + fd;
424 struct ev_io *w; 467 ev_io *w;
425 468
426 int events = 0; 469 int events = 0;
427 470
428 for (w = (struct ev_io *)anfd->head; w; w = (struct ev_io *)((WL)w)->next) 471 for (w = (ev_io *)anfd->head; w; w = (ev_io *)((WL)w)->next)
429 events |= w->events; 472 events |= w->events;
430 473
431#if EV_SELECT_IS_WINSOCKET 474#if EV_SELECT_IS_WINSOCKET
432 if (events) 475 if (events)
433 { 476 {
437 } 480 }
438#endif 481#endif
439 482
440 anfd->reify = 0; 483 anfd->reify = 0;
441 484
442 method_modify (EV_A_ fd, anfd->events, events); 485 backend_modify (EV_A_ fd, anfd->events, events);
443 anfd->events = events; 486 anfd->events = events;
444 } 487 }
445 488
446 fdchangecnt = 0; 489 fdchangecnt = 0;
447} 490}
448 491
449static void 492void inline_size
450fd_change (EV_P_ int fd) 493fd_change (EV_P_ int fd)
451{ 494{
452 if (anfds [fd].reify) 495 if (expect_false (anfds [fd].reify))
453 return; 496 return;
454 497
455 anfds [fd].reify = 1; 498 anfds [fd].reify = 1;
456 499
457 ++fdchangecnt; 500 ++fdchangecnt;
458 array_needsize (int, fdchanges, fdchangemax, fdchangecnt, EMPTY2); 501 array_needsize (int, fdchanges, fdchangemax, fdchangecnt, EMPTY2);
459 fdchanges [fdchangecnt - 1] = fd; 502 fdchanges [fdchangecnt - 1] = fd;
460} 503}
461 504
462static void 505void inline_speed
463fd_kill (EV_P_ int fd) 506fd_kill (EV_P_ int fd)
464{ 507{
465 struct ev_io *w; 508 ev_io *w;
466 509
467 while ((w = (struct ev_io *)anfds [fd].head)) 510 while ((w = (ev_io *)anfds [fd].head))
468 { 511 {
469 ev_io_stop (EV_A_ w); 512 ev_io_stop (EV_A_ w);
470 ev_feed_event (EV_A_ (W)w, EV_ERROR | EV_READ | EV_WRITE); 513 ev_feed_event (EV_A_ (W)w, EV_ERROR | EV_READ | EV_WRITE);
471 } 514 }
472} 515}
473 516
474static int 517int inline_size
475fd_valid (int fd) 518fd_valid (int fd)
476{ 519{
477#ifdef _WIN32 520#ifdef _WIN32
478 return _get_osfhandle (fd) != -1; 521 return _get_osfhandle (fd) != -1;
479#else 522#else
480 return fcntl (fd, F_GETFD) != -1; 523 return fcntl (fd, F_GETFD) != -1;
481#endif 524#endif
482} 525}
483 526
484/* called on EBADF to verify fds */ 527/* called on EBADF to verify fds */
485static void 528static void noinline
486fd_ebadf (EV_P) 529fd_ebadf (EV_P)
487{ 530{
488 int fd; 531 int fd;
489 532
490 for (fd = 0; fd < anfdmax; ++fd) 533 for (fd = 0; fd < anfdmax; ++fd)
492 if (!fd_valid (fd) == -1 && errno == EBADF) 535 if (!fd_valid (fd) == -1 && errno == EBADF)
493 fd_kill (EV_A_ fd); 536 fd_kill (EV_A_ fd);
494} 537}
495 538
496/* called on ENOMEM in select/poll to kill some fds and retry */ 539/* called on ENOMEM in select/poll to kill some fds and retry */
497static void 540static void noinline
498fd_enomem (EV_P) 541fd_enomem (EV_P)
499{ 542{
500 int fd; 543 int fd;
501 544
502 for (fd = anfdmax; fd--; ) 545 for (fd = anfdmax; fd--; )
505 fd_kill (EV_A_ fd); 548 fd_kill (EV_A_ fd);
506 return; 549 return;
507 } 550 }
508} 551}
509 552
510/* usually called after fork if method needs to re-arm all fds from scratch */ 553/* usually called after fork if backend needs to re-arm all fds from scratch */
511static void 554static void noinline
512fd_rearm_all (EV_P) 555fd_rearm_all (EV_P)
513{ 556{
514 int fd; 557 int fd;
515 558
516 /* this should be highly optimised to not do anything but set a flag */ 559 /* this should be highly optimised to not do anything but set a flag */
522 } 565 }
523} 566}
524 567
525/*****************************************************************************/ 568/*****************************************************************************/
526 569
527static void 570void inline_speed
528upheap (WT *heap, int k) 571upheap (WT *heap, int k)
529{ 572{
530 WT w = heap [k]; 573 WT w = heap [k];
531 574
532 while (k && heap [k >> 1]->at > w->at) 575 while (k && heap [k >> 1]->at > w->at)
539 heap [k] = w; 582 heap [k] = w;
540 ((W)heap [k])->active = k + 1; 583 ((W)heap [k])->active = k + 1;
541 584
542} 585}
543 586
544static void 587void inline_speed
545downheap (WT *heap, int N, int k) 588downheap (WT *heap, int N, int k)
546{ 589{
547 WT w = heap [k]; 590 WT w = heap [k];
548 591
549 while (k < (N >> 1)) 592 while (k < (N >> 1))
563 606
564 heap [k] = w; 607 heap [k] = w;
565 ((W)heap [k])->active = k + 1; 608 ((W)heap [k])->active = k + 1;
566} 609}
567 610
568inline void 611void inline_size
569adjustheap (WT *heap, int N, int k) 612adjustheap (WT *heap, int N, int k)
570{ 613{
571 upheap (heap, k); 614 upheap (heap, k);
572 downheap (heap, N, k); 615 downheap (heap, N, k);
573} 616}
583static ANSIG *signals; 626static ANSIG *signals;
584static int signalmax; 627static int signalmax;
585 628
586static int sigpipe [2]; 629static int sigpipe [2];
587static sig_atomic_t volatile gotsig; 630static sig_atomic_t volatile gotsig;
588static struct ev_io sigev; 631static ev_io sigev;
589 632
590static void 633void inline_size
591signals_init (ANSIG *base, int count) 634signals_init (ANSIG *base, int count)
592{ 635{
593 while (count--) 636 while (count--)
594 { 637 {
595 base->head = 0; 638 base->head = 0;
615 write (sigpipe [1], &signum, 1); 658 write (sigpipe [1], &signum, 1);
616 errno = old_errno; 659 errno = old_errno;
617 } 660 }
618} 661}
619 662
620void 663void noinline
621ev_feed_signal_event (EV_P_ int signum) 664ev_feed_signal_event (EV_P_ int signum)
622{ 665{
623 WL w; 666 WL w;
624 667
625#if EV_MULTIPLICITY 668#if EV_MULTIPLICITY
636 for (w = signals [signum].head; w; w = w->next) 679 for (w = signals [signum].head; w; w = w->next)
637 ev_feed_event (EV_A_ (W)w, EV_SIGNAL); 680 ev_feed_event (EV_A_ (W)w, EV_SIGNAL);
638} 681}
639 682
640static void 683static void
641sigcb (EV_P_ struct ev_io *iow, int revents) 684sigcb (EV_P_ ev_io *iow, int revents)
642{ 685{
643 int signum; 686 int signum;
644 687
645 read (sigpipe [0], &revents, 1); 688 read (sigpipe [0], &revents, 1);
646 gotsig = 0; 689 gotsig = 0;
648 for (signum = signalmax; signum--; ) 691 for (signum = signalmax; signum--; )
649 if (signals [signum].gotsig) 692 if (signals [signum].gotsig)
650 ev_feed_signal_event (EV_A_ signum + 1); 693 ev_feed_signal_event (EV_A_ signum + 1);
651} 694}
652 695
653inline void 696void inline_size
654fd_intern (int fd) 697fd_intern (int fd)
655{ 698{
656#ifdef _WIN32 699#ifdef _WIN32
657 int arg = 1; 700 int arg = 1;
658 ioctlsocket (_get_osfhandle (fd), FIONBIO, &arg); 701 ioctlsocket (_get_osfhandle (fd), FIONBIO, &arg);
660 fcntl (fd, F_SETFD, FD_CLOEXEC); 703 fcntl (fd, F_SETFD, FD_CLOEXEC);
661 fcntl (fd, F_SETFL, O_NONBLOCK); 704 fcntl (fd, F_SETFL, O_NONBLOCK);
662#endif 705#endif
663} 706}
664 707
665static void 708static void noinline
666siginit (EV_P) 709siginit (EV_P)
667{ 710{
668 fd_intern (sigpipe [0]); 711 fd_intern (sigpipe [0]);
669 fd_intern (sigpipe [1]); 712 fd_intern (sigpipe [1]);
670 713
673 ev_unref (EV_A); /* child watcher should not keep loop alive */ 716 ev_unref (EV_A); /* child watcher should not keep loop alive */
674} 717}
675 718
676/*****************************************************************************/ 719/*****************************************************************************/
677 720
678static struct ev_child *childs [PID_HASHSIZE]; 721static ev_child *childs [EV_PID_HASHSIZE];
679 722
680#ifndef _WIN32 723#ifndef _WIN32
681 724
682static struct ev_signal childev; 725static ev_signal childev;
683 726
684#ifndef WCONTINUED 727void inline_speed
685# define WCONTINUED 0
686#endif
687
688static void
689child_reap (EV_P_ struct ev_signal *sw, int chain, int pid, int status) 728child_reap (EV_P_ ev_signal *sw, int chain, int pid, int status)
690{ 729{
691 struct ev_child *w; 730 ev_child *w;
692 731
693 for (w = (struct ev_child *)childs [chain & (PID_HASHSIZE - 1)]; w; w = (struct ev_child *)((WL)w)->next) 732 for (w = (ev_child *)childs [chain & (EV_PID_HASHSIZE - 1)]; w; w = (ev_child *)((WL)w)->next)
694 if (w->pid == pid || !w->pid) 733 if (w->pid == pid || !w->pid)
695 { 734 {
696 ev_priority (w) = ev_priority (sw); /* need to do it *now* */ 735 ev_priority (w) = ev_priority (sw); /* need to do it *now* */
697 w->rpid = pid; 736 w->rpid = pid;
698 w->rstatus = status; 737 w->rstatus = status;
699 ev_feed_event (EV_A_ (W)w, EV_CHILD); 738 ev_feed_event (EV_A_ (W)w, EV_CHILD);
700 } 739 }
701} 740}
702 741
742#ifndef WCONTINUED
743# define WCONTINUED 0
744#endif
745
703static void 746static void
704childcb (EV_P_ struct ev_signal *sw, int revents) 747childcb (EV_P_ ev_signal *sw, int revents)
705{ 748{
706 int pid, status; 749 int pid, status;
707 750
751 /* some systems define WCONTINUED but then fail to support it (linux 2.4) */
708 if (0 < (pid = waitpid (-1, &status, WNOHANG | WUNTRACED | WCONTINUED))) 752 if (0 >= (pid = waitpid (-1, &status, WNOHANG | WUNTRACED | WCONTINUED)))
709 { 753 if (!WCONTINUED
754 || errno != EINVAL
755 || 0 >= (pid = waitpid (-1, &status, WNOHANG | WUNTRACED)))
756 return;
757
710 /* make sure we are called again until all childs have been reaped */ 758 /* make sure we are called again until all childs have been reaped */
759 /* we need to do it this way so that the callback gets called before we continue */
711 ev_feed_event (EV_A_ (W)sw, EV_SIGNAL); 760 ev_feed_event (EV_A_ (W)sw, EV_SIGNAL);
712 761
713 child_reap (EV_A_ sw, pid, pid, status); 762 child_reap (EV_A_ sw, pid, pid, status);
763 if (EV_PID_HASHSIZE > 1)
714 child_reap (EV_A_ sw, 0, pid, status); /* this might trigger a watcher twice, but event catches that */ 764 child_reap (EV_A_ sw, 0, pid, status); /* this might trigger a watcher twice, but feed_event catches that */
715 }
716} 765}
717 766
718#endif 767#endif
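
As an application-side illustration of the child-reaping path above, a minimal sketch of an ev_child watcher on the default loop (child_cb and the exit status are placeholders; ev_child_init is shown with a callback and a pid, matching this era of the API):

    #include <sys/types.h>
    #include <unistd.h>
    #include <stdio.h>
    #include <ev.h>

    static void
    child_cb (EV_P_ ev_child *w, int revents)
    {
      printf ("pid %d exited, status %d\n", w->rpid, w->rstatus);
      ev_child_stop (EV_A_ w);
    }

    int
    main (void)
    {
      struct ev_loop *loop = ev_default_loop (0);   /* child watchers need the default loop */
      ev_child cw;
      pid_t pid = fork ();

      if (pid == 0)
        _exit (7);                                  /* child exits immediately */

      ev_child_init (&cw, child_cb, pid);           /* a pid of 0 would match any child */
      ev_child_start (loop, &cw);

      ev_loop (loop, 0);
      return 0;
    }
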
719 768
720/*****************************************************************************/ 769/*****************************************************************************/
746{ 795{
747 return EV_VERSION_MINOR; 796 return EV_VERSION_MINOR;
748} 797}
749 798
750/* return true if we are running with elevated privileges and should ignore env variables */ 799/* return true if we are running with elevated privileges and should ignore env variables */
751static int 800int inline_size
752enable_secure (void) 801enable_secure (void)
753{ 802{
754#ifdef _WIN32 803#ifdef _WIN32
755 return 0; 804 return 0;
756#else 805#else
758 || getgid () != getegid (); 807 || getgid () != getegid ();
759#endif 808#endif
760} 809}
761 810
762unsigned int 811unsigned int
763ev_method (EV_P) 812ev_supported_backends (void)
764{ 813{
765 return method; 814 unsigned int flags = 0;
766}
767 815
768static void 816 if (EV_USE_PORT ) flags |= EVBACKEND_PORT;
817 if (EV_USE_KQUEUE) flags |= EVBACKEND_KQUEUE;
818 if (EV_USE_EPOLL ) flags |= EVBACKEND_EPOLL;
819 if (EV_USE_POLL ) flags |= EVBACKEND_POLL;
820 if (EV_USE_SELECT) flags |= EVBACKEND_SELECT;
821
822 return flags;
823}
824
825unsigned int
826ev_recommended_backends (void)
827{
828 unsigned int flags = ev_supported_backends ();
829
830#ifndef __NetBSD__
831 /* kqueue is borked on everything but netbsd apparently */
832 /* it usually doesn't work correctly on anything but sockets and pipes */
833 flags &= ~EVBACKEND_KQUEUE;
834#endif
835#ifdef __APPLE__
836 // flags &= ~EVBACKEND_KQUEUE; for documentation
837 flags &= ~EVBACKEND_POLL;
838#endif
839
840 return flags;
841}
842
843unsigned int
844ev_embeddable_backends (void)
845{
846 return EVBACKEND_EPOLL
847 | EVBACKEND_KQUEUE
848 | EVBACKEND_PORT;
849}
850
851unsigned int
852ev_backend (EV_P)
853{
854 return backend;
855}
856
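
A small sketch of querying the backend-introspection API introduced above from application code (assuming a default EV_MULTIPLICITY build, where ev_backend takes the loop as its argument):

    #include <stdio.h>
    #include <ev.h>

    int
    main (void)
    {
      struct ev_loop *loop = ev_default_loop (0);   /* 0: honour LIBEV_FLAGS, pick recommended backends */

      printf ("supported backends:   %#x\n", ev_supported_backends ());
      printf ("recommended backends: %#x\n", ev_recommended_backends ());
      printf ("embeddable backends:  %#x\n", ev_embeddable_backends ());
      printf ("backend in use:       %#x\n", ev_backend (loop));

      return 0;
    }
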
857static void noinline
769loop_init (EV_P_ unsigned int flags) 858loop_init (EV_P_ unsigned int flags)
770{ 859{
771 if (!method) 860 if (!backend)
772 { 861 {
773#if EV_USE_MONOTONIC 862#if EV_USE_MONOTONIC
774 { 863 {
775 struct timespec ts; 864 struct timespec ts;
776 if (!clock_gettime (CLOCK_MONOTONIC, &ts)) 865 if (!clock_gettime (CLOCK_MONOTONIC, &ts))
781 ev_rt_now = ev_time (); 870 ev_rt_now = ev_time ();
782 mn_now = get_clock (); 871 mn_now = get_clock ();
783 now_floor = mn_now; 872 now_floor = mn_now;
784 rtmn_diff = ev_rt_now - mn_now; 873 rtmn_diff = ev_rt_now - mn_now;
785 874
786 if (!(flags & EVFLAG_NOENV) && !enable_secure () && getenv ("LIBEV_FLAGS")) 875 if (!(flags & EVFLAG_NOENV)
876 && !enable_secure ()
877 && getenv ("LIBEV_FLAGS"))
787 flags = atoi (getenv ("LIBEV_FLAGS")); 878 flags = atoi (getenv ("LIBEV_FLAGS"));
788 879
789 if (!(flags & 0x0000ffff)) 880 if (!(flags & 0x0000ffffUL))
790 flags |= 0x0000ffff; 881 flags |= ev_recommended_backends ();
791 882
792 method = 0; 883 backend = 0;
793#if EV_USE_PORT 884#if EV_USE_PORT
794 if (!method && (flags & EVMETHOD_PORT )) method = port_init (EV_A_ flags); 885 if (!backend && (flags & EVBACKEND_PORT )) backend = port_init (EV_A_ flags);
795#endif 886#endif
796#if EV_USE_KQUEUE 887#if EV_USE_KQUEUE
797 if (!method && (flags & EVMETHOD_KQUEUE)) method = kqueue_init (EV_A_ flags); 888 if (!backend && (flags & EVBACKEND_KQUEUE)) backend = kqueue_init (EV_A_ flags);
798#endif 889#endif
799#if EV_USE_EPOLL 890#if EV_USE_EPOLL
800 if (!method && (flags & EVMETHOD_EPOLL )) method = epoll_init (EV_A_ flags); 891 if (!backend && (flags & EVBACKEND_EPOLL )) backend = epoll_init (EV_A_ flags);
801#endif 892#endif
802#if EV_USE_POLL 893#if EV_USE_POLL
803 if (!method && (flags & EVMETHOD_POLL )) method = poll_init (EV_A_ flags); 894 if (!backend && (flags & EVBACKEND_POLL )) backend = poll_init (EV_A_ flags);
804#endif 895#endif
805#if EV_USE_SELECT 896#if EV_USE_SELECT
806 if (!method && (flags & EVMETHOD_SELECT)) method = select_init (EV_A_ flags); 897 if (!backend && (flags & EVBACKEND_SELECT)) backend = select_init (EV_A_ flags);
807#endif 898#endif
808 899
809 ev_init (&sigev, sigcb); 900 ev_init (&sigev, sigcb);
810 ev_set_priority (&sigev, EV_MAXPRI); 901 ev_set_priority (&sigev, EV_MAXPRI);
811 } 902 }
812} 903}
813 904
814void 905static void noinline
815loop_destroy (EV_P) 906loop_destroy (EV_P)
816{ 907{
817 int i; 908 int i;
818 909
819#if EV_USE_PORT 910#if EV_USE_PORT
820 if (method == EVMETHOD_PORT ) port_destroy (EV_A); 911 if (backend == EVBACKEND_PORT ) port_destroy (EV_A);
821#endif 912#endif
822#if EV_USE_KQUEUE 913#if EV_USE_KQUEUE
823 if (method == EVMETHOD_KQUEUE) kqueue_destroy (EV_A); 914 if (backend == EVBACKEND_KQUEUE) kqueue_destroy (EV_A);
824#endif 915#endif
825#if EV_USE_EPOLL 916#if EV_USE_EPOLL
826 if (method == EVMETHOD_EPOLL ) epoll_destroy (EV_A); 917 if (backend == EVBACKEND_EPOLL ) epoll_destroy (EV_A);
827#endif 918#endif
828#if EV_USE_POLL 919#if EV_USE_POLL
829 if (method == EVMETHOD_POLL ) poll_destroy (EV_A); 920 if (backend == EVBACKEND_POLL ) poll_destroy (EV_A);
830#endif 921#endif
831#if EV_USE_SELECT 922#if EV_USE_SELECT
832 if (method == EVMETHOD_SELECT) select_destroy (EV_A); 923 if (backend == EVBACKEND_SELECT) select_destroy (EV_A);
833#endif 924#endif
834 925
835 for (i = NUMPRI; i--; ) 926 for (i = NUMPRI; i--; )
836 array_free (pending, [i]); 927 array_free (pending, [i]);
837 928
838 /* have to use the microsoft-never-gets-it-right macro */ 929 /* have to use the microsoft-never-gets-it-right macro */
839 array_free (fdchange, EMPTY0); 930 array_free (fdchange, EMPTY0);
840 array_free (timer, EMPTY0); 931 array_free (timer, EMPTY0);
841#if EV_PERIODICS 932#if EV_PERIODIC_ENABLE
842 array_free (periodic, EMPTY0); 933 array_free (periodic, EMPTY0);
843#endif 934#endif
844 array_free (idle, EMPTY0); 935 array_free (idle, EMPTY0);
845 array_free (prepare, EMPTY0); 936 array_free (prepare, EMPTY0);
846 array_free (check, EMPTY0); 937 array_free (check, EMPTY0);
847 938
848 method = 0; 939 backend = 0;
849} 940}
850 941
851static void 942void inline_size
852loop_fork (EV_P) 943loop_fork (EV_P)
853{ 944{
854#if EV_USE_PORT 945#if EV_USE_PORT
855 if (method == EVMETHOD_PORT ) port_fork (EV_A); 946 if (backend == EVBACKEND_PORT ) port_fork (EV_A);
856#endif 947#endif
857#if EV_USE_KQUEUE 948#if EV_USE_KQUEUE
858 if (method == EVMETHOD_KQUEUE) kqueue_fork (EV_A); 949 if (backend == EVBACKEND_KQUEUE) kqueue_fork (EV_A);
859#endif 950#endif
860#if EV_USE_EPOLL 951#if EV_USE_EPOLL
861 if (method == EVMETHOD_EPOLL ) epoll_fork (EV_A); 952 if (backend == EVBACKEND_EPOLL ) epoll_fork (EV_A);
862#endif 953#endif
863 954
864 if (ev_is_active (&sigev)) 955 if (ev_is_active (&sigev))
865 { 956 {
866 /* default loop */ 957 /* default loop */
887 978
888 memset (loop, 0, sizeof (struct ev_loop)); 979 memset (loop, 0, sizeof (struct ev_loop));
889 980
890 loop_init (EV_A_ flags); 981 loop_init (EV_A_ flags);
891 982
892 if (ev_method (EV_A)) 983 if (ev_backend (EV_A))
893 return loop; 984 return loop;
894 985
895 return 0; 986 return 0;
896} 987}
897 988
910 1001
911#endif 1002#endif
912 1003
913#if EV_MULTIPLICITY 1004#if EV_MULTIPLICITY
914struct ev_loop * 1005struct ev_loop *
915ev_default_loop_ (unsigned int flags) 1006ev_default_loop_init (unsigned int flags)
916#else 1007#else
917int 1008int
918ev_default_loop (unsigned int flags) 1009ev_default_loop (unsigned int flags)
919#endif 1010#endif
920{ 1011{
930 ev_default_loop_ptr = 1; 1021 ev_default_loop_ptr = 1;
931#endif 1022#endif
932 1023
933 loop_init (EV_A_ flags); 1024 loop_init (EV_A_ flags);
934 1025
935 if (ev_method (EV_A)) 1026 if (ev_backend (EV_A))
936 { 1027 {
937 siginit (EV_A); 1028 siginit (EV_A);
938 1029
939#ifndef _WIN32 1030#ifndef _WIN32
940 ev_signal_init (&childev, childcb, SIGCHLD); 1031 ev_signal_init (&childev, childcb, SIGCHLD);
976{ 1067{
977#if EV_MULTIPLICITY 1068#if EV_MULTIPLICITY
978 struct ev_loop *loop = ev_default_loop_ptr; 1069 struct ev_loop *loop = ev_default_loop_ptr;
979#endif 1070#endif
980 1071
981 if (method) 1072 if (backend)
982 postfork = 1; 1073 postfork = 1;
983} 1074}
984 1075
985/*****************************************************************************/ 1076/*****************************************************************************/
986 1077
987static int 1078int inline_size
988any_pending (EV_P) 1079any_pending (EV_P)
989{ 1080{
990 int pri; 1081 int pri;
991 1082
992 for (pri = NUMPRI; pri--; ) 1083 for (pri = NUMPRI; pri--; )
994 return 1; 1085 return 1;
995 1086
996 return 0; 1087 return 0;
997} 1088}
998 1089
999static void 1090void inline_speed
1000call_pending (EV_P) 1091call_pending (EV_P)
1001{ 1092{
1002 int pri; 1093 int pri;
1003 1094
1004 for (pri = NUMPRI; pri--; ) 1095 for (pri = NUMPRI; pri--; )
1005 while (pendingcnt [pri]) 1096 while (pendingcnt [pri])
1006 { 1097 {
1007 ANPENDING *p = pendings [pri] + --pendingcnt [pri]; 1098 ANPENDING *p = pendings [pri] + --pendingcnt [pri];
1008 1099
1009 if (p->w) 1100 if (expect_true (p->w))
1010 { 1101 {
1102 /*assert (("non-pending watcher on pending list", p->w->pending));*/
1103
1011 p->w->pending = 0; 1104 p->w->pending = 0;
1012 EV_CB_INVOKE (p->w, p->events); 1105 EV_CB_INVOKE (p->w, p->events);
1013 } 1106 }
1014 } 1107 }
1015} 1108}
1016 1109
1017static void 1110void inline_size
1018timers_reify (EV_P) 1111timers_reify (EV_P)
1019{ 1112{
1020 while (timercnt && ((WT)timers [0])->at <= mn_now) 1113 while (timercnt && ((WT)timers [0])->at <= mn_now)
1021 { 1114 {
1022 struct ev_timer *w = timers [0]; 1115 ev_timer *w = timers [0];
1023 1116
1024 assert (("inactive timer on timer heap detected", ev_is_active (w))); 1117 /*assert (("inactive timer on timer heap detected", ev_is_active (w)));*/
1025 1118
1026 /* first reschedule or stop timer */ 1119 /* first reschedule or stop timer */
1027 if (w->repeat) 1120 if (w->repeat)
1028 { 1121 {
1029 assert (("negative ev_timer repeat value found while processing timers", w->repeat > 0.)); 1122 assert (("negative ev_timer repeat value found while processing timers", w->repeat > 0.));
1039 1132
1040 ev_feed_event (EV_A_ (W)w, EV_TIMEOUT); 1133 ev_feed_event (EV_A_ (W)w, EV_TIMEOUT);
1041 } 1134 }
1042} 1135}
1043 1136
1044#if EV_PERIODICS 1137#if EV_PERIODIC_ENABLE
1045static void 1138void inline_size
1046periodics_reify (EV_P) 1139periodics_reify (EV_P)
1047{ 1140{
1048 while (periodiccnt && ((WT)periodics [0])->at <= ev_rt_now) 1141 while (periodiccnt && ((WT)periodics [0])->at <= ev_rt_now)
1049 { 1142 {
1050 struct ev_periodic *w = periodics [0]; 1143 ev_periodic *w = periodics [0];
1051 1144
1052 assert (("inactive timer on periodic heap detected", ev_is_active (w))); 1145 /*assert (("inactive timer on periodic heap detected", ev_is_active (w)));*/
1053 1146
1054 /* first reschedule or stop timer */ 1147 /* first reschedule or stop timer */
1055 if (w->reschedule_cb) 1148 if (w->reschedule_cb)
1056 { 1149 {
1057 ((WT)w)->at = w->reschedule_cb (w, ev_rt_now + 0.0001); 1150 ((WT)w)->at = w->reschedule_cb (w, ev_rt_now + 0.0001);
1069 1162
1070 ev_feed_event (EV_A_ (W)w, EV_PERIODIC); 1163 ev_feed_event (EV_A_ (W)w, EV_PERIODIC);
1071 } 1164 }
1072} 1165}
1073 1166
1074static void 1167static void noinline
1075periodics_reschedule (EV_P) 1168periodics_reschedule (EV_P)
1076{ 1169{
1077 int i; 1170 int i;
1078 1171
1079 /* adjust periodics after time jump */ 1172 /* adjust periodics after time jump */
1080 for (i = 0; i < periodiccnt; ++i) 1173 for (i = 0; i < periodiccnt; ++i)
1081 { 1174 {
1082 struct ev_periodic *w = periodics [i]; 1175 ev_periodic *w = periodics [i];
1083 1176
1084 if (w->reschedule_cb) 1177 if (w->reschedule_cb)
1085 ((WT)w)->at = w->reschedule_cb (w, ev_rt_now); 1178 ((WT)w)->at = w->reschedule_cb (w, ev_rt_now);
1086 else if (w->interval) 1179 else if (w->interval)
1087 ((WT)w)->at += ceil ((ev_rt_now - ((WT)w)->at) / w->interval) * w->interval; 1180 ((WT)w)->at += ceil ((ev_rt_now - ((WT)w)->at) / w->interval) * w->interval;
1091 for (i = periodiccnt >> 1; i--; ) 1184 for (i = periodiccnt >> 1; i--; )
1092 downheap ((WT *)periodics, periodiccnt, i); 1185 downheap ((WT *)periodics, periodiccnt, i);
1093} 1186}
1094#endif 1187#endif
1095 1188
1096inline int 1189int inline_size
1097time_update_monotonic (EV_P) 1190time_update_monotonic (EV_P)
1098{ 1191{
1099 mn_now = get_clock (); 1192 mn_now = get_clock ();
1100 1193
1101 if (expect_true (mn_now - now_floor < MIN_TIMEJUMP * .5)) 1194 if (expect_true (mn_now - now_floor < MIN_TIMEJUMP * .5))
1109 ev_rt_now = ev_time (); 1202 ev_rt_now = ev_time ();
1110 return 1; 1203 return 1;
1111 } 1204 }
1112} 1205}
1113 1206
1114static void 1207void inline_size
1115time_update (EV_P) 1208time_update (EV_P)
1116{ 1209{
1117 int i; 1210 int i;
1118 1211
1119#if EV_USE_MONOTONIC 1212#if EV_USE_MONOTONIC
1121 { 1214 {
1122 if (time_update_monotonic (EV_A)) 1215 if (time_update_monotonic (EV_A))
1123 { 1216 {
1124 ev_tstamp odiff = rtmn_diff; 1217 ev_tstamp odiff = rtmn_diff;
1125 1218
1126 for (i = 4; --i; ) /* loop a few times, before making important decisions */ 1219 /* loop a few times, before making important decisions.
1220 * on the choice of "4": one iteration isn't enough,
1221 * in case we get preempted during the calls to
1222 * ev_time and get_clock. a second call is almost guarenteed
1223 * to succeed in that case, though. and looping a few more times
1224 * doesn't hurt either as we only do this on time-jumps or
1225 * in the unlikely event of getting preempted here.
1226 */
1227 for (i = 4; --i; )
1127 { 1228 {
1128 rtmn_diff = ev_rt_now - mn_now; 1229 rtmn_diff = ev_rt_now - mn_now;
1129 1230
1130 if (fabs (odiff - rtmn_diff) < MIN_TIMEJUMP) 1231 if (fabs (odiff - rtmn_diff) < MIN_TIMEJUMP)
1131 return; /* all is well */ 1232 return; /* all is well */
1133 ev_rt_now = ev_time (); 1234 ev_rt_now = ev_time ();
1134 mn_now = get_clock (); 1235 mn_now = get_clock ();
1135 now_floor = mn_now; 1236 now_floor = mn_now;
1136 } 1237 }
1137 1238
1138# if EV_PERIODICS 1239# if EV_PERIODIC_ENABLE
1139 periodics_reschedule (EV_A); 1240 periodics_reschedule (EV_A);
1140# endif 1241# endif
1141 /* no timer adjustment, as the monotonic clock doesn't jump */ 1242 /* no timer adjustment, as the monotonic clock doesn't jump */
1142 /* timers_reschedule (EV_A_ rtmn_diff - odiff) */ 1243 /* timers_reschedule (EV_A_ rtmn_diff - odiff) */
1143 } 1244 }
1147 { 1248 {
1148 ev_rt_now = ev_time (); 1249 ev_rt_now = ev_time ();
1149 1250
1150 if (expect_false (mn_now > ev_rt_now || mn_now < ev_rt_now - MAX_BLOCKTIME - MIN_TIMEJUMP)) 1251 if (expect_false (mn_now > ev_rt_now || mn_now < ev_rt_now - MAX_BLOCKTIME - MIN_TIMEJUMP))
1151 { 1252 {
1152#if EV_PERIODICS 1253#if EV_PERIODIC_ENABLE
1153 periodics_reschedule (EV_A); 1254 periodics_reschedule (EV_A);
1154#endif 1255#endif
1155 1256
1156 /* adjust timers. this is easy, as the offset is the same for all */ 1257 /* adjust timers. this is easy, as the offset is the same for all */
1157 for (i = 0; i < timercnt; ++i) 1258 for (i = 0; i < timercnt; ++i)
1177static int loop_done; 1278static int loop_done;
1178 1279
1179void 1280void
1180ev_loop (EV_P_ int flags) 1281ev_loop (EV_P_ int flags)
1181{ 1282{
1182 double block;
1183 loop_done = flags & (EVLOOP_ONESHOT | EVLOOP_NONBLOCK) ? 1 : 0; 1283 loop_done = flags & (EVLOOP_ONESHOT | EVLOOP_NONBLOCK)
1284 ? EVUNLOOP_ONE
1285 : EVUNLOOP_CANCEL;
1184 1286
1185 while (activecnt) 1287 while (activecnt)
1186 { 1288 {
1289 /* we might have forked, so reify kernel state if necessary */
1290 #if EV_FORK_ENABLE
1291 if (expect_false (postfork))
1292 if (forkcnt)
1293 {
1294 queue_events (EV_A_ (W *)forks, forkcnt, EV_FORK);
1295 call_pending (EV_A);
1296 }
1297 #endif
1298
1187 /* queue check watchers (and execute them) */ 1299 /* queue check watchers (and execute them) */
1188 if (expect_false (preparecnt)) 1300 if (expect_false (preparecnt))
1189 { 1301 {
1190 queue_events (EV_A_ (W *)prepares, preparecnt, EV_PREPARE); 1302 queue_events (EV_A_ (W *)prepares, preparecnt, EV_PREPARE);
1191 call_pending (EV_A); 1303 call_pending (EV_A);
1197 1309
1198 /* update fd-related kernel structures */ 1310 /* update fd-related kernel structures */
1199 fd_reify (EV_A); 1311 fd_reify (EV_A);
1200 1312
1201 /* calculate blocking time */ 1313 /* calculate blocking time */
1314 {
1315 double block;
1202 1316
1203 /* we only need this for !monotonic clock or timers, but as we basically 1317 if (flags & EVLOOP_NONBLOCK || idlecnt)
1204 always have timers, we just calculate it always */ 1318 block = 0.; /* do not block at all */
1319 else
1320 {
1321 /* update time to cancel out callback processing overhead */
1205#if EV_USE_MONOTONIC 1322#if EV_USE_MONOTONIC
1206 if (expect_true (have_monotonic)) 1323 if (expect_true (have_monotonic))
1207 time_update_monotonic (EV_A); 1324 time_update_monotonic (EV_A);
1208 else 1325 else
1209#endif 1326#endif
1210 { 1327 {
1211 ev_rt_now = ev_time (); 1328 ev_rt_now = ev_time ();
1212 mn_now = ev_rt_now; 1329 mn_now = ev_rt_now;
1213 } 1330 }
1214 1331
1215 if (flags & EVLOOP_NONBLOCK || idlecnt)
1216 block = 0.;
1217 else
1218 {
1219 block = MAX_BLOCKTIME; 1332 block = MAX_BLOCKTIME;
1220 1333
1221 if (timercnt) 1334 if (timercnt)
1222 { 1335 {
1223 ev_tstamp to = ((WT)timers [0])->at - mn_now + method_fudge; 1336 ev_tstamp to = ((WT)timers [0])->at - mn_now + backend_fudge;
1224 if (block > to) block = to; 1337 if (block > to) block = to;
1225 } 1338 }
1226 1339
1227#if EV_PERIODICS 1340#if EV_PERIODIC_ENABLE
1228 if (periodiccnt) 1341 if (periodiccnt)
1229 { 1342 {
1230 ev_tstamp to = ((WT)periodics [0])->at - ev_rt_now + method_fudge; 1343 ev_tstamp to = ((WT)periodics [0])->at - ev_rt_now + backend_fudge;
1231 if (block > to) block = to; 1344 if (block > to) block = to;
1232 } 1345 }
1233#endif 1346#endif
1234 1347
1235 if (block < 0.) block = 0.; 1348 if (expect_false (block < 0.)) block = 0.;
1236 } 1349 }
1237 1350
1238 method_poll (EV_A_ block); 1351 backend_poll (EV_A_ block);
1352 }
1239 1353
1240 /* update ev_rt_now, do magic */ 1354 /* update ev_rt_now, do magic */
1241 time_update (EV_A); 1355 time_update (EV_A);
1242 1356
1243 /* queue pending timers and reschedule them */ 1357 /* queue pending timers and reschedule them */
1244 timers_reify (EV_A); /* relative timers called last */ 1358 timers_reify (EV_A); /* relative timers called last */
1245#if EV_PERIODICS 1359#if EV_PERIODIC_ENABLE
1246 periodics_reify (EV_A); /* absolute timers called first */ 1360 periodics_reify (EV_A); /* absolute timers called first */
1247#endif 1361#endif
1248 1362
1249 /* queue idle watchers unless io or timers are pending */ 1363 /* queue idle watchers unless other events are pending */
1250 if (idlecnt && !any_pending (EV_A)) 1364 if (idlecnt && !any_pending (EV_A))
1251 queue_events (EV_A_ (W *)idles, idlecnt, EV_IDLE); 1365 queue_events (EV_A_ (W *)idles, idlecnt, EV_IDLE);
1252 1366
1253 /* queue check watchers, to be executed first */ 1367 /* queue check watchers, to be executed first */
1254 if (checkcnt) 1368 if (expect_false (checkcnt))
1255 queue_events (EV_A_ (W *)checks, checkcnt, EV_CHECK); 1369 queue_events (EV_A_ (W *)checks, checkcnt, EV_CHECK);
1256 1370
1257 call_pending (EV_A); 1371 call_pending (EV_A);
1258 1372
1259 if (loop_done) 1373 if (expect_false (loop_done))
1260 break; 1374 break;
1261 } 1375 }
1262 1376
1263 if (loop_done != 2) 1377 if (loop_done == EVUNLOOP_ONE)
1264 loop_done = 0; 1378 loop_done = EVUNLOOP_CANCEL;
1265} 1379}
1266 1380
1267void 1381void
1268ev_unloop (EV_P_ int how) 1382ev_unloop (EV_P_ int how)
1269{ 1383{
1270 loop_done = how; 1384 loop_done = how;
1271} 1385}
1272 1386
1273/*****************************************************************************/ 1387/*****************************************************************************/
1274 1388
1275inline void 1389void inline_size
1276wlist_add (WL *head, WL elem) 1390wlist_add (WL *head, WL elem)
1277{ 1391{
1278 elem->next = *head; 1392 elem->next = *head;
1279 *head = elem; 1393 *head = elem;
1280} 1394}
1281 1395
1282inline void 1396void inline_size
1283wlist_del (WL *head, WL elem) 1397wlist_del (WL *head, WL elem)
1284{ 1398{
1285 while (*head) 1399 while (*head)
1286 { 1400 {
1287 if (*head == elem) 1401 if (*head == elem)
1292 1406
1293 head = &(*head)->next; 1407 head = &(*head)->next;
1294 } 1408 }
1295} 1409}
1296 1410
1297inline void 1411void inline_speed
1298ev_clear_pending (EV_P_ W w) 1412ev_clear_pending (EV_P_ W w)
1299{ 1413{
1300 if (w->pending) 1414 if (w->pending)
1301 { 1415 {
1302 pendings [ABSPRI (w)][w->pending - 1].w = 0; 1416 pendings [ABSPRI (w)][w->pending - 1].w = 0;
1303 w->pending = 0; 1417 w->pending = 0;
1304 } 1418 }
1305} 1419}
1306 1420
1307inline void 1421void inline_speed
1308ev_start (EV_P_ W w, int active) 1422ev_start (EV_P_ W w, int active)
1309{ 1423{
1310 if (w->priority < EV_MINPRI) w->priority = EV_MINPRI; 1424 if (w->priority < EV_MINPRI) w->priority = EV_MINPRI;
1311 if (w->priority > EV_MAXPRI) w->priority = EV_MAXPRI; 1425 if (w->priority > EV_MAXPRI) w->priority = EV_MAXPRI;
1312 1426
1313 w->active = active; 1427 w->active = active;
1314 ev_ref (EV_A); 1428 ev_ref (EV_A);
1315} 1429}
1316 1430
1317inline void 1431void inline_size
1318ev_stop (EV_P_ W w) 1432ev_stop (EV_P_ W w)
1319{ 1433{
1320 ev_unref (EV_A); 1434 ev_unref (EV_A);
1321 w->active = 0; 1435 w->active = 0;
1322} 1436}
1323 1437
1324/*****************************************************************************/ 1438/*****************************************************************************/
1325 1439
1326void 1440void
1327ev_io_start (EV_P_ struct ev_io *w) 1441ev_io_start (EV_P_ ev_io *w)
1328{ 1442{
1329 int fd = w->fd; 1443 int fd = w->fd;
1330 1444
1331 if (ev_is_active (w)) 1445 if (expect_false (ev_is_active (w)))
1332 return; 1446 return;
1333 1447
1334 assert (("ev_io_start called with negative fd", fd >= 0)); 1448 assert (("ev_io_start called with negative fd", fd >= 0));
1335 1449
1336 ev_start (EV_A_ (W)w, 1); 1450 ev_start (EV_A_ (W)w, 1);
1339 1453
1340 fd_change (EV_A_ fd); 1454 fd_change (EV_A_ fd);
1341} 1455}
1342 1456
1343void 1457void
1344ev_io_stop (EV_P_ struct ev_io *w) 1458ev_io_stop (EV_P_ ev_io *w)
1345{ 1459{
1346 ev_clear_pending (EV_A_ (W)w); 1460 ev_clear_pending (EV_A_ (W)w);
1347 if (!ev_is_active (w)) 1461 if (expect_false (!ev_is_active (w)))
1348 return; 1462 return;
1349 1463
1350 assert (("ev_io_start called with illegal fd (must stay constant after start!)", w->fd >= 0 && w->fd < anfdmax)); 1464 assert (("ev_io_start called with illegal fd (must stay constant after start!)", w->fd >= 0 && w->fd < anfdmax));
1351 1465
1352 wlist_del ((WL *)&anfds[w->fd].head, (WL)w); 1466 wlist_del ((WL *)&anfds[w->fd].head, (WL)w);
1354 1468
1355 fd_change (EV_A_ w->fd); 1469 fd_change (EV_A_ w->fd);
1356} 1470}
1357 1471
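
For reference, the canonical start/stop pattern these functions implement, as a minimal sketch watching stdin (stdin_cb and the one-shot behaviour are illustrative):

    #include <stdio.h>
    #include <ev.h>

    static void
    stdin_cb (EV_P_ ev_io *w, int revents)
    {
      printf ("stdin is readable\n");
      ev_io_stop (EV_A_ w);               /* one-shot: stop the watcher ... */
      ev_unloop (EV_A_ EVUNLOOP_ONE);     /* ... and leave the innermost ev_loop */
    }

    int
    main (void)
    {
      struct ev_loop *loop = ev_default_loop (0);
      ev_io stdin_watcher;

      ev_io_init (&stdin_watcher, stdin_cb, /* fd */ 0, EV_READ);
      ev_io_start (loop, &stdin_watcher);

      ev_loop (loop, 0);
      return 0;
    }
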
1358void 1472void
1359ev_timer_start (EV_P_ struct ev_timer *w) 1473ev_timer_start (EV_P_ ev_timer *w)
1360{ 1474{
1361 if (ev_is_active (w)) 1475 if (expect_false (ev_is_active (w)))
1362 return; 1476 return;
1363 1477
1364 ((WT)w)->at += mn_now; 1478 ((WT)w)->at += mn_now;
1365 1479
1366 assert (("ev_timer_start called with negative timer repeat value", w->repeat >= 0.)); 1480 assert (("ev_timer_start called with negative timer repeat value", w->repeat >= 0.));
1367 1481
1368 ev_start (EV_A_ (W)w, ++timercnt); 1482 ev_start (EV_A_ (W)w, ++timercnt);
1369 array_needsize (struct ev_timer *, timers, timermax, timercnt, EMPTY2); 1483 array_needsize (ev_timer *, timers, timermax, timercnt, EMPTY2);
1370 timers [timercnt - 1] = w; 1484 timers [timercnt - 1] = w;
1371 upheap ((WT *)timers, timercnt - 1); 1485 upheap ((WT *)timers, timercnt - 1);
1372 1486
1487 /*assert (("internal timer heap corruption", timers [((W)w)->active - 1] == w));*/
1488}
1489
1490void
1491ev_timer_stop (EV_P_ ev_timer *w)
1492{
1493 ev_clear_pending (EV_A_ (W)w);
1494 if (expect_false (!ev_is_active (w)))
1495 return;
1496
1373 assert (("internal timer heap corruption", timers [((W)w)->active - 1] == w)); 1497 assert (("internal timer heap corruption", timers [((W)w)->active - 1] == w));
1374}
1375 1498
1376void 1499 {
1377ev_timer_stop (EV_P_ struct ev_timer *w) 1500 int active = ((W)w)->active;
1378{
1379 ev_clear_pending (EV_A_ (W)w);
1380 if (!ev_is_active (w))
1381 return;
1382 1501
1383 assert (("internal timer heap corruption", timers [((W)w)->active - 1] == w)); 1502 if (expect_true (--active < --timercnt))
1384
1385 if (((W)w)->active < timercnt--)
1386 { 1503 {
1387 timers [((W)w)->active - 1] = timers [timercnt]; 1504 timers [active] = timers [timercnt];
1388 adjustheap ((WT *)timers, timercnt, ((W)w)->active - 1); 1505 adjustheap ((WT *)timers, timercnt, active);
1389 } 1506 }
1507 }
1390 1508
1391 ((WT)w)->at -= mn_now; 1509 ((WT)w)->at -= mn_now;
1392 1510
1393 ev_stop (EV_A_ (W)w); 1511 ev_stop (EV_A_ (W)w);
1394} 1512}
1395 1513
1396void 1514void
1397ev_timer_again (EV_P_ struct ev_timer *w) 1515ev_timer_again (EV_P_ ev_timer *w)
1398{ 1516{
1399 if (ev_is_active (w)) 1517 if (ev_is_active (w))
1400 { 1518 {
1401 if (w->repeat) 1519 if (w->repeat)
1402 { 1520 {
1411 w->at = w->repeat; 1529 w->at = w->repeat;
1412 ev_timer_start (EV_A_ w); 1530 ev_timer_start (EV_A_ w);
1413 } 1531 }
1414} 1532}
1415 1533
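
The repeat handling above is what the inactivity-timeout idiom relies on; a minimal sketch (timeout_cb and the 10 second window are illustrative):

    #include <ev.h>

    static ev_timer timeout_watcher;

    static void
    timeout_cb (EV_P_ ev_timer *w, int revents)
    {
      /* 10 seconds without activity: give up */
      ev_unloop (EV_A_ EVUNLOOP_ONE);
    }

    int
    main (void)
    {
      struct ev_loop *loop = ev_default_loop (0);

      ev_init (&timeout_watcher, timeout_cb);
      timeout_watcher.repeat = 10.;
      ev_timer_again (loop, &timeout_watcher);   /* arm: fires after 10s of silence */

      /* whenever activity is seen, call ev_timer_again (loop, &timeout_watcher)
         again to push the deadline back, instead of stop/set/start */

      ev_loop (loop, 0);
      return 0;
    }
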
1416#if EV_PERIODICS 1534#if EV_PERIODIC_ENABLE
1417void 1535void
1418ev_periodic_start (EV_P_ struct ev_periodic *w) 1536ev_periodic_start (EV_P_ ev_periodic *w)
1419{ 1537{
1420 if (ev_is_active (w)) 1538 if (expect_false (ev_is_active (w)))
1421 return; 1539 return;
1422 1540
1423 if (w->reschedule_cb) 1541 if (w->reschedule_cb)
1424 ((WT)w)->at = w->reschedule_cb (w, ev_rt_now); 1542 ((WT)w)->at = w->reschedule_cb (w, ev_rt_now);
1425 else if (w->interval) 1543 else if (w->interval)
1428 /* this formula differs from the one in periodic_reify because we do not always round up */ 1546 /* this formula differs from the one in periodic_reify because we do not always round up */
1429 ((WT)w)->at += ceil ((ev_rt_now - ((WT)w)->at) / w->interval) * w->interval; 1547 ((WT)w)->at += ceil ((ev_rt_now - ((WT)w)->at) / w->interval) * w->interval;
1430 } 1548 }
1431 1549
1432 ev_start (EV_A_ (W)w, ++periodiccnt); 1550 ev_start (EV_A_ (W)w, ++periodiccnt);
1433 array_needsize (struct ev_periodic *, periodics, periodicmax, periodiccnt, EMPTY2); 1551 array_needsize (ev_periodic *, periodics, periodicmax, periodiccnt, EMPTY2);
1434 periodics [periodiccnt - 1] = w; 1552 periodics [periodiccnt - 1] = w;
1435 upheap ((WT *)periodics, periodiccnt - 1); 1553 upheap ((WT *)periodics, periodiccnt - 1);
1436 1554
1555 /*assert (("internal periodic heap corruption", periodics [((W)w)->active - 1] == w));*/
1556}
1557
1558void
1559ev_periodic_stop (EV_P_ ev_periodic *w)
1560{
1561 ev_clear_pending (EV_A_ (W)w);
1562 if (expect_false (!ev_is_active (w)))
1563 return;
1564
1437 assert (("internal periodic heap corruption", periodics [((W)w)->active - 1] == w)); 1565 assert (("internal periodic heap corruption", periodics [((W)w)->active - 1] == w));
1438}
1439 1566
1440void 1567 {
1441ev_periodic_stop (EV_P_ struct ev_periodic *w) 1568 int active = ((W)w)->active;
1442{
1443 ev_clear_pending (EV_A_ (W)w);
1444 if (!ev_is_active (w))
1445 return;
1446 1569
1447 assert (("internal periodic heap corruption", periodics [((W)w)->active - 1] == w)); 1570 if (expect_true (--active < --periodiccnt))
1448
1449 if (((W)w)->active < periodiccnt--)
1450 { 1571 {
1451 periodics [((W)w)->active - 1] = periodics [periodiccnt]; 1572 periodics [active] = periodics [periodiccnt];
1452 adjustheap ((WT *)periodics, periodiccnt, ((W)w)->active - 1); 1573 adjustheap ((WT *)periodics, periodiccnt, active);
1453 } 1574 }
1575 }
1454 1576
1455 ev_stop (EV_A_ (W)w); 1577 ev_stop (EV_A_ (W)w);
1456} 1578}
1457 1579
1458void 1580void
1459ev_periodic_again (EV_P_ struct ev_periodic *w) 1581ev_periodic_again (EV_P_ ev_periodic *w)
1460{ 1582{
1461 /* TODO: use adjustheap and recalculation */ 1583 /* TODO: use adjustheap and recalculation */
1462 ev_periodic_stop (EV_A_ w); 1584 ev_periodic_stop (EV_A_ w);
1463 ev_periodic_start (EV_A_ w); 1585 ev_periodic_start (EV_A_ w);
1464} 1586}
1465#endif 1587#endif
1466 1588
1467void
1468ev_idle_start (EV_P_ struct ev_idle *w)
1469{
1470 if (ev_is_active (w))
1471 return;
1472
1473 ev_start (EV_A_ (W)w, ++idlecnt);
1474 array_needsize (struct ev_idle *, idles, idlemax, idlecnt, EMPTY2);
1475 idles [idlecnt - 1] = w;
1476}
1477
1478void
1479ev_idle_stop (EV_P_ struct ev_idle *w)
1480{
1481 ev_clear_pending (EV_A_ (W)w);
1482 if (!ev_is_active (w))
1483 return;
1484
1485 idles [((W)w)->active - 1] = idles [--idlecnt];
1486 ev_stop (EV_A_ (W)w);
1487}
1488
1489void
1490ev_prepare_start (EV_P_ struct ev_prepare *w)
1491{
1492 if (ev_is_active (w))
1493 return;
1494
1495 ev_start (EV_A_ (W)w, ++preparecnt);
1496 array_needsize (struct ev_prepare *, prepares, preparemax, preparecnt, EMPTY2);
1497 prepares [preparecnt - 1] = w;
1498}
1499
1500void
1501ev_prepare_stop (EV_P_ struct ev_prepare *w)
1502{
1503 ev_clear_pending (EV_A_ (W)w);
1504 if (!ev_is_active (w))
1505 return;
1506
1507 prepares [((W)w)->active - 1] = prepares [--preparecnt];
1508 ev_stop (EV_A_ (W)w);
1509}
1510
1511void
1512ev_check_start (EV_P_ struct ev_check *w)
1513{
1514 if (ev_is_active (w))
1515 return;
1516
1517 ev_start (EV_A_ (W)w, ++checkcnt);
1518 array_needsize (struct ev_check *, checks, checkmax, checkcnt, EMPTY2);
1519 checks [checkcnt - 1] = w;
1520}
1521
1522void
1523ev_check_stop (EV_P_ struct ev_check *w)
1524{
1525 ev_clear_pending (EV_A_ (W)w);
1526 if (!ev_is_active (w))
1527 return;
1528
1529 checks [((W)w)->active - 1] = checks [--checkcnt];
1530 ev_stop (EV_A_ (W)w);
1531}
1532
1533#ifndef SA_RESTART 1589#ifndef SA_RESTART
1534# define SA_RESTART 0 1590# define SA_RESTART 0
1535#endif 1591#endif
1536 1592
1537void 1593void
1538ev_signal_start (EV_P_ struct ev_signal *w) 1594ev_signal_start (EV_P_ ev_signal *w)
1539{ 1595{
1540#if EV_MULTIPLICITY 1596#if EV_MULTIPLICITY
1541 assert (("signal watchers are only supported in the default loop", loop == ev_default_loop_ptr)); 1597 assert (("signal watchers are only supported in the default loop", loop == ev_default_loop_ptr));
1542#endif 1598#endif
1543 if (ev_is_active (w)) 1599 if (expect_false (ev_is_active (w)))
1544 return; 1600 return;
1545 1601
1546 assert (("ev_signal_start called with illegal signal number", w->signum > 0)); 1602 assert (("ev_signal_start called with illegal signal number", w->signum > 0));
1547 1603
1548 ev_start (EV_A_ (W)w, 1); 1604 ev_start (EV_A_ (W)w, 1);
1562#endif 1618#endif
1563 } 1619 }
1564} 1620}
1565 1621
1566void 1622void
1567ev_signal_stop (EV_P_ struct ev_signal *w) 1623ev_signal_stop (EV_P_ ev_signal *w)
1568{ 1624{
1569 ev_clear_pending (EV_A_ (W)w); 1625 ev_clear_pending (EV_A_ (W)w);
1570 if (!ev_is_active (w)) 1626 if (expect_false (!ev_is_active (w)))
1571 return; 1627 return;
1572 1628
1573 wlist_del ((WL *)&signals [w->signum - 1].head, (WL)w); 1629 wlist_del ((WL *)&signals [w->signum - 1].head, (WL)w);
1574 ev_stop (EV_A_ (W)w); 1630 ev_stop (EV_A_ (W)w);
1575 1631
1576 if (!signals [w->signum - 1].head) 1632 if (!signals [w->signum - 1].head)
1577 signal (w->signum, SIG_DFL); 1633 signal (w->signum, SIG_DFL);
1578} 1634}
1579 1635
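
A small usage sketch of the signal watcher path above, catching SIGINT on the default loop (sigint_cb is a placeholder; the assert above documents that signal watchers require the default loop):

    #include <signal.h>
    #include <stdio.h>
    #include <ev.h>

    static void
    sigint_cb (EV_P_ ev_signal *w, int revents)
    {
      printf ("caught SIGINT, shutting down\n");
      ev_unloop (EV_A_ EVUNLOOP_ONE);
    }

    int
    main (void)
    {
      struct ev_loop *loop = ev_default_loop (0);
      ev_signal sigint_watcher;

      ev_signal_init (&sigint_watcher, sigint_cb, SIGINT);
      ev_signal_start (loop, &sigint_watcher);

      ev_loop (loop, 0);
      return 0;
    }
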
1580void 1636void
1581ev_child_start (EV_P_ struct ev_child *w) 1637ev_child_start (EV_P_ ev_child *w)
1582{ 1638{
1583#if EV_MULTIPLICITY 1639#if EV_MULTIPLICITY
1584 assert (("child watchers are only supported in the default loop", loop == ev_default_loop_ptr)); 1640 assert (("child watchers are only supported in the default loop", loop == ev_default_loop_ptr));
1585#endif 1641#endif
1586 if (ev_is_active (w)) 1642 if (expect_false (ev_is_active (w)))
1587 return; 1643 return;
1588 1644
1589 ev_start (EV_A_ (W)w, 1); 1645 ev_start (EV_A_ (W)w, 1);
1590 wlist_add ((WL *)&childs [w->pid & (PID_HASHSIZE - 1)], (WL)w); 1646 wlist_add ((WL *)&childs [w->pid & (EV_PID_HASHSIZE - 1)], (WL)w);
1591} 1647}
1592 1648
1593void 1649void
1594ev_child_stop (EV_P_ struct ev_child *w) 1650ev_child_stop (EV_P_ ev_child *w)
1595{ 1651{
1596 ev_clear_pending (EV_A_ (W)w); 1652 ev_clear_pending (EV_A_ (W)w);
1597 if (!ev_is_active (w)) 1653 if (expect_false (!ev_is_active (w)))
1598 return; 1654 return;
1599 1655
1600 wlist_del ((WL *)&childs [w->pid & (PID_HASHSIZE - 1)], (WL)w); 1656 wlist_del ((WL *)&childs [w->pid & (EV_PID_HASHSIZE - 1)], (WL)w);
1601 ev_stop (EV_A_ (W)w); 1657 ev_stop (EV_A_ (W)w);
1602} 1658}
1603 1659
1660#if EV_STAT_ENABLE
1661
1662# ifdef _WIN32
1663# undef lstat
1664# define lstat(a,b) _stati64 (a,b)
1665# endif
1666
1667#define DEF_STAT_INTERVAL 5.0074891
1668#define MIN_STAT_INTERVAL 0.1074891
1669
1670void
1671ev_stat_stat (EV_P_ ev_stat *w)
1672{
1673 if (lstat (w->path, &w->attr) < 0)
1674 w->attr.st_nlink = 0;
1675 else if (!w->attr.st_nlink)
1676 w->attr.st_nlink = 1;
1677}
1678
1679static void
1680stat_timer_cb (EV_P_ ev_timer *w_, int revents)
1681{
1682 ev_stat *w = (ev_stat *)(((char *)w_) - offsetof (ev_stat, timer));
1683
1684 /* we copy this here each the time so that */
1685 /* prev has the old value when the callback gets invoked */
1686 w->prev = w->attr;
1687 ev_stat_stat (EV_A_ w);
1688
1689 if (memcmp (&w->prev, &w->attr, sizeof (ev_statdata)))
1690 ev_feed_event (EV_A_ w, EV_STAT);
1691}
1692
1693void
1694ev_stat_start (EV_P_ ev_stat *w)
1695{
1696 if (expect_false (ev_is_active (w)))
1697 return;
1698
1699 /* since we use memcmp, we need to clear any padding data etc. */
1700 memset (&w->prev, 0, sizeof (ev_statdata));
1701 memset (&w->attr, 0, sizeof (ev_statdata));
1702
1703 ev_stat_stat (EV_A_ w);
1704
1705 if (w->interval < MIN_STAT_INTERVAL)
1706 w->interval = w->interval ? MIN_STAT_INTERVAL : DEF_STAT_INTERVAL;
1707
1708 ev_timer_init (&w->timer, stat_timer_cb, w->interval, w->interval);
1709 ev_set_priority (&w->timer, ev_priority (w));
1710 ev_timer_start (EV_A_ &w->timer);
1711
1712 ev_start (EV_A_ (W)w, 1);
1713}
1714
1715void
1716ev_stat_stop (EV_P_ ev_stat *w)
1717{
1718 ev_clear_pending (EV_A_ (W)w);
1719 if (expect_false (!ev_is_active (w)))
1720 return;
1721
1722 ev_timer_stop (EV_A_ &w->timer);
1723
1724 ev_stop (EV_A_ (W)w);
1725}
1726#endif
1727
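
As a usage sketch of the new ev_stat watcher (the path is illustrative; assuming ev_stat_init takes the path and polling interval, where an interval of 0. requests the DEF_STAT_INTERVAL default above, and st_nlink == 0 encodes "path does not exist" as in ev_stat_stat):

    #include <stdio.h>
    #include <ev.h>

    static void
    passwd_cb (EV_P_ ev_stat *w, int revents)
    {
      if (w->attr.st_nlink)
        printf ("%s changed\n", w->path);
      else
        printf ("%s disappeared\n", w->path);
    }

    int
    main (void)
    {
      struct ev_loop *loop = ev_default_loop (0);
      ev_stat passwd_watcher;

      ev_stat_init (&passwd_watcher, passwd_cb, "/etc/passwd", 0.);
      ev_stat_start (loop, &passwd_watcher);

      ev_loop (loop, 0);
      return 0;
    }
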
1728void
1729ev_idle_start (EV_P_ ev_idle *w)
1730{
1731 if (expect_false (ev_is_active (w)))
1732 return;
1733
1734 ev_start (EV_A_ (W)w, ++idlecnt);
1735 array_needsize (ev_idle *, idles, idlemax, idlecnt, EMPTY2);
1736 idles [idlecnt - 1] = w;
1737}
1738
1739void
1740ev_idle_stop (EV_P_ ev_idle *w)
1741{
1742 ev_clear_pending (EV_A_ (W)w);
1743 if (expect_false (!ev_is_active (w)))
1744 return;
1745
1746 {
1747 int active = ((W)w)->active;
1748 idles [active - 1] = idles [--idlecnt];
1749 ((W)idles [active - 1])->active = active;
1750 }
1751
1752 ev_stop (EV_A_ (W)w);
1753}
1754
1755void
1756ev_prepare_start (EV_P_ ev_prepare *w)
1757{
1758 if (expect_false (ev_is_active (w)))
1759 return;
1760
1761 ev_start (EV_A_ (W)w, ++preparecnt);
1762 array_needsize (ev_prepare *, prepares, preparemax, preparecnt, EMPTY2);
1763 prepares [preparecnt - 1] = w;
1764}
1765
1766void
1767ev_prepare_stop (EV_P_ ev_prepare *w)
1768{
1769 ev_clear_pending (EV_A_ (W)w);
1770 if (expect_false (!ev_is_active (w)))
1771 return;
1772
1773 {
1774 int active = ((W)w)->active;
1775 prepares [active - 1] = prepares [--preparecnt];
1776 ((W)prepares [active - 1])->active = active;
1777 }
1778
1779 ev_stop (EV_A_ (W)w);
1780}
1781
1782void
1783ev_check_start (EV_P_ ev_check *w)
1784{
1785 if (expect_false (ev_is_active (w)))
1786 return;
1787
1788 ev_start (EV_A_ (W)w, ++checkcnt);
1789 array_needsize (ev_check *, checks, checkmax, checkcnt, EMPTY2);
1790 checks [checkcnt - 1] = w;
1791}
1792
1793void
1794ev_check_stop (EV_P_ ev_check *w)
1795{
1796 ev_clear_pending (EV_A_ (W)w);
1797 if (expect_false (!ev_is_active (w)))
1798 return;
1799
1800 {
1801 int active = ((W)w)->active;
1802 checks [active - 1] = checks [--checkcnt];
1803 ((W)checks [active - 1])->active = active;
1804 }
1805
1806 ev_stop (EV_A_ (W)w);
1807}
1808
1809#if EV_EMBED_ENABLE
1810void noinline
1811ev_embed_sweep (EV_P_ ev_embed *w)
1812{
1813 ev_loop (w->loop, EVLOOP_NONBLOCK);
1814}
1815
1816static void
1817embed_cb (EV_P_ ev_io *io, int revents)
1818{
1819 ev_embed *w = (ev_embed *)(((char *)io) - offsetof (ev_embed, io));
1820
1821 if (ev_cb (w))
1822 ev_feed_event (EV_A_ (W)w, EV_EMBED);
1823 else
1824 ev_embed_sweep (loop, w);
1825}
1826
1827void
1828ev_embed_start (EV_P_ ev_embed *w)
1829{
1830 if (expect_false (ev_is_active (w)))
1831 return;
1832
1833 {
1834 struct ev_loop *loop = w->loop;
1835 assert (("loop to be embedded is not embeddable", backend & ev_embeddable_backends ()));
1836 ev_io_init (&w->io, embed_cb, backend_fd, EV_READ);
1837 }
1838
1839 ev_set_priority (&w->io, ev_priority (w));
1840 ev_io_start (EV_A_ &w->io);
1841
1842 ev_start (EV_A_ (W)w, 1);
1843}
1844
1845void
1846ev_embed_stop (EV_P_ ev_embed *w)
1847{
1848 ev_clear_pending (EV_A_ (W)w);
1849 if (expect_false (!ev_is_active (w)))
1850 return;
1851
1852 ev_io_stop (EV_A_ &w->io);
1853
1854 ev_stop (EV_A_ (W)w);
1855}
1856#endif
1857
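
For the new embed watcher, a sketch of embedding a kqueue-only loop into the default loop when the platform allows it (the ev_embed_init argument order, callback then loop to embed, follows the usual ev_TYPE_init convention and is an assumption here; a 0 callback makes embed_cb above sweep the embedded loop automatically):

    #include <ev.h>

    int
    main (void)
    {
      struct ev_loop *loop = ev_default_loop (0);
      struct ev_loop *sockets = 0;
      ev_embed embed;

      /* only try if kqueue is both compiled in and embeddable on this platform */
      if (ev_supported_backends () & ev_embeddable_backends () & EVBACKEND_KQUEUE)
        sockets = ev_loop_new (EVBACKEND_KQUEUE);

      if (sockets)
        {
          ev_embed_init (&embed, 0, sockets);
          ev_embed_start (loop, &embed);
        }
      else
        sockets = loop;   /* no embeddable kqueue: just use the default loop */

      /* register socket watchers on "sockets", everything else on "loop" ... */

      ev_loop (loop, 0);
      return 0;
    }
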
1858#if EV_FORK_ENABLE
1859void
1860ev_fork_start (EV_P_ ev_fork *w)
1861{
1862 if (expect_false (ev_is_active (w)))
1863 return;
1864
1865 ev_start (EV_A_ (W)w, ++forkcnt);
1866 array_needsize (ev_fork *, forks, forkmax, forkcnt, EMPTY2);
1867 forks [forkcnt - 1] = w;
1868}
1869
1870void
1871ev_fork_stop (EV_P_ ev_fork *w)
1872{
1873 ev_clear_pending (EV_A_ (W)w);
1874 if (expect_false (!ev_is_active (w)))
1875 return;
1876
1877 {
1878 int active = ((W)w)->active;
1879 forks [active - 1] = forks [--forkcnt];
1880 ((W)forks [active - 1])->active = active;
1881 }
1882
1883 ev_stop (EV_A_ (W)w);
1884}
1885#endif
1886
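
A small sketch of the new fork watcher; per the postfork handling in ev_loop above, the callback is queued on the first iteration after ev_default_fork () has flagged the loop (fork_cb is a placeholder):

    #include <unistd.h>
    #include <ev.h>

    static void
    fork_cb (EV_P_ ev_fork *w, int revents)
    {
      /* runs in the child after ev_default_fork ();
         re-create per-process resources (worker pipes, pid files, ...) here */
    }

    int
    main (void)
    {
      struct ev_loop *loop = ev_default_loop (0);
      ev_fork fw;

      ev_fork_init (&fw, fork_cb);
      ev_fork_start (loop, &fw);

      if (fork () == 0)        /* child */
        ev_default_fork ();    /* tell libev to re-arm kernel state after the fork */

      ev_loop (loop, 0);
      return 0;
    }
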
1604/*****************************************************************************/ 1887/*****************************************************************************/
1605 1888
1606struct ev_once 1889struct ev_once
1607{ 1890{
1608 struct ev_io io; 1891 ev_io io;
1609 struct ev_timer to; 1892 ev_timer to;
1610 void (*cb)(int revents, void *arg); 1893 void (*cb)(int revents, void *arg);
1611 void *arg; 1894 void *arg;
1612}; 1895};
1613 1896
1614static void 1897static void
1623 1906
1624 cb (revents, arg); 1907 cb (revents, arg);
1625} 1908}
1626 1909
1627static void 1910static void
1628once_cb_io (EV_P_ struct ev_io *w, int revents) 1911once_cb_io (EV_P_ ev_io *w, int revents)
1629{ 1912{
1630 once_cb (EV_A_ (struct ev_once *)(((char *)w) - offsetof (struct ev_once, io)), revents); 1913 once_cb (EV_A_ (struct ev_once *)(((char *)w) - offsetof (struct ev_once, io)), revents);
1631} 1914}
1632 1915
1633static void 1916static void
1634once_cb_to (EV_P_ struct ev_timer *w, int revents) 1917once_cb_to (EV_P_ ev_timer *w, int revents)
1635{ 1918{
1636 once_cb (EV_A_ (struct ev_once *)(((char *)w) - offsetof (struct ev_once, to)), revents); 1919 once_cb (EV_A_ (struct ev_once *)(((char *)w) - offsetof (struct ev_once, to)), revents);
1637} 1920}
1638 1921
1639void 1922void
1640ev_once (EV_P_ int fd, int events, ev_tstamp timeout, void (*cb)(int revents, void *arg), void *arg) 1923ev_once (EV_P_ int fd, int events, ev_tstamp timeout, void (*cb)(int revents, void *arg), void *arg)
1641{ 1924{
1642 struct ev_once *once = (struct ev_once *)ev_malloc (sizeof (struct ev_once)); 1925 struct ev_once *once = (struct ev_once *)ev_malloc (sizeof (struct ev_once));
1643 1926
1644 if (!once) 1927 if (expect_false (!once))
1928 {
1645 cb (EV_ERROR | EV_READ | EV_WRITE | EV_TIMEOUT, arg); 1929 cb (EV_ERROR | EV_READ | EV_WRITE | EV_TIMEOUT, arg);
1646 else 1930 return;
1647 { 1931 }
1932
1648 once->cb = cb; 1933 once->cb = cb;
1649 once->arg = arg; 1934 once->arg = arg;
1650 1935
1651 ev_init (&once->io, once_cb_io); 1936 ev_init (&once->io, once_cb_io);
1652 if (fd >= 0) 1937 if (fd >= 0)
1653 { 1938 {
1654 ev_io_set (&once->io, fd, events); 1939 ev_io_set (&once->io, fd, events);
1655 ev_io_start (EV_A_ &once->io); 1940 ev_io_start (EV_A_ &once->io);
1656 } 1941 }
1657 1942
1658 ev_init (&once->to, once_cb_to); 1943 ev_init (&once->to, once_cb_to);
1659 if (timeout >= 0.) 1944 if (timeout >= 0.)
1660 { 1945 {
1661 ev_timer_set (&once->to, timeout, 0.); 1946 ev_timer_set (&once->to, timeout, 0.);
1662 ev_timer_start (EV_A_ &once->to); 1947 ev_timer_start (EV_A_ &once->to);
1663 }
1664 } 1948 }
1665} 1949}
1666 1950
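
Finally, a small sketch of the ev_once convenience call whose allocation-failure path is tightened above (once_cb, the fd and the 5 second timeout are illustrative):

    #include <stdio.h>
    #include <ev.h>

    static void
    once_cb (int revents, void *arg)
    {
      if (revents & EV_TIMEOUT)
        printf ("timed out\n");
      else if (revents & EV_READ)
        printf ("stdin became readable\n");
    }

    int
    main (void)
    {
      struct ev_loop *loop = ev_default_loop (0);

      /* whichever happens first: fd 0 becomes readable, or 5 seconds pass */
      ev_once (loop, /* fd */ 0, EV_READ, 5., once_cb, 0);

      ev_loop (loop, 0);
      return 0;
    }
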
1667#ifdef __cplusplus 1951#ifdef __cplusplus
1668} 1952}
