--- libev/ev.c 2008/02/01 13:22:48 1.208 +++ libev/ev.c 2008/05/21 23:25:21 1.248 @@ -41,6 +41,7 @@ extern "C" { #endif +/* this big block deduces configuration from config.h */ #ifndef EV_STANDALONE # ifdef EV_CONFIG_H # include EV_CONFIG_H @@ -120,6 +121,14 @@ # endif # endif +# ifndef EV_USE_EVENTFD +# if HAVE_EVENTFD +# define EV_USE_EVENTFD 1 +# else +# define EV_USE_EVENTFD 0 +# endif +# endif + #endif #include @@ -154,7 +163,7 @@ # endif #endif -/**/ +/* this block tries to deduce configuration from header-defined symbols and defaults */ #ifndef EV_USE_MONOTONIC # define EV_USE_MONOTONIC 0 @@ -181,7 +190,11 @@ #endif #ifndef EV_USE_EPOLL -# define EV_USE_EPOLL 0 +# if __linux && (__GLIBC__ > 2 || (__GLIBC__ == 2 && __GLIBC_MINOR__ >= 4)) +# define EV_USE_EPOLL 1 +# else +# define EV_USE_EPOLL 0 +# endif #endif #ifndef EV_USE_KQUEUE @@ -193,7 +206,11 @@ #endif #ifndef EV_USE_INOTIFY -# define EV_USE_INOTIFY 0 +# if __linux && (__GLIBC__ > 2 || (__GLIBC__ == 2 && __GLIBC_MINOR__ >= 4)) +# define EV_USE_INOTIFY 1 +# else +# define EV_USE_INOTIFY 0 +# endif #endif #ifndef EV_PID_HASHSIZE @@ -212,7 +229,23 @@ # endif #endif -/**/ +#ifndef EV_USE_EVENTFD +# if __linux && (__GLIBC__ > 2 || (__GLIBC__ == 2 && __GLIBC_MINOR__ >= 7)) +# define EV_USE_EVENTFD 1 +# else +# define EV_USE_EVENTFD 0 +# endif +#endif + +#ifndef EV_USE_4HEAP +# define EV_USE_4HEAP !EV_MINIMAL +#endif + +#ifndef EV_HEAP_CACHE_AT +# define EV_HEAP_CACHE_AT !EV_MINIMAL +#endif + +/* this block fixes any misconfiguration where we know we run into trouble otherwise */ #ifndef CLOCK_MONOTONIC # undef EV_USE_MONOTONIC @@ -243,8 +276,31 @@ # include #endif +#if EV_USE_EVENTFD +/* our minimum requirement is glibc 2.7 which has the stub, but not the header */ +# include +# ifdef __cplusplus +extern "C" { +# endif +int eventfd (unsigned int initval, int flags); +# ifdef __cplusplus +} +# endif +#endif + /**/ +/* undefined or zero: no verification done or available */ +/* 1 or higher: ev_loop_verify function available */ +/* 2 or higher: ev_loop_verify is called frequently */ +#define EV_VERIFY 1 + +#if EV_VERIFY > 1 +# define EV_FREQUENT_CHECK ev_loop_verify (EV_A) +#else +# define EV_FREQUENT_CHECK do { } while (0) +#endif + /* * This is used to avoid floating point rounding problems. * It is added to ev_rt_now when scheduling periodics @@ -265,7 +321,7 @@ #else # define expect(expr,value) (expr) # define noinline -# if __STDC_VERSION__ < 199901L +# if __STDC_VERSION__ < 199901L && __GNUC__ < 2 # define inline # endif #endif @@ -290,6 +346,9 @@ typedef ev_watcher_list *WL; typedef ev_watcher_time *WT; +#define ev_active(w) ((W)(w))->active +#define ev_at(w) ((WT)(w))->at + #if EV_USE_MONOTONIC /* sig_atomic_t is used to avoid per-thread variables or locking but still */ /* giving it a reasonably high chance of working on typical architetcures */ @@ -325,7 +384,22 @@ } } -static void *(*alloc)(void *ptr, long size); +static void * +ev_realloc_emul (void *ptr, long size) +{ + /* some systems, notably openbsd and darwin, fail to properly + * implement realloc (x, 0) (as required by both ansi c-98 and + * the single unix specification, so work around them here. + */ + + if (size) + return realloc (ptr, size); + + free (ptr); + return 0; +} + +static void *(*alloc)(void *ptr, long size) = ev_realloc_emul; void ev_set_allocator (void *(*cb)(void *ptr, long size)) @@ -336,7 +410,7 @@ inline_speed void * ev_realloc (void *ptr, long size) { - ptr = alloc ? 
alloc (ptr, size) : realloc (ptr, size); + ptr = alloc (ptr, size); if (!ptr && size) { @@ -369,12 +443,31 @@ } ANPENDING; #if EV_USE_INOTIFY +/* hash table entry per inotify-id */ typedef struct { WL head; } ANFS; #endif +/* Heap Entry */ +#if EV_HEAP_CACHE_AT + typedef struct { + ev_tstamp at; + WT w; + } ANHE; + + #define ANHE_w(he) (he).w /* access watcher, read-write */ + #define ANHE_at(he) (he).at /* access cached at, read-only */ + #define ANHE_at_cache(he) (he).at = (he).w->at /* update at from watcher */ +#else + typedef WT ANHE; + + #define ANHE_w(he) (he) + #define ANHE_at(he) (he)->at + #define ANHE_at_cache(he) +#endif + #if EV_MULTIPLICITY struct ev_loop @@ -453,7 +546,7 @@ nanosleep (&ts, 0); #elif defined(_WIN32) - Sleep (delay * 1e3); + Sleep ((unsigned long)(delay * 1e3)); #else struct timeval tv; @@ -467,6 +560,8 @@ /*****************************************************************************/ +#define MALLOC_ROUND 4096 /* prefer to allocate in chunks of this size, must be 2**n and >> 4 longs */ + int inline_size array_nextsize (int elem, int cur, int cnt) { @@ -476,11 +571,11 @@ ncur <<= 1; while (cnt > ncur); - /* if size > 4096, round to 4096 - 4 * longs to accomodate malloc overhead */ - if (elem * ncur > 4096) + /* if size is large, round to MALLOC_ROUND - 4 * longs to accomodate malloc overhead */ + if (elem * ncur > MALLOC_ROUND - sizeof (void *) * 4) { ncur *= elem; - ncur = (ncur + elem + 4095 + sizeof (void *) * 4) & ~4095; + ncur = (ncur + elem + (MALLOC_ROUND - 1) + sizeof (void *) * 4) & ~(MALLOC_ROUND - 1); ncur = ncur - sizeof (void *) * 4; ncur /= elem; } @@ -704,61 +799,161 @@ /*****************************************************************************/ +/* + * the heap functions want a real array index. array index 0 uis guaranteed to not + * be in-use at any time. the first heap entry is at array [HEAP0]. DHEAP gives + * the branching factor of the d-tree. + */ + +/* + * at the moment we allow libev the luxury of two heaps, + * a small-code-size 2-heap one and a ~1.5kb larger 4-heap + * which is more cache-efficient. + * the difference is about 5% with 50000+ watchers. 
+ */ +#if EV_USE_4HEAP + +#define DHEAP 4 +#define HEAP0 (DHEAP - 1) /* index of first element in heap */ +#define HPARENT(k) ((((k) - HEAP0 - 1) / DHEAP) + HEAP0) +#define UPHEAP_DONE(p,k) ((p) == (k)) + +/* away from the root */ void inline_speed -upheap (WT *heap, int k) +downheap (ANHE *heap, int N, int k) { - WT w = heap [k]; + ANHE he = heap [k]; + ANHE *E = heap + N + HEAP0; - while (k) + for (;;) { - int p = (k - 1) >> 1; + ev_tstamp minat; + ANHE *minpos; + ANHE *pos = heap + DHEAP * (k - HEAP0) + HEAP0 + 1; + + /* find minimum child */ + if (expect_true (pos + DHEAP - 1 < E)) + { + /* fast path */ (minpos = pos + 0), (minat = ANHE_at (*minpos)); + if ( ANHE_at (pos [1]) < minat) (minpos = pos + 1), (minat = ANHE_at (*minpos)); + if ( ANHE_at (pos [2]) < minat) (minpos = pos + 2), (minat = ANHE_at (*minpos)); + if ( ANHE_at (pos [3]) < minat) (minpos = pos + 3), (minat = ANHE_at (*minpos)); + } + else if (pos < E) + { + /* slow path */ (minpos = pos + 0), (minat = ANHE_at (*minpos)); + if (pos + 1 < E && ANHE_at (pos [1]) < minat) (minpos = pos + 1), (minat = ANHE_at (*minpos)); + if (pos + 2 < E && ANHE_at (pos [2]) < minat) (minpos = pos + 2), (minat = ANHE_at (*minpos)); + if (pos + 3 < E && ANHE_at (pos [3]) < minat) (minpos = pos + 3), (minat = ANHE_at (*minpos)); + } + else + break; - if (heap [p]->at <= w->at) + if (ANHE_at (he) <= minat) break; - heap [k] = heap [p]; - ((W)heap [k])->active = k + 1; - k = p; + heap [k] = *minpos; + ev_active (ANHE_w (*minpos)) = k; + + k = minpos - heap; } - heap [k] = w; - ((W)heap [k])->active = k + 1; + heap [k] = he; + ev_active (ANHE_w (he)) = k; } +#else /* 4HEAP */ + +#define HEAP0 1 +#define HPARENT(k) ((k) >> 1) +#define UPHEAP_DONE(p,k) (!(p)) + +/* away from the root */ void inline_speed -downheap (WT *heap, int N, int k) +downheap (ANHE *heap, int N, int k) { - WT w = heap [k]; + ANHE he = heap [k]; for (;;) { - int c = (k << 1) + 1; + int c = k << 1; - if (c >= N) + if (c > N + HEAP0 - 1) break; - c += c + 1 < N && heap [c]->at > heap [c + 1]->at + c += c + 1 < N + HEAP0 && ANHE_at (heap [c]) > ANHE_at (heap [c + 1]) ? 
1 : 0; - if (w->at <= heap [c]->at) + if (ANHE_at (he) <= ANHE_at (heap [c])) break; heap [k] = heap [c]; - ((W)heap [k])->active = k + 1; - + ev_active (ANHE_w (heap [k])) = k; + k = c; } - heap [k] = w; - ((W)heap [k])->active = k + 1; + heap [k] = he; + ev_active (ANHE_w (he)) = k; +} +#endif + +/* towards the root */ +void inline_speed +upheap (ANHE *heap, int k) +{ + ANHE he = heap [k]; + + for (;;) + { + int p = HPARENT (k); + + if (UPHEAP_DONE (p, k) || ANHE_at (heap [p]) <= ANHE_at (he)) + break; + + heap [k] = heap [p]; + ev_active (ANHE_w (heap [k])) = k; + k = p; + } + + heap [k] = he; + ev_active (ANHE_w (he)) = k; } void inline_size -adjustheap (WT *heap, int N, int k) +adjustheap (ANHE *heap, int N, int k) +{ + if (k > HEAP0 && ANHE_at (heap [HPARENT (k)]) >= ANHE_at (heap [k])) + upheap (heap, k); + else + downheap (heap, N, k); +} + +/* rebuild the heap: this function is used only once and executed rarely */ +void inline_size +reheap (ANHE *heap, int N) +{ + int i; + /* we don't use floyds algorithm, upheap is simpler and is more cache-efficient */ + /* also, this is easy to implement and correct for both 2-heaps and 4-heaps */ + for (i = 0; i < N; ++i) + upheap (heap, i + HEAP0); +} + +#if EV_VERIFY +static void +checkheap (ANHE *heap, int N) { - upheap (heap, k); - downheap (heap, N, k); + int i; + + for (i = HEAP0; i < N + HEAP0; ++i) + { + assert (("active index mismatch in heap", ev_active (ANHE_w (heap [i])) == i)); + assert (("heap condition violated", i == HEAP0 || ANHE_at (heap [HPARENT (i)]) <= ANHE_at (heap [i]))); + assert (("heap at cache mismatch", ANHE_at (heap [i]) == ev_at (ANHE_w (heap [i])))); + } } +#endif /*****************************************************************************/ @@ -804,29 +999,48 @@ { if (!ev_is_active (&pipeev)) { - while (pipe (evpipe)) - syserr ("(libev) error creating signal/async pipe"); +#if EV_USE_EVENTFD + if ((evfd = eventfd (0, 0)) >= 0) + { + evpipe [0] = -1; + fd_intern (evfd); + ev_io_set (&pipeev, evfd, EV_READ); + } + else +#endif + { + while (pipe (evpipe)) + syserr ("(libev) error creating signal/async pipe"); - fd_intern (evpipe [0]); - fd_intern (evpipe [1]); + fd_intern (evpipe [0]); + fd_intern (evpipe [1]); + ev_io_set (&pipeev, evpipe [0], EV_READ); + } - ev_io_set (&pipeev, evpipe [0], EV_READ); ev_io_start (EV_A_ &pipeev); - ev_unref (EV_A); /* child watcher should not keep loop alive */ + ev_unref (EV_A); /* watcher should not keep loop alive */ } } void inline_size -evpipe_write (EV_P_ int sig, int async) +evpipe_write (EV_P_ EV_ATOMIC_T *flag) { - if (!(gotasync || gotsig)) + if (!*flag) { - int old_errno = errno; + int old_errno = errno; /* save errno because write might clobber it */ - if (sig) gotsig = 1; - if (async) gotasync = 1; + *flag = 1; + +#if EV_USE_EVENTFD + if (evfd >= 0) + { + uint64_t counter = 1; + write (evfd, &counter, sizeof (uint64_t)); + } + else +#endif + write (evpipe [1], &old_errno, 1); - write (evpipe [1], &old_errno, 1); errno = old_errno; } } @@ -834,12 +1048,20 @@ static void pipecb (EV_P_ ev_io *iow, int revents) { - { - int dummy; - read (evpipe [0], &dummy, 1); - } +#if EV_USE_EVENTFD + if (evfd >= 0) + { + uint64_t counter; + read (evfd, &counter, sizeof (uint64_t)); + } + else +#endif + { + char dummy; + read (evpipe [0], &dummy, 1); + } - if (gotsig) + if (gotsig && ev_is_default_loop (EV_A)) { int signum; gotsig = 0; @@ -849,6 +1071,7 @@ ev_feed_signal_event (EV_A_ signum + 1); } +#if EV_ASYNC_ENABLE if (gotasync) { int i; @@ -861,23 +1084,24 @@ ev_feed_event (EV_A_ 
asyncs [i], EV_ASYNC); } } +#endif } /*****************************************************************************/ static void -sighandler (int signum) +ev_sighandler (int signum) { #if EV_MULTIPLICITY struct ev_loop *loop = &default_loop_struct; #endif #if _WIN32 - signal (signum, sighandler); + signal (signum, ev_sighandler); #endif signals [signum - 1].gotsig = 1; - evpipe_write (EV_A_ 1, 0); + evpipe_write (EV_A_ &gotsig); } void noinline @@ -913,7 +1137,7 @@ #endif void inline_speed -child_reap (EV_P_ ev_signal *sw, int chain, int pid, int status) +child_reap (EV_P_ int chain, int pid, int status) { ev_child *w; int traced = WIFSTOPPED (status) || WIFCONTINUED (status); @@ -923,7 +1147,7 @@ if ((w->pid == pid || !w->pid) && (!traced || (w->flags & 1))) { - ev_set_priority (w, ev_priority (sw)); /* need to do it *now* */ + ev_set_priority (w, EV_MAXPRI); /* need to do it *now*, this *must* be the same prio as the signal watcher itself */ w->rpid = pid; w->rstatus = status; ev_feed_event (EV_A_ (W)w, EV_CHILD); @@ -947,13 +1171,13 @@ || 0 >= (pid = waitpid (-1, &status, WNOHANG | WUNTRACED))) return; - /* make sure we are called again until all childs have been reaped */ + /* make sure we are called again until all children have been reaped */ /* we need to do it this way so that the callback gets called before we continue */ ev_feed_event (EV_A_ (W)sw, EV_SIGNAL); - child_reap (EV_A_ sw, pid, pid, status); + child_reap (EV_A_ pid, pid, status); if (EV_PID_HASHSIZE > 1) - child_reap (EV_A_ sw, 0, pid, status); /* this might trigger a watcher twice, but feed_event catches that */ + child_reap (EV_A_ 0, pid, status); /* this might trigger a watcher twice, but feed_event catches that */ } #endif @@ -1081,13 +1305,19 @@ } #endif - ev_rt_now = ev_time (); - mn_now = get_clock (); - now_floor = mn_now; - rtmn_diff = ev_rt_now - mn_now; + ev_rt_now = ev_time (); + mn_now = get_clock (); + now_floor = mn_now; + rtmn_diff = ev_rt_now - mn_now; io_blocktime = 0.; timeout_blocktime = 0.; + backend = 0; + backend_fd = -1; + gotasync = 0; +#if EV_USE_INOTIFY + fs_fd = -2; +#endif /* pid check not overridable via env */ #ifndef _WIN32 @@ -1100,15 +1330,9 @@ && getenv ("LIBEV_FLAGS")) flags = atoi (getenv ("LIBEV_FLAGS")); - if (!(flags & 0x0000ffffUL)) + if (!(flags & 0x0000ffffU)) flags |= ev_recommended_backends (); - backend = 0; - backend_fd = -1; -#if EV_USE_INOTIFY - fs_fd = -2; -#endif - #if EV_USE_PORT if (!backend && (flags & EVBACKEND_PORT )) backend = port_init (EV_A_ flags); #endif @@ -1140,8 +1364,16 @@ ev_ref (EV_A); /* signal watcher */ ev_io_stop (EV_A_ &pipeev); - close (evpipe [0]); evpipe [0] = 0; - close (evpipe [1]); evpipe [1] = 0; +#if EV_USE_EVENTFD + if (evfd >= 0) + close (evfd); +#endif + + if (evpipe [0] >= 0) + { + close (evpipe [0]); + close (evpipe [1]); + } } #if EV_USE_INOTIFY @@ -1189,11 +1421,16 @@ #endif array_free (prepare, EMPTY); array_free (check, EMPTY); +#if EV_ASYNC_ENABLE + array_free (async, EMPTY); +#endif backend = 0; } +#if EV_USE_INOTIFY void inline_size infy_fork (EV_P); +#endif void inline_size loop_fork (EV_P) @@ -1214,12 +1451,25 @@ if (ev_is_active (&pipeev)) { /* this "locks" the handlers against writing to the pipe */ - gotsig = gotasync = 1; + /* while we modify the fd vars */ + gotsig = 1; +#if EV_ASYNC_ENABLE + gotasync = 1; +#endif ev_ref (EV_A); ev_io_stop (EV_A_ &pipeev); - close (evpipe [0]); - close (evpipe [1]); + +#if EV_USE_EVENTFD + if (evfd >= 0) + close (evfd); +#endif + + if (evpipe [0] >= 0) + { + close (evpipe [0]); + close 
(evpipe [1]); + } evpipe_init (EV_A); /* now iterate over everything, in case we missed something */ @@ -1258,6 +1508,39 @@ postfork = 1; /* must be in line with ev_default_fork */ } +#if EV_VERIFY +static void +array_check (W **ws, int cnt) +{ + while (cnt--) + assert (("active index mismatch", ev_active (ws [cnt]) == cnt + 1)); +} + +static void +ev_loop_verify (EV_P) +{ + int i; + + checkheap (timers, timercnt); +#if EV_PERIODIC_ENABLE + checkheap (periodics, periodiccnt); +#endif + +#if EV_IDLE_ENABLE + for (i = NUMPRI; i--; ) + array_check ((W **)idles [i], idlecnt [i]); +#endif +#if EV_FORK_ENABLE + array_check ((W **)forks, forkcnt); +#endif + array_check ((W **)prepares, preparecnt); + array_check ((W **)checks, checkcnt); +#if EV_ASYNC_ENABLE + array_check ((W **)asyncs, asynccnt); +#endif +} +#endif + #endif #if EV_MULTIPLICITY @@ -1333,6 +1616,8 @@ { int pri; + EV_FREQUENT_CHECK; + for (pri = NUMPRI; pri--; ) while (pendingcnt [pri]) { @@ -1346,31 +1631,60 @@ EV_CB_INVOKE (p->w, p->events); } } + + EV_FREQUENT_CHECK; +} + +#if EV_IDLE_ENABLE +void inline_size +idle_reify (EV_P) +{ + if (expect_false (idleall)) + { + int pri; + + for (pri = NUMPRI; pri--; ) + { + if (pendingcnt [pri]) + break; + + if (idlecnt [pri]) + { + queue_events (EV_A_ (W *)idles [pri], idlecnt [pri], EV_IDLE); + break; + } + } + } } +#endif void inline_size timers_reify (EV_P) { - while (timercnt && ((WT)timers [0])->at <= mn_now) + EV_FREQUENT_CHECK; + + while (timercnt && ANHE_at (timers [HEAP0]) < mn_now) { - ev_timer *w = (ev_timer *)timers [0]; + ev_timer *w = (ev_timer *)ANHE_w (timers [HEAP0]); /*assert (("inactive timer on timer heap detected", ev_is_active (w)));*/ /* first reschedule or stop timer */ if (w->repeat) { - assert (("negative ev_timer repeat value found while processing timers", w->repeat > 0.)); + ev_at (w) += w->repeat; + if (ev_at (w) < mn_now) + ev_at (w) = mn_now; - ((WT)w)->at += w->repeat; - if (((WT)w)->at < mn_now) - ((WT)w)->at = mn_now; + assert (("negative ev_timer repeat value found while processing timers", w->repeat > 0.)); - downheap (timers, timercnt, 0); + ANHE_at_cache (timers [HEAP0]); + downheap (timers, timercnt, HEAP0); } else ev_timer_stop (EV_A_ w); /* nonrepeating: stop timer */ + EV_FREQUENT_CHECK; ev_feed_event (EV_A_ (W)w, EV_TIMEOUT); } } @@ -1379,29 +1693,47 @@ void inline_size periodics_reify (EV_P) { - while (periodiccnt && ((WT)periodics [0])->at <= ev_rt_now) + EV_FREQUENT_CHECK; + while (periodiccnt && ANHE_at (periodics [HEAP0]) < ev_rt_now) { - ev_periodic *w = (ev_periodic *)periodics [0]; + ev_periodic *w = (ev_periodic *)ANHE_w (periodics [HEAP0]); /*assert (("inactive timer on periodic heap detected", ev_is_active (w)));*/ /* first reschedule or stop timer */ if (w->reschedule_cb) { - ((WT)w)->at = w->reschedule_cb (w, ev_rt_now + TIME_EPSILON); - assert (("ev_periodic reschedule callback returned time in the past", ((WT)w)->at > ev_rt_now)); - downheap (periodics, periodiccnt, 0); + ev_at (w) = w->reschedule_cb (w, ev_rt_now); + + assert (("ev_periodic reschedule callback returned time in the past", ev_at (w) >= ev_rt_now)); + + ANHE_at_cache (periodics [HEAP0]); + downheap (periodics, periodiccnt, HEAP0); + EV_FREQUENT_CHECK; } else if (w->interval) { - ((WT)w)->at = w->offset + ceil ((ev_rt_now - w->offset) / w->interval) * w->interval; - if (((WT)w)->at - ev_rt_now <= TIME_EPSILON) ((WT)w)->at += w->interval; - assert (("ev_periodic timeout in the past detected while processing timers, negative interval?", ((WT)w)->at > ev_rt_now)); - 
downheap (periodics, periodiccnt, 0); + ev_at (w) = w->offset + ceil ((ev_rt_now - w->offset) / w->interval) * w->interval; + /* if next trigger time is not sufficiently in the future, put it there */ + /* this might happen because of floating point inexactness */ + if (ev_at (w) - ev_rt_now < TIME_EPSILON) + { + ev_at (w) += w->interval; + + /* if interval is unreasonably low we might still have a time in the past */ + /* so correct this. this will make the periodic very inexact, but the user */ + /* has effectively asked to get triggered more often than possible */ + if (ev_at (w) < ev_rt_now) + ev_at (w) = ev_rt_now; + } + + ANHE_at_cache (periodics [HEAP0]); + downheap (periodics, periodiccnt, HEAP0); } else ev_periodic_stop (EV_A_ w); /* nonrepeating: stop timer */ + EV_FREQUENT_CHECK; ev_feed_event (EV_A_ (W)w, EV_PERIODIC); } } @@ -1412,42 +1744,19 @@ int i; /* adjust periodics after time jump */ - for (i = 0; i < periodiccnt; ++i) + for (i = HEAP0; i < periodiccnt + HEAP0; ++i) { - ev_periodic *w = (ev_periodic *)periodics [i]; + ev_periodic *w = (ev_periodic *)ANHE_w (periodics [i]); if (w->reschedule_cb) - ((WT)w)->at = w->reschedule_cb (w, ev_rt_now); + ev_at (w) = w->reschedule_cb (w, ev_rt_now); else if (w->interval) - ((WT)w)->at = w->offset + ceil ((ev_rt_now - w->offset) / w->interval) * w->interval; - } - - /* now rebuild the heap */ - for (i = periodiccnt >> 1; i--; ) - downheap (periodics, periodiccnt, i); -} -#endif - -#if EV_IDLE_ENABLE -void inline_size -idle_reify (EV_P) -{ - if (expect_false (idleall)) - { - int pri; - - for (pri = NUMPRI; pri--; ) - { - if (pendingcnt [pri]) - break; + ev_at (w) = w->offset + ceil ((ev_rt_now - w->offset) / w->interval) * w->interval; - if (idlecnt [pri]) - { - queue_events (EV_A_ (W *)idles [pri], idlecnt [pri], EV_IDLE); - break; - } - } + ANHE_at_cache (periodics [i]); } + + reheap (periodics, periodiccnt); } #endif @@ -1486,7 +1795,7 @@ { rtmn_diff = ev_rt_now - mn_now; - if (fabs (odiff - rtmn_diff) < MIN_TIMEJUMP) + if (expect_true (fabs (odiff - rtmn_diff) < MIN_TIMEJUMP)) return; /* all is well */ ev_rt_now = ev_time (); @@ -1512,7 +1821,11 @@ #endif /* adjust timers. this is easy, as the offset is the same for all of them */ for (i = 0; i < timercnt; ++i) - ((WT)timers [i])->at += ev_rt_now - mn_now; + { + ANHE *he = timers + i + HEAP0; + ANHE_w (*he)->at += ev_rt_now - mn_now; + ANHE_at_cache (*he); + } } mn_now = ev_rt_now; @@ -1536,9 +1849,7 @@ void ev_loop (EV_P_ int flags) { - loop_done = flags & (EVLOOP_ONESHOT | EVLOOP_NONBLOCK) - ? 
EVUNLOOP_ONE - : EVUNLOOP_CANCEL; + loop_done = EVUNLOOP_CANCEL; call_pending (EV_A); /* in case we recurse, ensure ordering stays nice and clean */ @@ -1594,14 +1905,14 @@ if (timercnt) { - ev_tstamp to = ((WT)timers [0])->at - mn_now + backend_fudge; + ev_tstamp to = ANHE_at (timers [HEAP0]) - mn_now + backend_fudge; if (waittime > to) waittime = to; } #if EV_PERIODIC_ENABLE if (periodiccnt) { - ev_tstamp to = ((WT)periodics [0])->at - ev_rt_now + backend_fudge; + ev_tstamp to = ANHE_at (periodics [HEAP0]) - ev_rt_now + backend_fudge; if (waittime > to) waittime = to; } #endif @@ -1644,9 +1955,12 @@ queue_events (EV_A_ (W *)checks, checkcnt, EV_CHECK); call_pending (EV_A); - } - while (expect_true (activecnt && !loop_done)); + while (expect_true ( + activecnt + && !loop_done + && !(flags & (EVLOOP_ONESHOT | EVLOOP_NONBLOCK)) + )); if (loop_done == EVUNLOOP_ONE) loop_done = EVUNLOOP_CANCEL; @@ -1745,12 +2059,16 @@ assert (("ev_io_start called with negative fd", fd >= 0)); + EV_FREQUENT_CHECK; + ev_start (EV_A_ (W)w, 1); array_needsize (ANFD, anfds, anfdmax, fd + 1, anfds_init); wlist_add (&anfds[fd].head, (WL)w); fd_change (EV_A_ fd, w->events & EV_IOFDSET | 1); w->events &= ~EV_IOFDSET; + + EV_FREQUENT_CHECK; } void noinline @@ -1760,12 +2078,16 @@ if (expect_false (!ev_is_active (w))) return; - assert (("ev_io_start called with illegal fd (must stay constant after start!)", w->fd >= 0 && w->fd < anfdmax)); + assert (("ev_io_stop called with illegal fd (must stay constant after start!)", w->fd >= 0 && w->fd < anfdmax)); + + EV_FREQUENT_CHECK; wlist_del (&anfds[w->fd].head, (WL)w); ev_stop (EV_A_ (W)w); fd_change (EV_A_ w->fd, 1); + + EV_FREQUENT_CHECK; } void noinline @@ -1774,16 +2096,22 @@ if (expect_false (ev_is_active (w))) return; - ((WT)w)->at += mn_now; + ev_at (w) += mn_now; assert (("ev_timer_start called with negative timer repeat value", w->repeat >= 0.)); - ev_start (EV_A_ (W)w, ++timercnt); - array_needsize (WT, timers, timermax, timercnt, EMPTY2); - timers [timercnt - 1] = (WT)w; - upheap (timers, timercnt - 1); + EV_FREQUENT_CHECK; - /*assert (("internal timer heap corruption", timers [((W)w)->active - 1] == w));*/ + ++timercnt; + ev_start (EV_A_ (W)w, timercnt + HEAP0 - 1); + array_needsize (ANHE, timers, timermax, ev_active (w) + 1, EMPTY2); + ANHE_w (timers [ev_active (w)]) = (WT)w; + ANHE_at_cache (timers [ev_active (w)]); + upheap (timers, ev_active (w)); + + EV_FREQUENT_CHECK; + + /*assert (("internal timer heap corruption", timers [ev_active (w)] == (WT)w));*/ } void noinline @@ -1793,19 +2121,25 @@ if (expect_false (!ev_is_active (w))) return; - assert (("internal timer heap corruption", timers [((W)w)->active - 1] == (WT)w)); + EV_FREQUENT_CHECK; { - int active = ((W)w)->active; + int active = ev_active (w); + + assert (("internal timer heap corruption", ANHE_w (timers [active]) == (WT)w)); - if (expect_true (--active < --timercnt)) + --timercnt; + + if (expect_true (active < timercnt + HEAP0)) { - timers [active] = timers [timercnt]; + timers [active] = timers [timercnt + HEAP0]; adjustheap (timers, timercnt, active); } } - ((WT)w)->at -= mn_now; + EV_FREQUENT_CHECK; + + ev_at (w) -= mn_now; ev_stop (EV_A_ (W)w); } @@ -1813,21 +2147,26 @@ void noinline ev_timer_again (EV_P_ ev_timer *w) { + EV_FREQUENT_CHECK; + if (ev_is_active (w)) { if (w->repeat) { - ((WT)w)->at = mn_now + w->repeat; - adjustheap (timers, timercnt, ((W)w)->active - 1); + ev_at (w) = mn_now + w->repeat; + ANHE_at_cache (timers [ev_active (w)]); + adjustheap (timers, timercnt, ev_active (w)); } 
else ev_timer_stop (EV_A_ w); } else if (w->repeat) { - w->at = w->repeat; + ev_at (w) = w->repeat; ev_timer_start (EV_A_ w); } + + EV_FREQUENT_CHECK; } #if EV_PERIODIC_ENABLE @@ -1838,22 +2177,28 @@ return; if (w->reschedule_cb) - ((WT)w)->at = w->reschedule_cb (w, ev_rt_now); + ev_at (w) = w->reschedule_cb (w, ev_rt_now); else if (w->interval) { assert (("ev_periodic_start called with negative interval value", w->interval >= 0.)); /* this formula differs from the one in periodic_reify because we do not always round up */ - ((WT)w)->at = w->offset + ceil ((ev_rt_now - w->offset) / w->interval) * w->interval; + ev_at (w) = w->offset + ceil ((ev_rt_now - w->offset) / w->interval) * w->interval; } else - ((WT)w)->at = w->offset; + ev_at (w) = w->offset; + + EV_FREQUENT_CHECK; + + ++periodiccnt; + ev_start (EV_A_ (W)w, periodiccnt + HEAP0 - 1); + array_needsize (ANHE, periodics, periodicmax, ev_active (w) + 1, EMPTY2); + ANHE_w (periodics [ev_active (w)]) = (WT)w; + ANHE_at_cache (periodics [ev_active (w)]); + upheap (periodics, ev_active (w)); - ev_start (EV_A_ (W)w, ++periodiccnt); - array_needsize (WT, periodics, periodicmax, periodiccnt, EMPTY2); - periodics [periodiccnt - 1] = (WT)w; - upheap (periodics, periodiccnt - 1); + EV_FREQUENT_CHECK; - /*assert (("internal periodic heap corruption", periodics [((W)w)->active - 1] == w));*/ + /*assert (("internal periodic heap corruption", ANHE_w (periodics [ev_active (w)]) == (WT)w));*/ } void noinline @@ -1863,18 +2208,24 @@ if (expect_false (!ev_is_active (w))) return; - assert (("internal periodic heap corruption", periodics [((W)w)->active - 1] == (WT)w)); + EV_FREQUENT_CHECK; { - int active = ((W)w)->active; + int active = ev_active (w); - if (expect_true (--active < --periodiccnt)) + assert (("internal periodic heap corruption", ANHE_w (periodics [active]) == (WT)w)); + + --periodiccnt; + + if (expect_true (active < periodiccnt + HEAP0)) { - periodics [active] = periodics [periodiccnt]; + periodics [active] = periodics [periodiccnt + HEAP0]; adjustheap (periodics, periodiccnt, active); } } + EV_FREQUENT_CHECK; + ev_stop (EV_A_ (W)w); } @@ -1904,6 +2255,8 @@ evpipe_init (EV_A); + EV_FREQUENT_CHECK; + { #ifndef _WIN32 sigset_t full, prev; @@ -1924,15 +2277,17 @@ if (!((WL)w)->next) { #if _WIN32 - signal (w->signum, sighandler); + signal (w->signum, ev_sighandler); #else struct sigaction sa; - sa.sa_handler = sighandler; + sa.sa_handler = ev_sighandler; sigfillset (&sa.sa_mask); sa.sa_flags = SA_RESTART; /* if restarting works we save one iteration */ sigaction (w->signum, &sa, 0); #endif } + + EV_FREQUENT_CHECK; } void noinline @@ -1942,11 +2297,15 @@ if (expect_false (!ev_is_active (w))) return; + EV_FREQUENT_CHECK; + wlist_del (&signals [w->signum - 1].head, (WL)w); ev_stop (EV_A_ (W)w); if (!signals [w->signum - 1].head) signal (w->signum, SIG_DFL); + + EV_FREQUENT_CHECK; } void @@ -1958,8 +2317,12 @@ if (expect_false (ev_is_active (w))) return; + EV_FREQUENT_CHECK; + ev_start (EV_A_ (W)w, 1); wlist_add (&childs [w->pid & (EV_PID_HASHSIZE - 1)], (WL)w); + + EV_FREQUENT_CHECK; } void @@ -1969,8 +2332,12 @@ if (expect_false (!ev_is_active (w))) return; + EV_FREQUENT_CHECK; + wlist_del (&childs [w->pid & (EV_PID_HASHSIZE - 1)], (WL)w); ev_stop (EV_A_ (W)w); + + EV_FREQUENT_CHECK; } #if EV_STAT_ENABLE @@ -1998,6 +2365,8 @@ ev_timer_start (EV_A_ &w->timer); /* this is not race-free, so we still need to recheck periodically */ /* monitor some parent directory for speedup hints */ + /* note that exceeding the hardcoded limit is not a correctness 
issue, */ + /* but an efficiency issue only */ if ((errno == ENOENT || errno == EACCES) && strlen (w->path) < 4096) { char path [4096]; @@ -2206,6 +2575,8 @@ ev_timer_start (EV_A_ &w->timer); ev_start (EV_A_ (W)w, 1); + + EV_FREQUENT_CHECK; } void @@ -2215,12 +2586,16 @@ if (expect_false (!ev_is_active (w))) return; + EV_FREQUENT_CHECK; + #if EV_USE_INOTIFY infy_del (EV_A_ w); #endif ev_timer_stop (EV_A_ &w->timer); ev_stop (EV_A_ (W)w); + + EV_FREQUENT_CHECK; } #endif @@ -2233,6 +2608,8 @@ pri_adjust (EV_A_ (W)w); + EV_FREQUENT_CHECK; + { int active = ++idlecnt [ABSPRI (w)]; @@ -2242,6 +2619,8 @@ array_needsize (ev_idle *, idles [ABSPRI (w)], idlemax [ABSPRI (w)], active, EMPTY2); idles [ABSPRI (w)][active - 1] = w; } + + EV_FREQUENT_CHECK; } void @@ -2251,15 +2630,19 @@ if (expect_false (!ev_is_active (w))) return; + EV_FREQUENT_CHECK; + { - int active = ((W)w)->active; + int active = ev_active (w); idles [ABSPRI (w)][active - 1] = idles [ABSPRI (w)][--idlecnt [ABSPRI (w)]]; - ((W)idles [ABSPRI (w)][active - 1])->active = active; + ev_active (idles [ABSPRI (w)][active - 1]) = active; ev_stop (EV_A_ (W)w); --idleall; } + + EV_FREQUENT_CHECK; } #endif @@ -2269,9 +2652,13 @@ if (expect_false (ev_is_active (w))) return; + EV_FREQUENT_CHECK; + ev_start (EV_A_ (W)w, ++preparecnt); array_needsize (ev_prepare *, prepares, preparemax, preparecnt, EMPTY2); prepares [preparecnt - 1] = w; + + EV_FREQUENT_CHECK; } void @@ -2281,13 +2668,18 @@ if (expect_false (!ev_is_active (w))) return; + EV_FREQUENT_CHECK; + { - int active = ((W)w)->active; + int active = ev_active (w); + prepares [active - 1] = prepares [--preparecnt]; - ((W)prepares [active - 1])->active = active; + ev_active (prepares [active - 1]) = active; } ev_stop (EV_A_ (W)w); + + EV_FREQUENT_CHECK; } void @@ -2296,9 +2688,13 @@ if (expect_false (ev_is_active (w))) return; + EV_FREQUENT_CHECK; + ev_start (EV_A_ (W)w, ++checkcnt); array_needsize (ev_check *, checks, checkmax, checkcnt, EMPTY2); checks [checkcnt - 1] = w; + + EV_FREQUENT_CHECK; } void @@ -2308,13 +2704,18 @@ if (expect_false (!ev_is_active (w))) return; + EV_FREQUENT_CHECK; + { - int active = ((W)w)->active; + int active = ev_active (w); + checks [active - 1] = checks [--checkcnt]; - ((W)checks [active - 1])->active = active; + ev_active (checks [active - 1]) = active; } ev_stop (EV_A_ (W)w); + + EV_FREQUENT_CHECK; } #if EV_EMBED_ENABLE @@ -2371,6 +2772,8 @@ ev_io_init (&w->io, embed_io_cb, backend_fd, EV_READ); } + EV_FREQUENT_CHECK; + ev_set_priority (&w->io, ev_priority (w)); ev_io_start (EV_A_ &w->io); @@ -2381,6 +2784,8 @@ /*ev_idle_init (&w->idle, e,bed_idle_cb);*/ ev_start (EV_A_ (W)w, 1); + + EV_FREQUENT_CHECK; } void @@ -2390,10 +2795,14 @@ if (expect_false (!ev_is_active (w))) return; + EV_FREQUENT_CHECK; + ev_io_stop (EV_A_ &w->io); ev_prepare_stop (EV_A_ &w->prepare); ev_stop (EV_A_ (W)w); + + EV_FREQUENT_CHECK; } #endif @@ -2404,9 +2813,13 @@ if (expect_false (ev_is_active (w))) return; + EV_FREQUENT_CHECK; + ev_start (EV_A_ (W)w, ++forkcnt); array_needsize (ev_fork *, forks, forkmax, forkcnt, EMPTY2); forks [forkcnt - 1] = w; + + EV_FREQUENT_CHECK; } void @@ -2416,13 +2829,18 @@ if (expect_false (!ev_is_active (w))) return; + EV_FREQUENT_CHECK; + { - int active = ((W)w)->active; + int active = ev_active (w); + forks [active - 1] = forks [--forkcnt]; - ((W)forks [active - 1])->active = active; + ev_active (forks [active - 1]) = active; } ev_stop (EV_A_ (W)w); + + EV_FREQUENT_CHECK; } #endif @@ -2435,9 +2853,13 @@ evpipe_init (EV_A); + EV_FREQUENT_CHECK; + 
ev_start (EV_A_ (W)w, ++asynccnt); array_needsize (ev_async *, asyncs, asyncmax, asynccnt, EMPTY2); asyncs [asynccnt - 1] = w; + + EV_FREQUENT_CHECK; } void @@ -2447,20 +2869,25 @@ if (expect_false (!ev_is_active (w))) return; + EV_FREQUENT_CHECK; + { - int active = ((W)w)->active; + int active = ev_active (w); + asyncs [active - 1] = asyncs [--asynccnt]; - ((W)asyncs [active - 1])->active = active; + ev_active (asyncs [active - 1]) = active; } ev_stop (EV_A_ (W)w); + + EV_FREQUENT_CHECK; } void ev_async_send (EV_P_ ev_async *w) { w->sent = 1; - evpipe_write (EV_A_ 0, 1); + evpipe_write (EV_A_ &gotasync); } #endif
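
The realloc workaround added above (ev_realloc_emul) can be illustrated on its own. This is a minimal sketch, not libev code; the function name realloc_or_free is illustrative only. It shows the same idea: some libcs of the time (notably OpenBSD and Darwin) did not treat realloc (ptr, 0) as a free, so a zero size is routed to free () explicitly.

#include <stdlib.h>

/* emulate a conforming realloc: realloc (ptr, 0) must release ptr */
static void *
realloc_or_free (void *ptr, long size)
{
  if (size)
    return realloc (ptr, size);

  free (ptr);
  return 0;
}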
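
The 4-heap index arithmetic introduced with EV_USE_4HEAP can be checked in isolation. The sketch below reuses the DHEAP/HEAP0/HPARENT definitions from the patch; the HCHILD0 macro is an illustrative name (the patch computes the child position inline in downheap) and main is only a self-test.

#include <assert.h>
#include <stdio.h>

#define DHEAP 4
#define HEAP0 (DHEAP - 1)                              /* index of first element */
#define HPARENT(k) ((((k) - HEAP0 - 1) / DHEAP) + HEAP0)
#define HCHILD0(k) (DHEAP * ((k) - HEAP0) + HEAP0 + 1) /* first of the four children */

int
main (void)
{
  int k;

  /* every child computed from a parent must map back to that parent */
  for (k = HEAP0; k < HEAP0 + 64; ++k)
    {
      int c;

      for (c = HCHILD0 (k); c < HCHILD0 (k) + DHEAP; ++c)
        assert (HPARENT (c) == k);
    }

  printf ("root at %d, its children start at %d\n", HEAP0, HCHILD0 (HEAP0));
  return 0;
}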
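
The new wakeup path (eventfd with pipe fallback, as in evpipe_init/evpipe_write/pipecb) follows this pattern. The sketch uses <sys/eventfd.h>, which modern glibc ships; the patch itself declares eventfd () by hand because glibc 2.7 only provided the stub. The wake_* names are illustrative, and error handling is reduced to the bare minimum.

#include <stdint.h>
#include <unistd.h>
#include <sys/eventfd.h>

static int wake_fd = -1;                 /* eventfd, if available */
static int wake_pipe [2] = { -1, -1 };   /* pipe fallback */

static void
wake_init (void)
{
  wake_fd = eventfd (0, 0);

  if (wake_fd < 0)
    while (pipe (wake_pipe))
      ; /* retry; libev calls syserr () here instead */
}

static void
wake_signal (void)
{
  if (wake_fd >= 0)
    {
      uint64_t counter = 1;
      write (wake_fd, &counter, sizeof (counter)); /* add 1 to the eventfd counter */
    }
  else
    write (wake_pipe [1], "", 1);                  /* any single byte will do */
}

static void
wake_drain (void)
{
  if (wake_fd >= 0)
    {
      uint64_t counter;
      read (wake_fd, &counter, sizeof (counter));  /* resets the counter */
    }
  else
    {
      char dummy;
      read (wake_pipe [0], &dummy, 1);
    }
}

int
main (void)
{
  wake_init ();
  wake_signal ();
  wake_drain ();
  return 0;
}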
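
The periodic rescheduling rule used in periodics_reify is worth seeing as a plain function: align the next trigger on offset + N * interval, then nudge it one interval forward if floating-point rounding left it too close to (or before) the current time. The function name next_periodic is illustrative, and EPSILON stands in for ev.c's TIME_EPSILON (whose exact value is not shown in this diff).

#include <math.h>
#include <stdio.h>

#define EPSILON 1e-4 /* stand-in for TIME_EPSILON */

static double
next_periodic (double now, double offset, double interval)
{
  double at = offset + ceil ((now - offset) / interval) * interval;

  /* if the next trigger is not sufficiently in the future, push it out */
  if (at - now < EPSILON)
    {
      at += interval;

      if (at < now) /* unreasonably small interval: clamp to now */
        at = now;
    }

  return at;
}

int
main (void)
{
  /* trigger every 60s, aligned to offset 0. */
  printf ("%f\n", next_periodic (1211410000.3, 0., 60.));
  return 0;
}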
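
Finally, the spirit of the EV_VERIFY/EV_FREQUENT_CHECK machinery: after every structural change the heaps can be walked and three invariants asserted, as checkheap () does above. The sketch below shows this for the simple 2-heap layout (HEAP0 == 1) with a cached timestamp per entry; the watcher/heap_entry types and check_heap name are simplified stand-ins, not libev's.

#include <assert.h>

typedef struct { double at; int active; } watcher;
typedef struct { double at; watcher *w; } heap_entry;

static void
check_heap (heap_entry *heap, int n)
{
  int i;

  for (i = 1; i <= n; ++i)                                /* entries live at [1 .. n] */
    {
      assert (heap [i].w->active == i);                   /* active index matches slot */
      assert (i == 1 || heap [i / 2].at <= heap [i].at);  /* heap order holds */
      assert (heap [i].at == heap [i].w->at);             /* cached at is up to date */
    }
}

int
main (void)
{
  watcher w1 = { 1.0, 1 }, w2 = { 2.0, 2 }, w3 = { 3.0, 3 };
  heap_entry heap [4] = { { 0 }, { 1.0, &w1 }, { 2.0, &w2 }, { 3.0, &w3 } };

  check_heap (heap, 3);
  return 0;
}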