/cvs/libev/ev.c

Comparing libev/ev.c (file contents):
Revision 1.214 by root, Tue Feb 19 19:21:20 2008 UTC vs.
Revision 1.238 by root, Thu May 8 20:49:12 2008 UTC

39 39
40#ifdef __cplusplus 40#ifdef __cplusplus
41extern "C" { 41extern "C" {
42#endif 42#endif
43 43
44/* this big block deduces configuration from config.h */
44#ifndef EV_STANDALONE 45#ifndef EV_STANDALONE
45# ifdef EV_CONFIG_H 46# ifdef EV_CONFIG_H
46# include EV_CONFIG_H 47# include EV_CONFIG_H
47# else 48# else
48# include "config.h" 49# include "config.h"
118# else 119# else
119# define EV_USE_INOTIFY 0 120# define EV_USE_INOTIFY 0
120# endif 121# endif
121# endif 122# endif
122 123
124# ifndef EV_USE_EVENTFD
125# if HAVE_EVENTFD
126# define EV_USE_EVENTFD 1
127# else
128# define EV_USE_EVENTFD 0
129# endif
130# endif
131
123#endif 132#endif
124 133
125#include <math.h> 134#include <math.h>
126#include <stdlib.h> 135#include <stdlib.h>
127#include <fcntl.h> 136#include <fcntl.h>
152# ifndef EV_SELECT_IS_WINSOCKET 161# ifndef EV_SELECT_IS_WINSOCKET
153# define EV_SELECT_IS_WINSOCKET 1 162# define EV_SELECT_IS_WINSOCKET 1
154# endif 163# endif
155#endif 164#endif
156 165
157/**/ 166/* this block tries to deduce configuration from header-defined symbols and defaults */
158 167
159#ifndef EV_USE_MONOTONIC 168#ifndef EV_USE_MONOTONIC
160# define EV_USE_MONOTONIC 0 169# define EV_USE_MONOTONIC 0
161#endif 170#endif
162 171
179# define EV_USE_POLL 1 188# define EV_USE_POLL 1
180# endif 189# endif
181#endif 190#endif
182 191
183#ifndef EV_USE_EPOLL 192#ifndef EV_USE_EPOLL
193# if __linux && (__GLIBC__ > 2 || (__GLIBC__ == 2 && __GLIBC_MINOR__ >= 4))
194# define EV_USE_EPOLL 1
195# else
184# define EV_USE_EPOLL 0 196# define EV_USE_EPOLL 0
197# endif
185#endif 198#endif
186 199
187#ifndef EV_USE_KQUEUE 200#ifndef EV_USE_KQUEUE
188# define EV_USE_KQUEUE 0 201# define EV_USE_KQUEUE 0
189#endif 202#endif
191#ifndef EV_USE_PORT 204#ifndef EV_USE_PORT
192# define EV_USE_PORT 0 205# define EV_USE_PORT 0
193#endif 206#endif
194 207
195#ifndef EV_USE_INOTIFY 208#ifndef EV_USE_INOTIFY
209# if __linux && (__GLIBC__ > 2 || (__GLIBC__ == 2 && __GLIBC_MINOR__ >= 4))
210# define EV_USE_INOTIFY 1
211# else
196# define EV_USE_INOTIFY 0 212# define EV_USE_INOTIFY 0
213# endif
197#endif 214#endif
198 215
199#ifndef EV_PID_HASHSIZE 216#ifndef EV_PID_HASHSIZE
200# if EV_MINIMAL 217# if EV_MINIMAL
201# define EV_PID_HASHSIZE 1 218# define EV_PID_HASHSIZE 1
210# else 227# else
211# define EV_INOTIFY_HASHSIZE 16 228# define EV_INOTIFY_HASHSIZE 16
212# endif 229# endif
213#endif 230#endif
214 231
215/**/ 232#ifndef EV_USE_EVENTFD
233# if __linux && (__GLIBC__ > 2 || (__GLIBC__ == 2 && __GLIBC_MINOR__ >= 7))
234# define EV_USE_EVENTFD 1
235# else
236# define EV_USE_EVENTFD 0
237# endif
238#endif
239
240/* this block fixes any misconfiguration where we know we run into trouble otherwise */
216 241
217#ifndef CLOCK_MONOTONIC 242#ifndef CLOCK_MONOTONIC
218# undef EV_USE_MONOTONIC 243# undef EV_USE_MONOTONIC
219# define EV_USE_MONOTONIC 0 244# define EV_USE_MONOTONIC 0
220#endif 245#endif
239# include <sys/inotify.h> 264# include <sys/inotify.h>
240#endif 265#endif
241 266
242#if EV_SELECT_IS_WINSOCKET 267#if EV_SELECT_IS_WINSOCKET
243# include <winsock.h> 268# include <winsock.h>
269#endif
270
271#if EV_USE_EVENTFD
272/* our minimum requirement is glibc 2.7 which has the stub, but not the header */
273# include <stdint.h>
274# ifdef __cplusplus
275extern "C" {
276# endif
277int eventfd (unsigned int initval, int flags);
278# ifdef __cplusplus
279}
280# endif
244#endif 281#endif
245 282
246/**/ 283/**/
247 284
248/* 285/*
263# define expect(expr,value) __builtin_expect ((expr),(value)) 300# define expect(expr,value) __builtin_expect ((expr),(value))
264# define noinline __attribute__ ((noinline)) 301# define noinline __attribute__ ((noinline))
265#else 302#else
266# define expect(expr,value) (expr) 303# define expect(expr,value) (expr)
267# define noinline 304# define noinline
268# if __STDC_VERSION__ < 199901L 305# if __STDC_VERSION__ < 199901L && __GNUC__ < 2
269# define inline 306# define inline
270# endif 307# endif
271#endif 308#endif
272 309
273#define expect_false(expr) expect ((expr) != 0, 0) 310#define expect_false(expr) expect ((expr) != 0, 0)
288 325
289typedef ev_watcher *W; 326typedef ev_watcher *W;
290typedef ev_watcher_list *WL; 327typedef ev_watcher_list *WL;
291typedef ev_watcher_time *WT; 328typedef ev_watcher_time *WT;
292 329
330#define ev_active(w) ((W)(w))->active
331#define ev_at(w) ((WT)(w))->at
332
293#if EV_USE_MONOTONIC 333#if EV_USE_MONOTONIC
294/* sig_atomic_t is used to avoid per-thread variables or locking but still */ 334/* sig_atomic_t is used to avoid per-thread variables or locking but still */
295/* giving it a reasonably high chance of working on typical architectures */ 335/* giving it a reasonably high chance of working on typical architectures */
296static EV_ATOMIC_T have_monotonic; /* did clock_gettime (CLOCK_MONOTONIC) work? */ 336static EV_ATOMIC_T have_monotonic; /* did clock_gettime (CLOCK_MONOTONIC) work? */
297#endif 337#endif
323 perror (msg); 363 perror (msg);
324 abort (); 364 abort ();
325 } 365 }
326} 366}
327 367
368static void *
369ev_realloc_emul (void *ptr, long size)
370{
371 /* some systems, notably openbsd and darwin, fail to properly
372 * implement realloc (x, 0) (as required by both ansi c-98 and
373 * the single unix specification), so work around them here.
374 */
375
376 if (size)
377 return realloc (ptr, size);
378
379 free (ptr);
380 return 0;
381}
382
328static void *(*alloc)(void *ptr, long size); 383static void *(*alloc)(void *ptr, long size) = ev_realloc_emul;
329 384
330void 385void
331ev_set_allocator (void *(*cb)(void *ptr, long size)) 386ev_set_allocator (void *(*cb)(void *ptr, long size))
332{ 387{
333 alloc = cb; 388 alloc = cb;
334} 389}
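
The diff above makes ev_realloc_emul the default allocator, so realloc (x, 0) misbehaviour on openbsd/darwin is worked around even when no custom allocator is installed. Applications can still override it through ev_set_allocator; a minimal sketch of such an override (persistent_realloc is a hypothetical name, and retry-instead-of-abort is just one possible policy):

#include <stdlib.h>
#include <unistd.h>

/* hypothetical allocator for ev_set_allocator (): retry on failure
   instead of letting ev_realloc () abort the process */
static void *
persistent_realloc (void *ptr, long size)
{
  if (!size)
    {
      free (ptr);            /* emulate realloc (ptr, 0), as ev_realloc_emul does */
      return 0;
    }

  for (;;)
    {
      void *newptr = realloc (ptr, size);

      if (newptr)
        return newptr;

      sleep (60);            /* wait and hope some memory is freed */
    }
}

/* before creating any loops: ev_set_allocator (persistent_realloc); */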
335 390
336inline_speed void * 391inline_speed void *
337ev_realloc (void *ptr, long size) 392ev_realloc (void *ptr, long size)
338{ 393{
339 ptr = alloc ? alloc (ptr, size) : realloc (ptr, size); 394 ptr = alloc (ptr, size);
340 395
341 if (!ptr && size) 396 if (!ptr && size)
342 { 397 {
343 fprintf (stderr, "libev: cannot allocate %ld bytes, aborting.", size); 398 fprintf (stderr, "libev: cannot allocate %ld bytes, aborting.", size);
344 abort (); 399 abort ();
451 ts.tv_sec = (time_t)delay; 506 ts.tv_sec = (time_t)delay;
452 ts.tv_nsec = (long)((delay - (ev_tstamp)(ts.tv_sec)) * 1e9); 507 ts.tv_nsec = (long)((delay - (ev_tstamp)(ts.tv_sec)) * 1e9);
453 508
454 nanosleep (&ts, 0); 509 nanosleep (&ts, 0);
455#elif defined(_WIN32) 510#elif defined(_WIN32)
456 Sleep (delay * 1e3); 511 Sleep ((unsigned long)(delay * 1e3));
457#else 512#else
458 struct timeval tv; 513 struct timeval tv;
459 514
460 tv.tv_sec = (time_t)delay; 515 tv.tv_sec = (time_t)delay;
461 tv.tv_usec = (long)((delay - (ev_tstamp)(tv.tv_sec)) * 1e6); 516 tv.tv_usec = (long)((delay - (ev_tstamp)(tv.tv_sec)) * 1e6);
464#endif 519#endif
465 } 520 }
466} 521}
467 522
468/*****************************************************************************/ 523/*****************************************************************************/
524
525#define MALLOC_ROUND 4096 /* prefer to allocate in chunks of this size, must be 2**n and >> 4 longs */
469 526
470int inline_size 527int inline_size
471array_nextsize (int elem, int cur, int cnt) 528array_nextsize (int elem, int cur, int cnt)
472{ 529{
473 int ncur = cur + 1; 530 int ncur = cur + 1;
474 531
475 do 532 do
476 ncur <<= 1; 533 ncur <<= 1;
477 while (cnt > ncur); 534 while (cnt > ncur);
478 535
479 /* if size > 4096, round to 4096 - 4 * longs to accommodate malloc overhead */ 536 /* if size is large, round to MALLOC_ROUND - 4 * longs to accommodate malloc overhead */
480 if (elem * ncur > 4096) 537 if (elem * ncur > MALLOC_ROUND - sizeof (void *) * 4)
481 { 538 {
482 ncur *= elem; 539 ncur *= elem;
483 ncur = (ncur + elem + 4095 + sizeof (void *) * 4) & ~4095; 540 ncur = (ncur + elem + (MALLOC_ROUND - 1) + sizeof (void *) * 4) & ~(MALLOC_ROUND - 1);
484 ncur = ncur - sizeof (void *) * 4; 541 ncur = ncur - sizeof (void *) * 4;
485 ncur /= elem; 542 ncur /= elem;
486 } 543 }
487 544
488 return ncur; 545 return ncur;
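
For reference, the effect of the MALLOC_ROUND padding above can be reproduced outside ev.c. A standalone sketch of the same arithmetic (the concrete numbers in the comments assume 8-byte pointers):

#include <stdio.h>

#define MALLOC_ROUND 4096

/* same growth policy as array_nextsize above: keep doubling the slot count
   until it covers the request, then pad the byte size up to a MALLOC_ROUND
   boundary minus four pointers of assumed allocator overhead */
static int
next_size (int elem, int cur, int cnt)
{
  int ncur = cur + 1;

  do
    ncur <<= 1;
  while (cnt > ncur);

  if (elem * ncur > MALLOC_ROUND - sizeof (void *) * 4)
    {
      ncur *= elem;
      ncur = (ncur + elem + (MALLOC_ROUND - 1) + sizeof (void *) * 4) & ~(MALLOC_ROUND - 1);
      ncur = ncur - sizeof (void *) * 4;
      ncur /= elem;
    }

  return ncur;
}

int
main (void)
{
  /* growing an array of 16-byte elements from 300 slots to hold 400:
     doubling yields 602 slots, padding turns that into 766 slots,
     i.e. 12256 bytes == 3 * 4096 - 4 * sizeof (void *) on 64 bit */
  printf ("%d\n", next_size (16, 300, 400));
  return 0;
}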
702 } 759 }
703} 760}
704 761
705/*****************************************************************************/ 762/*****************************************************************************/
706 763
764/*
765 * at the moment we allow libev the luxury of two heaps,
766 * a small-code-size 2-heap one and a ~1.5kb larger 4-heap
767 * which is more cache-efficient.
768 * the difference is about 5% with 50000+ watchers.
769 */
770#define USE_4HEAP !EV_MINIMAL
771#define USE_4HEAP 1 /* they do not work correctly */
772#if USE_4HEAP
773
774#define DHEAP 4
775#define HEAP0 (DHEAP - 1) /* index of first element in heap */
776
777/* towards the root */
707void inline_speed 778void inline_speed
708upheap (WT *heap, int k) 779upheap (WT *heap, int k)
709{ 780{
710 WT w = heap [k]; 781 WT w = heap [k];
711 782
712 while (k) 783 for (;;)
713 { 784 {
714 int p = (k - 1) >> 1; 785 int p = ((k - HEAP0 - 1) / DHEAP) + HEAP0;
715 786
716 if (heap [p]->at <= w->at) 787 if (p == k || heap [p]->at <= w->at)
717 break; 788 break;
718 789
719 heap [k] = heap [p]; 790 heap [k] = heap [p];
720 ((W)heap [k])->active = k + 1; 791 ev_active (heap [k]) = k;
721 k = p; 792 k = p;
722 } 793 }
723 794
724 heap [k] = w; 795 heap [k] = w;
725 ((W)heap [k])->active = k + 1; 796 ev_active (heap [k]) = k;
726} 797}
727 798
799/* away from the root */
728void inline_speed 800void inline_speed
729downheap (WT *heap, int N, int k) 801downheap (WT *heap, int N, int k)
730{ 802{
731 WT w = heap [k]; 803 WT w = heap [k];
804 WT *E = heap + N + HEAP0;
732 805
733 for (;;) 806 for (;;)
734 { 807 {
808 ev_tstamp minat;
809 WT *minpos;
810 WT *pos = heap + DHEAP * (k - HEAP0) + HEAP0;
811
812 /* find the minimum child */
813 if (expect_true (pos + DHEAP - 1 < E))
814 {
815 /* fast path */
816 (minpos = pos + 0), (minat = (*minpos)->at);
817 if (pos [1]->at < minat) (minpos = pos + 1), (minat = (*minpos)->at);
818 if (pos [2]->at < minat) (minpos = pos + 2), (minat = (*minpos)->at);
819 if (pos [3]->at < minat) (minpos = pos + 3), (minat = (*minpos)->at);
820 }
821 else
822 {
823 /* slow path */
824 if (pos >= E)
825 break;
826 (minpos = pos + 0), (minat = (*minpos)->at);
827 if (pos + 1 < E && pos [1]->at < minat) (minpos = pos + 1), (minat = (*minpos)->at);
828 if (pos + 2 < E && pos [2]->at < minat) (minpos = pos + 2), (minat = (*minpos)->at);
829 if (pos + 3 < E && pos [3]->at < minat) (minpos = pos + 3), (minat = (*minpos)->at);
830 }
831
832 if (w->at <= minat)
833 break;
834
835 ev_active (*minpos) = k;
836 heap [k] = *minpos;
837
838 k = minpos - heap;
839 }
840
841 heap [k] = w;
842 ev_active (heap [k]) = k;
843}
844
845#else /* !USE_4HEAP */
846
847#define HEAP0 1
848
849/* towards the root */
850void inline_speed
851upheap (WT *heap, int k)
852{
853 WT w = heap [k];
854
855 for (;;)
856 {
857 int p = k >> 1;
858
859 /* maybe we could use a dummy element at heap [0]? */
860 if (!p || heap [p]->at <= w->at)
861 break;
862
863 heap [k] = heap [p];
864 ev_active (heap [k]) = k;
865 k = p;
866 }
867
868 heap [k] = w;
869 ev_active (heap [k]) = k;
870}
871
872/* away from the root */
873void inline_speed
874downheap (WT *heap, int N, int k)
875{
876 WT w = heap [k];
877
878 for (;;)
879 {
735 int c = (k << 1) + 1; 880 int c = k << 1;
736 881
737 if (c >= N) 882 if (c > N)
738 break; 883 break;
739 884
740 c += c + 1 < N && heap [c]->at > heap [c + 1]->at 885 c += c + 1 < N && heap [c]->at > heap [c + 1]->at
741 ? 1 : 0; 886 ? 1 : 0;
742 887
743 if (w->at <= heap [c]->at) 888 if (w->at <= heap [c]->at)
744 break; 889 break;
745 890
746 heap [k] = heap [c]; 891 heap [k] = heap [c];
747 ((W)heap [k])->active = k + 1; 892 ((W)heap [k])->active = k;
748 893
749 k = c; 894 k = c;
750 } 895 }
751 896
752 heap [k] = w; 897 heap [k] = w;
753 ((W)heap [k])->active = k + 1; 898 ev_active (heap [k]) = k;
754} 899}
900#endif
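
Both variants above are ordinary sift-up/sift-down min-heaps keyed on ->at; only the fan-out and the index of the first element (HEAP0) differ. A stripped-down, standalone illustration of the 1-based binary case (the #else branch, HEAP0 == 1), using plain doubles instead of watcher pointers:

#include <stdio.h>

/* sift-down on a 1-based binary min-heap: children of k live at 2k and 2k+1 */
static void
sift_down (double *heap, int N, int k)
{
  double v = heap [k];

  for (;;)
    {
      int c = k << 1;                              /* first child of k */

      if (c > N)
        break;

      c += c + 1 <= N && heap [c + 1] < heap [c];  /* pick the smaller child */

      if (v <= heap [c])
        break;

      heap [k] = heap [c];
      k = c;
    }

  heap [k] = v;
}

int
main (void)
{
  double heap [8] = { 0., 9., 3., 5., 4., 7., 6., 8. };   /* slot 0 unused */
  int i;

  sift_down (heap, 7, 1);                          /* restore the heap property at the root */

  for (i = 1; i <= 7; ++i)
    printf ("%g ", heap [i]);                      /* prints 3 4 5 9 7 6 8 */

  printf ("\n");
  return 0;
}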
755 901
756void inline_size 902void inline_size
757adjustheap (WT *heap, int N, int k) 903adjustheap (WT *heap, int N, int k)
758{ 904{
759 upheap (heap, k); 905 upheap (heap, k);
802static void noinline 948static void noinline
803evpipe_init (EV_P) 949evpipe_init (EV_P)
804{ 950{
805 if (!ev_is_active (&pipeev)) 951 if (!ev_is_active (&pipeev))
806 { 952 {
953#if EV_USE_EVENTFD
954 if ((evfd = eventfd (0, 0)) >= 0)
955 {
956 evpipe [0] = -1;
957 fd_intern (evfd);
958 ev_io_set (&pipeev, evfd, EV_READ);
959 }
960 else
961#endif
962 {
807 while (pipe (evpipe)) 963 while (pipe (evpipe))
808 syserr ("(libev) error creating signal/async pipe"); 964 syserr ("(libev) error creating signal/async pipe");
809 965
810 fd_intern (evpipe [0]); 966 fd_intern (evpipe [0]);
811 fd_intern (evpipe [1]); 967 fd_intern (evpipe [1]);
812
813 ev_io_set (&pipeev, evpipe [0], EV_READ); 968 ev_io_set (&pipeev, evpipe [0], EV_READ);
969 }
970
814 ev_io_start (EV_A_ &pipeev); 971 ev_io_start (EV_A_ &pipeev);
815 ev_unref (EV_A); /* watcher should not keep loop alive */ 972 ev_unref (EV_A); /* watcher should not keep loop alive */
816 } 973 }
817} 974}
818 975
819void inline_size 976void inline_size
820evpipe_write (EV_P_ EV_ATOMIC_T *flag) 977evpipe_write (EV_P_ EV_ATOMIC_T *flag)
821{ 978{
822 if (!*flag) 979 if (!*flag)
823 { 980 {
824 int old_errno = errno; /* save errno becaue write might clobber it */ 981 int old_errno = errno; /* save errno because write might clobber it */
825 982
826 *flag = 1; 983 *flag = 1;
984
985#if EV_USE_EVENTFD
986 if (evfd >= 0)
987 {
988 uint64_t counter = 1;
989 write (evfd, &counter, sizeof (uint64_t));
990 }
991 else
992#endif
827 write (evpipe [1], &old_errno, 1); 993 write (evpipe [1], &old_errno, 1);
828 994
829 errno = old_errno; 995 errno = old_errno;
830 } 996 }
831} 997}
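
evpipe_init/evpipe_write above prefer an eventfd over a pipe for waking the loop, and pipecb below drains it again. A minimal standalone sketch of that wakeup pattern (Linux-only; it assumes <sys/eventfd.h> is available, i.e. glibc 2.8 or newer, whereas ev.c declares eventfd () by hand so glibc 2.7 works too):

#include <stdio.h>
#include <stdint.h>
#include <unistd.h>
#include <sys/eventfd.h>

int
main (void)
{
  uint64_t counter = 1;
  int evfd = eventfd (0, 0);

  if (evfd < 0)
    return 1;                                    /* ev.c would fall back to pipe () here */

  write (evfd, &counter, sizeof (uint64_t));     /* "evpipe_write": wake the loop */
  read  (evfd, &counter, sizeof (uint64_t));     /* "pipecb": drain the counter */

  printf ("drained counter: %llu\n", (unsigned long long)counter);

  close (evfd);
  return 0;
}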
832 998
833static void 999static void
834pipecb (EV_P_ ev_io *iow, int revents) 1000pipecb (EV_P_ ev_io *iow, int revents)
835{ 1001{
1002#if EV_USE_EVENTFD
1003 if (evfd >= 0)
836 { 1004 {
837 int dummy; 1005 uint64_t counter;
1006 read (evfd, &counter, sizeof (uint64_t));
1007 }
1008 else
1009#endif
1010 {
1011 char dummy;
838 read (evpipe [0], &dummy, 1); 1012 read (evpipe [0], &dummy, 1);
839 } 1013 }
840 1014
841 if (gotsig && ev_is_default_loop (EV_A)) 1015 if (gotsig && ev_is_default_loop (EV_A))
842 { 1016 {
843 int signum; 1017 int signum;
844 gotsig = 0; 1018 gotsig = 0;
865} 1039}
866 1040
867/*****************************************************************************/ 1041/*****************************************************************************/
868 1042
869static void 1043static void
870sighandler (int signum) 1044ev_sighandler (int signum)
871{ 1045{
872#if EV_MULTIPLICITY 1046#if EV_MULTIPLICITY
873 struct ev_loop *loop = &default_loop_struct; 1047 struct ev_loop *loop = &default_loop_struct;
874#endif 1048#endif
875 1049
876#if _WIN32 1050#if _WIN32
877 signal (signum, sighandler); 1051 signal (signum, ev_sighandler);
878#endif 1052#endif
879 1053
880 signals [signum - 1].gotsig = 1; 1054 signals [signum - 1].gotsig = 1;
881 evpipe_write (EV_A_ &gotsig); 1055 evpipe_write (EV_A_ &gotsig);
882} 1056}
912#ifndef WIFCONTINUED 1086#ifndef WIFCONTINUED
913# define WIFCONTINUED(status) 0 1087# define WIFCONTINUED(status) 0
914#endif 1088#endif
915 1089
916void inline_speed 1090void inline_speed
917child_reap (EV_P_ ev_signal *sw, int chain, int pid, int status) 1091child_reap (EV_P_ int chain, int pid, int status)
918{ 1092{
919 ev_child *w; 1093 ev_child *w;
920 int traced = WIFSTOPPED (status) || WIFCONTINUED (status); 1094 int traced = WIFSTOPPED (status) || WIFCONTINUED (status);
921 1095
922 for (w = (ev_child *)childs [chain & (EV_PID_HASHSIZE - 1)]; w; w = (ev_child *)((WL)w)->next) 1096 for (w = (ev_child *)childs [chain & (EV_PID_HASHSIZE - 1)]; w; w = (ev_child *)((WL)w)->next)
923 { 1097 {
924 if ((w->pid == pid || !w->pid) 1098 if ((w->pid == pid || !w->pid)
925 && (!traced || (w->flags & 1))) 1099 && (!traced || (w->flags & 1)))
926 { 1100 {
927 ev_set_priority (w, ev_priority (sw)); /* need to do it *now* */ 1101 ev_set_priority (w, EV_MAXPRI); /* need to do it *now*, this *must* be the same prio as the signal watcher itself */
928 w->rpid = pid; 1102 w->rpid = pid;
929 w->rstatus = status; 1103 w->rstatus = status;
930 ev_feed_event (EV_A_ (W)w, EV_CHILD); 1104 ev_feed_event (EV_A_ (W)w, EV_CHILD);
931 } 1105 }
932 } 1106 }
946 if (!WCONTINUED 1120 if (!WCONTINUED
947 || errno != EINVAL 1121 || errno != EINVAL
948 || 0 >= (pid = waitpid (-1, &status, WNOHANG | WUNTRACED))) 1122 || 0 >= (pid = waitpid (-1, &status, WNOHANG | WUNTRACED)))
949 return; 1123 return;
950 1124
951 /* make sure we are called again until all childs have been reaped */ 1125 /* make sure we are called again until all children have been reaped */
952 /* we need to do it this way so that the callback gets called before we continue */ 1126 /* we need to do it this way so that the callback gets called before we continue */
953 ev_feed_event (EV_A_ (W)sw, EV_SIGNAL); 1127 ev_feed_event (EV_A_ (W)sw, EV_SIGNAL);
954 1128
955 child_reap (EV_A_ sw, pid, pid, status); 1129 child_reap (EV_A_ pid, pid, status);
956 if (EV_PID_HASHSIZE > 1) 1130 if (EV_PID_HASHSIZE > 1)
957 child_reap (EV_A_ sw, 0, pid, status); /* this might trigger a watcher twice, but feed_event catches that */ 1131 child_reap (EV_A_ 0, pid, status); /* this might trigger a watcher twice, but feed_event catches that */
958} 1132}
959 1133
960#endif 1134#endif
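
For context, the classic non-blocking reap loop that SIGCHLD handling builds on looks like the sketch below. Note that childcb above deliberately does not loop: it reaps one child per invocation and re-feeds the signal event so that watcher callbacks run between reaps.

#include <stdio.h>
#include <unistd.h>
#include <sys/types.h>
#include <sys/wait.h>

/* generic sketch, not how childcb iterates: reap every exited child
   without blocking */
static void
reap_children (void)
{
  int status;
  pid_t pid;

  while ((pid = waitpid (-1, &status, WNOHANG)) > 0)
    if (WIFEXITED (status))
      printf ("child %d exited with status %d\n", (int)pid, WEXITSTATUS (status));
}

int
main (void)
{
  if (fork () == 0)
    _exit (7);          /* child exits immediately */

  sleep (1);            /* crude: give the child time to exit */
  reap_children ();
  return 0;
}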
961 1135
962/*****************************************************************************/ 1136/*****************************************************************************/
1105 if (!(flags & EVFLAG_NOENV) 1279 if (!(flags & EVFLAG_NOENV)
1106 && !enable_secure () 1280 && !enable_secure ()
1107 && getenv ("LIBEV_FLAGS")) 1281 && getenv ("LIBEV_FLAGS"))
1108 flags = atoi (getenv ("LIBEV_FLAGS")); 1282 flags = atoi (getenv ("LIBEV_FLAGS"));
1109 1283
1110 if (!(flags & 0x0000ffffUL)) 1284 if (!(flags & 0x0000ffffU))
1111 flags |= ev_recommended_backends (); 1285 flags |= ev_recommended_backends ();
1112 1286
1113#if EV_USE_PORT 1287#if EV_USE_PORT
1114 if (!backend && (flags & EVBACKEND_PORT )) backend = port_init (EV_A_ flags); 1288 if (!backend && (flags & EVBACKEND_PORT )) backend = port_init (EV_A_ flags);
1115#endif 1289#endif
1139 if (ev_is_active (&pipeev)) 1313 if (ev_is_active (&pipeev))
1140 { 1314 {
1141 ev_ref (EV_A); /* signal watcher */ 1315 ev_ref (EV_A); /* signal watcher */
1142 ev_io_stop (EV_A_ &pipeev); 1316 ev_io_stop (EV_A_ &pipeev);
1143 1317
1144 close (evpipe [0]); evpipe [0] = 0; 1318#if EV_USE_EVENTFD
1145 close (evpipe [1]); evpipe [1] = 0; 1319 if (evfd >= 0)
1320 close (evfd);
1321#endif
1322
1323 if (evpipe [0] >= 0)
1324 {
1325 close (evpipe [0]);
1326 close (evpipe [1]);
1327 }
1146 } 1328 }
1147 1329
1148#if EV_USE_INOTIFY 1330#if EV_USE_INOTIFY
1149 if (fs_fd >= 0) 1331 if (fs_fd >= 0)
1150 close (fs_fd); 1332 close (fs_fd);
1195#endif 1377#endif
1196 1378
1197 backend = 0; 1379 backend = 0;
1198} 1380}
1199 1381
1382#if EV_USE_INOTIFY
1200void inline_size infy_fork (EV_P); 1383void inline_size infy_fork (EV_P);
1384#endif
1201 1385
1202void inline_size 1386void inline_size
1203loop_fork (EV_P) 1387loop_fork (EV_P)
1204{ 1388{
1205#if EV_USE_PORT 1389#if EV_USE_PORT
1224 gotasync = 1; 1408 gotasync = 1;
1225#endif 1409#endif
1226 1410
1227 ev_ref (EV_A); 1411 ev_ref (EV_A);
1228 ev_io_stop (EV_A_ &pipeev); 1412 ev_io_stop (EV_A_ &pipeev);
1413
1414#if EV_USE_EVENTFD
1415 if (evfd >= 0)
1416 close (evfd);
1417#endif
1418
1419 if (evpipe [0] >= 0)
1420 {
1229 close (evpipe [0]); 1421 close (evpipe [0]);
1230 close (evpipe [1]); 1422 close (evpipe [1]);
1423 }
1231 1424
1232 evpipe_init (EV_A); 1425 evpipe_init (EV_A);
1233 /* now iterate over everything, in case we missed something */ 1426 /* now iterate over everything, in case we missed something */
1234 pipecb (EV_A_ &pipeev, EV_READ); 1427 pipecb (EV_A_ &pipeev, EV_READ);
1235 } 1428 }
1263void 1456void
1264ev_loop_fork (EV_P) 1457ev_loop_fork (EV_P)
1265{ 1458{
1266 postfork = 1; /* must be in line with ev_default_fork */ 1459 postfork = 1; /* must be in line with ev_default_fork */
1267} 1460}
1268
1269#endif 1461#endif
1270 1462
1271#if EV_MULTIPLICITY 1463#if EV_MULTIPLICITY
1272struct ev_loop * 1464struct ev_loop *
1273ev_default_loop_init (unsigned int flags) 1465ev_default_loop_init (unsigned int flags)
1354 EV_CB_INVOKE (p->w, p->events); 1546 EV_CB_INVOKE (p->w, p->events);
1355 } 1547 }
1356 } 1548 }
1357} 1549}
1358 1550
1359void inline_size
1360timers_reify (EV_P)
1361{
1362 while (timercnt && ((WT)timers [0])->at <= mn_now)
1363 {
1364 ev_timer *w = (ev_timer *)timers [0];
1365
1366 /*assert (("inactive timer on timer heap detected", ev_is_active (w)));*/
1367
1368 /* first reschedule or stop timer */
1369 if (w->repeat)
1370 {
1371 assert (("negative ev_timer repeat value found while processing timers", w->repeat > 0.));
1372
1373 ((WT)w)->at += w->repeat;
1374 if (((WT)w)->at < mn_now)
1375 ((WT)w)->at = mn_now;
1376
1377 downheap (timers, timercnt, 0);
1378 }
1379 else
1380 ev_timer_stop (EV_A_ w); /* nonrepeating: stop timer */
1381
1382 ev_feed_event (EV_A_ (W)w, EV_TIMEOUT);
1383 }
1384}
1385
1386#if EV_PERIODIC_ENABLE
1387void inline_size
1388periodics_reify (EV_P)
1389{
1390 while (periodiccnt && ((WT)periodics [0])->at <= ev_rt_now)
1391 {
1392 ev_periodic *w = (ev_periodic *)periodics [0];
1393
1394 /*assert (("inactive timer on periodic heap detected", ev_is_active (w)));*/
1395
1396 /* first reschedule or stop timer */
1397 if (w->reschedule_cb)
1398 {
1399 ((WT)w)->at = w->reschedule_cb (w, ev_rt_now + TIME_EPSILON);
1400 assert (("ev_periodic reschedule callback returned time in the past", ((WT)w)->at > ev_rt_now));
1401 downheap (periodics, periodiccnt, 0);
1402 }
1403 else if (w->interval)
1404 {
1405 ((WT)w)->at = w->offset + ceil ((ev_rt_now - w->offset) / w->interval) * w->interval;
1406 if (((WT)w)->at - ev_rt_now <= TIME_EPSILON) ((WT)w)->at += w->interval;
1407 assert (("ev_periodic timeout in the past detected while processing timers, negative interval?", ((WT)w)->at > ev_rt_now));
1408 downheap (periodics, periodiccnt, 0);
1409 }
1410 else
1411 ev_periodic_stop (EV_A_ w); /* nonrepeating: stop timer */
1412
1413 ev_feed_event (EV_A_ (W)w, EV_PERIODIC);
1414 }
1415}
1416
1417static void noinline
1418periodics_reschedule (EV_P)
1419{
1420 int i;
1421
1422 /* adjust periodics after time jump */
1423 for (i = 0; i < periodiccnt; ++i)
1424 {
1425 ev_periodic *w = (ev_periodic *)periodics [i];
1426
1427 if (w->reschedule_cb)
1428 ((WT)w)->at = w->reschedule_cb (w, ev_rt_now);
1429 else if (w->interval)
1430 ((WT)w)->at = w->offset + ceil ((ev_rt_now - w->offset) / w->interval) * w->interval;
1431 }
1432
1433 /* now rebuild the heap */
1434 for (i = periodiccnt >> 1; i--; )
1435 downheap (periodics, periodiccnt, i);
1436}
1437#endif
1438
1439#if EV_IDLE_ENABLE 1551#if EV_IDLE_ENABLE
1440void inline_size 1552void inline_size
1441idle_reify (EV_P) 1553idle_reify (EV_P)
1442{ 1554{
1443 if (expect_false (idleall)) 1555 if (expect_false (idleall))
1454 queue_events (EV_A_ (W *)idles [pri], idlecnt [pri], EV_IDLE); 1566 queue_events (EV_A_ (W *)idles [pri], idlecnt [pri], EV_IDLE);
1455 break; 1567 break;
1456 } 1568 }
1457 } 1569 }
1458 } 1570 }
1571}
1572#endif
1573
1574void inline_size
1575timers_reify (EV_P)
1576{
1577 while (timercnt && ev_at (timers [HEAP0]) <= mn_now)
1578 {
1579 ev_timer *w = (ev_timer *)timers [HEAP0];
1580
1581 /*assert (("inactive timer on timer heap detected", ev_is_active (w)));*/
1582
1583 /* first reschedule or stop timer */
1584 if (w->repeat)
1585 {
1586 assert (("negative ev_timer repeat value found while processing timers", w->repeat > 0.));
1587
1588 ev_at (w) += w->repeat;
1589 if (ev_at (w) < mn_now)
1590 ev_at (w) = mn_now;
1591
1592 downheap (timers, timercnt, HEAP0);
1593 }
1594 else
1595 ev_timer_stop (EV_A_ w); /* nonrepeating: stop timer */
1596
1597 ev_feed_event (EV_A_ (W)w, EV_TIMEOUT);
1598 }
1599}
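
A hedged usage sketch of a repeating timer as handled by timers_reify above (it assumes the ev.h from this tree with the default EV_MULTIPLICITY settings; the callback name is illustrative only):

#include <stdio.h>
#include "ev.h"

static void
timeout_cb (EV_P_ ev_timer *w, int revents)
{
  printf ("timer fired\n");    /* timers_reify re-arms w by adding w->repeat */
}

int
main (void)
{
  struct ev_loop *loop = ev_default_loop (0);
  ev_timer timer;

  ev_timer_init (&timer, timeout_cb, 2., 2.);   /* first after 2s, then every 2s */
  ev_timer_start (loop, &timer);

  ev_loop (loop, 0);
  return 0;
}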
1600
1601#if EV_PERIODIC_ENABLE
1602void inline_size
1603periodics_reify (EV_P)
1604{
1605 while (periodiccnt && ev_at (periodics [HEAP0]) <= ev_rt_now)
1606 {
1607 ev_periodic *w = (ev_periodic *)periodics [HEAP0];
1608
1609 /*assert (("inactive timer on periodic heap detected", ev_is_active (w)));*/
1610
1611 /* first reschedule or stop timer */
1612 if (w->reschedule_cb)
1613 {
1614 ev_at (w) = w->reschedule_cb (w, ev_rt_now + TIME_EPSILON);
1615 assert (("ev_periodic reschedule callback returned time in the past", ev_at (w) > ev_rt_now));
1616 downheap (periodics, periodiccnt, 1);
1617 }
1618 else if (w->interval)
1619 {
1620 ev_at (w) = w->offset + ceil ((ev_rt_now - w->offset) / w->interval) * w->interval;
1621 if (ev_at (w) - ev_rt_now <= TIME_EPSILON) ev_at (w) += w->interval;
1622 assert (("ev_periodic timeout in the past detected while processing timers, negative interval?", ev_at (w) > ev_rt_now));
1623 downheap (periodics, periodiccnt, HEAP0);
1624 }
1625 else
1626 ev_periodic_stop (EV_A_ w); /* nonrepeating: stop timer */
1627
1628 ev_feed_event (EV_A_ (W)w, EV_PERIODIC);
1629 }
1630}
1631
1632static void noinline
1633periodics_reschedule (EV_P)
1634{
1635 int i;
1636
1637 /* adjust periodics after time jump */
1638 for (i = 1; i <= periodiccnt; ++i)
1639 {
1640 ev_periodic *w = (ev_periodic *)periodics [i];
1641
1642 if (w->reschedule_cb)
1643 ev_at (w) = w->reschedule_cb (w, ev_rt_now);
1644 else if (w->interval)
1645 ev_at (w) = w->offset + ceil ((ev_rt_now - w->offset) / w->interval) * w->interval;
1646 }
1647
1648 /* now rebuild the heap */
1649 for (i = periodiccnt >> 1; --i; )
1650 downheap (periodics, periodiccnt, i + HEAP0);
1459} 1651}
1460#endif 1652#endif
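
Periodic watchers with an interval fire on absolute wall-clock boundaries of the form offset + N * interval, which is exactly the formula periodics_reify and periodics_reschedule above keep re-evaluating after expiry or a detected time jump. A hedged usage sketch (again assuming the ev.h from this tree):

#include <stdio.h>
#include "ev.h"

static void
minute_cb (EV_P_ ev_periodic *w, int revents)
{
  printf ("a new minute has started\n");
}

int
main (void)
{
  struct ev_loop *loop = ev_default_loop (0);
  ev_periodic tick;

  /* offset 0, interval 60: fires whenever wall-clock time is a multiple
     of 60 seconds, no reschedule callback */
  ev_periodic_init (&tick, minute_cb, 0., 60., 0);
  ev_periodic_start (loop, &tick);

  ev_loop (loop, 0);
  return 0;
}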
1461 1653
1462void inline_speed 1654void inline_speed
1463time_update (EV_P_ ev_tstamp max_block) 1655time_update (EV_P_ ev_tstamp max_block)
1492 */ 1684 */
1493 for (i = 4; --i; ) 1685 for (i = 4; --i; )
1494 { 1686 {
1495 rtmn_diff = ev_rt_now - mn_now; 1687 rtmn_diff = ev_rt_now - mn_now;
1496 1688
1497 if (fabs (odiff - rtmn_diff) < MIN_TIMEJUMP) 1689 if (expect_true (fabs (odiff - rtmn_diff) < MIN_TIMEJUMP))
1498 return; /* all is well */ 1690 return; /* all is well */
1499 1691
1500 ev_rt_now = ev_time (); 1692 ev_rt_now = ev_time ();
1501 mn_now = get_clock (); 1693 mn_now = get_clock ();
1502 now_floor = mn_now; 1694 now_floor = mn_now;
1517 { 1709 {
1518#if EV_PERIODIC_ENABLE 1710#if EV_PERIODIC_ENABLE
1519 periodics_reschedule (EV_A); 1711 periodics_reschedule (EV_A);
1520#endif 1712#endif
1521 /* adjust timers. this is easy, as the offset is the same for all of them */ 1713 /* adjust timers. this is easy, as the offset is the same for all of them */
1522 for (i = 0; i < timercnt; ++i) 1714 for (i = 1; i <= timercnt; ++i)
1523 ((WT)timers [i])->at += ev_rt_now - mn_now; 1715 ev_at (timers [i]) += ev_rt_now - mn_now;
1524 } 1716 }
1525 1717
1526 mn_now = ev_rt_now; 1718 mn_now = ev_rt_now;
1527 } 1719 }
1528} 1720}
1542static int loop_done; 1734static int loop_done;
1543 1735
1544void 1736void
1545ev_loop (EV_P_ int flags) 1737ev_loop (EV_P_ int flags)
1546{ 1738{
1547 loop_done = flags & (EVLOOP_ONESHOT | EVLOOP_NONBLOCK) 1739 loop_done = EVUNLOOP_CANCEL;
1548 ? EVUNLOOP_ONE
1549 : EVUNLOOP_CANCEL;
1550 1740
1551 call_pending (EV_A); /* in case we recurse, ensure ordering stays nice and clean */ 1741 call_pending (EV_A); /* in case we recurse, ensure ordering stays nice and clean */
1552 1742
1553 do 1743 do
1554 { 1744 {
1600 1790
1601 waittime = MAX_BLOCKTIME; 1791 waittime = MAX_BLOCKTIME;
1602 1792
1603 if (timercnt) 1793 if (timercnt)
1604 { 1794 {
1605 ev_tstamp to = ((WT)timers [0])->at - mn_now + backend_fudge; 1795 ev_tstamp to = ev_at (timers [HEAP0]) - mn_now + backend_fudge;
1606 if (waittime > to) waittime = to; 1796 if (waittime > to) waittime = to;
1607 } 1797 }
1608 1798
1609#if EV_PERIODIC_ENABLE 1799#if EV_PERIODIC_ENABLE
1610 if (periodiccnt) 1800 if (periodiccnt)
1611 { 1801 {
1612 ev_tstamp to = ((WT)periodics [0])->at - ev_rt_now + backend_fudge; 1802 ev_tstamp to = ev_at (periodics [HEAP0]) - ev_rt_now + backend_fudge;
1613 if (waittime > to) waittime = to; 1803 if (waittime > to) waittime = to;
1614 } 1804 }
1615#endif 1805#endif
1616 1806
1617 if (expect_false (waittime < timeout_blocktime)) 1807 if (expect_false (waittime < timeout_blocktime))
1650 /* queue check watchers, to be executed first */ 1840 /* queue check watchers, to be executed first */
1651 if (expect_false (checkcnt)) 1841 if (expect_false (checkcnt))
1652 queue_events (EV_A_ (W *)checks, checkcnt, EV_CHECK); 1842 queue_events (EV_A_ (W *)checks, checkcnt, EV_CHECK);
1653 1843
1654 call_pending (EV_A); 1844 call_pending (EV_A);
1655
1656 } 1845 }
1657 while (expect_true (activecnt && !loop_done)); 1846 while (expect_true (
1847 activecnt
1848 && !loop_done
1849 && !(flags & (EVLOOP_ONESHOT | EVLOOP_NONBLOCK))
1850 ));
1658 1851
1659 if (loop_done == EVUNLOOP_ONE) 1852 if (loop_done == EVUNLOOP_ONE)
1660 loop_done = EVUNLOOP_CANCEL; 1853 loop_done = EVUNLOOP_CANCEL;
1661} 1854}
1662 1855
1780ev_timer_start (EV_P_ ev_timer *w) 1973ev_timer_start (EV_P_ ev_timer *w)
1781{ 1974{
1782 if (expect_false (ev_is_active (w))) 1975 if (expect_false (ev_is_active (w)))
1783 return; 1976 return;
1784 1977
1785 ((WT)w)->at += mn_now; 1978 ev_at (w) += mn_now;
1786 1979
1787 assert (("ev_timer_start called with negative timer repeat value", w->repeat >= 0.)); 1980 assert (("ev_timer_start called with negative timer repeat value", w->repeat >= 0.));
1788 1981
1789 ev_start (EV_A_ (W)w, ++timercnt); 1982 ev_start (EV_A_ (W)w, ++timercnt + HEAP0 - 1);
1790 array_needsize (WT, timers, timermax, timercnt, EMPTY2); 1983 array_needsize (WT, timers, timermax, timercnt + HEAP0, EMPTY2);
1791 timers [timercnt - 1] = (WT)w; 1984 timers [ev_active (w)] = (WT)w;
1792 upheap (timers, timercnt - 1); 1985 upheap (timers, ev_active (w));
1793 1986
1794 /*assert (("internal timer heap corruption", timers [((W)w)->active - 1] == w));*/ 1987 /*assert (("internal timer heap corruption", timers [ev_active (w)] == w));*/
1795} 1988}
1796 1989
1797void noinline 1990void noinline
1798ev_timer_stop (EV_P_ ev_timer *w) 1991ev_timer_stop (EV_P_ ev_timer *w)
1799{ 1992{
1800 clear_pending (EV_A_ (W)w); 1993 clear_pending (EV_A_ (W)w);
1801 if (expect_false (!ev_is_active (w))) 1994 if (expect_false (!ev_is_active (w)))
1802 return; 1995 return;
1803 1996
1804 assert (("internal timer heap corruption", timers [((W)w)->active - 1] == (WT)w));
1805
1806 { 1997 {
1807 int active = ((W)w)->active; 1998 int active = ev_active (w);
1808 1999
2000 assert (("internal timer heap corruption", timers [active] == (WT)w));
2001
1809 if (expect_true (--active < --timercnt)) 2002 if (expect_true (active < timercnt + HEAP0 - 1))
1810 { 2003 {
1811 timers [active] = timers [timercnt]; 2004 timers [active] = timers [timercnt + HEAP0 - 1];
1812 adjustheap (timers, timercnt, active); 2005 adjustheap (timers, timercnt, active);
1813 } 2006 }
2007
2008 --timercnt;
1814 } 2009 }
1815 2010
1816 ((WT)w)->at -= mn_now; 2011 ev_at (w) -= mn_now;
1817 2012
1818 ev_stop (EV_A_ (W)w); 2013 ev_stop (EV_A_ (W)w);
1819} 2014}
1820 2015
1821void noinline 2016void noinline
1823{ 2018{
1824 if (ev_is_active (w)) 2019 if (ev_is_active (w))
1825 { 2020 {
1826 if (w->repeat) 2021 if (w->repeat)
1827 { 2022 {
1828 ((WT)w)->at = mn_now + w->repeat; 2023 ev_at (w) = mn_now + w->repeat;
1829 adjustheap (timers, timercnt, ((W)w)->active - 1); 2024 adjustheap (timers, timercnt, ev_active (w));
1830 } 2025 }
1831 else 2026 else
1832 ev_timer_stop (EV_A_ w); 2027 ev_timer_stop (EV_A_ w);
1833 } 2028 }
1834 else if (w->repeat) 2029 else if (w->repeat)
1835 { 2030 {
1836 w->at = w->repeat; 2031 ev_at (w) = w->repeat;
1837 ev_timer_start (EV_A_ w); 2032 ev_timer_start (EV_A_ w);
1838 } 2033 }
1839} 2034}
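
ev_timer_again above either re-arms an active repeating timer in place (via adjustheap) or starts it with its repeat value, which is the usual way to build inactivity timeouts without stop/start churn. A hedged usage sketch (assumes the ev.h from this tree; activity_seen is a hypothetical helper):

#include <stdio.h>
#include "ev.h"

static ev_timer timeout_watcher;

static void
timeout_cb (EV_P_ ev_timer *w, int revents)
{
  printf ("no activity for 10 seconds\n");
}

/* hypothetical helper: call on every piece of activity to restart the countdown */
static void
activity_seen (struct ev_loop *loop)
{
  ev_timer_again (loop, &timeout_watcher);
}

int
main (void)
{
  struct ev_loop *loop = ev_default_loop (0);

  ev_init (&timeout_watcher, timeout_cb);
  timeout_watcher.repeat = 10.;
  activity_seen (loop);          /* arm the initial 10s timeout */

  ev_loop (loop, 0);
  return 0;
}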
1840 2035
1841#if EV_PERIODIC_ENABLE 2036#if EV_PERIODIC_ENABLE
1844{ 2039{
1845 if (expect_false (ev_is_active (w))) 2040 if (expect_false (ev_is_active (w)))
1846 return; 2041 return;
1847 2042
1848 if (w->reschedule_cb) 2043 if (w->reschedule_cb)
1849 ((WT)w)->at = w->reschedule_cb (w, ev_rt_now); 2044 ev_at (w) = w->reschedule_cb (w, ev_rt_now);
1850 else if (w->interval) 2045 else if (w->interval)
1851 { 2046 {
1852 assert (("ev_periodic_start called with negative interval value", w->interval >= 0.)); 2047 assert (("ev_periodic_start called with negative interval value", w->interval >= 0.));
1853 /* this formula differs from the one in periodic_reify because we do not always round up */ 2048 /* this formula differs from the one in periodic_reify because we do not always round up */
1854 ((WT)w)->at = w->offset + ceil ((ev_rt_now - w->offset) / w->interval) * w->interval; 2049 ev_at (w) = w->offset + ceil ((ev_rt_now - w->offset) / w->interval) * w->interval;
1855 } 2050 }
1856 else 2051 else
1857 ((WT)w)->at = w->offset; 2052 ev_at (w) = w->offset;
1858 2053
1859 ev_start (EV_A_ (W)w, ++periodiccnt); 2054 ev_start (EV_A_ (W)w, ++periodiccnt + HEAP0 - 1);
1860 array_needsize (WT, periodics, periodicmax, periodiccnt, EMPTY2); 2055 array_needsize (WT, periodics, periodicmax, periodiccnt + HEAP0, EMPTY2);
1861 periodics [periodiccnt - 1] = (WT)w; 2056 periodics [ev_active (w)] = (WT)w;
1862 upheap (periodics, periodiccnt - 1); 2057 upheap (periodics, ev_active (w));
1863 2058
1864 /*assert (("internal periodic heap corruption", periodics [((W)w)->active - 1] == w));*/ 2059 /*assert (("internal periodic heap corruption", periodics [ev_active (w)] == w));*/
1865} 2060}
1866 2061
1867void noinline 2062void noinline
1868ev_periodic_stop (EV_P_ ev_periodic *w) 2063ev_periodic_stop (EV_P_ ev_periodic *w)
1869{ 2064{
1870 clear_pending (EV_A_ (W)w); 2065 clear_pending (EV_A_ (W)w);
1871 if (expect_false (!ev_is_active (w))) 2066 if (expect_false (!ev_is_active (w)))
1872 return; 2067 return;
1873 2068
1874 assert (("internal periodic heap corruption", periodics [((W)w)->active - 1] == (WT)w));
1875
1876 { 2069 {
1877 int active = ((W)w)->active; 2070 int active = ev_active (w);
1878 2071
2072 assert (("internal periodic heap corruption", periodics [active] == (WT)w));
2073
1879 if (expect_true (--active < --periodiccnt)) 2074 if (expect_true (active < periodiccnt + HEAP0 - 1))
1880 { 2075 {
1881 periodics [active] = periodics [periodiccnt]; 2076 periodics [active] = periodics [periodiccnt + HEAP0 - 1];
1882 adjustheap (periodics, periodiccnt, active); 2077 adjustheap (periodics, periodiccnt, active);
1883 } 2078 }
2079
2080 --periodiccnt;
1884 } 2081 }
1885 2082
1886 ev_stop (EV_A_ (W)w); 2083 ev_stop (EV_A_ (W)w);
1887} 2084}
1888 2085
1930 wlist_add (&signals [w->signum - 1].head, (WL)w); 2127 wlist_add (&signals [w->signum - 1].head, (WL)w);
1931 2128
1932 if (!((WL)w)->next) 2129 if (!((WL)w)->next)
1933 { 2130 {
1934#if _WIN32 2131#if _WIN32
1935 signal (w->signum, sighandler); 2132 signal (w->signum, ev_sighandler);
1936#else 2133#else
1937 struct sigaction sa; 2134 struct sigaction sa;
1938 sa.sa_handler = sighandler; 2135 sa.sa_handler = ev_sighandler;
1939 sigfillset (&sa.sa_mask); 2136 sigfillset (&sa.sa_mask);
1940 sa.sa_flags = SA_RESTART; /* if restarting works we save one iteration */ 2137 sa.sa_flags = SA_RESTART; /* if restarting works we save one iteration */
1941 sigaction (w->signum, &sa, 0); 2138 sigaction (w->signum, &sa, 0);
1942#endif 2139#endif
1943 } 2140 }
2004 if (w->wd < 0) 2201 if (w->wd < 0)
2005 { 2202 {
2006 ev_timer_start (EV_A_ &w->timer); /* this is not race-free, so we still need to recheck periodically */ 2203 ev_timer_start (EV_A_ &w->timer); /* this is not race-free, so we still need to recheck periodically */
2007 2204
2008 /* monitor some parent directory for speedup hints */ 2205 /* monitor some parent directory for speedup hints */
2206 /* note that exceeding the hardcoded limit is not a correctness issue, */
2207 /* but an efficiency issue only */
2009 if ((errno == ENOENT || errno == EACCES) && strlen (w->path) < 4096) 2208 if ((errno == ENOENT || errno == EACCES) && strlen (w->path) < 4096)
2010 { 2209 {
2011 char path [4096]; 2210 char path [4096];
2012 strcpy (path, w->path); 2211 strcpy (path, w->path);
2013 2212
2258 clear_pending (EV_A_ (W)w); 2457 clear_pending (EV_A_ (W)w);
2259 if (expect_false (!ev_is_active (w))) 2458 if (expect_false (!ev_is_active (w)))
2260 return; 2459 return;
2261 2460
2262 { 2461 {
2263 int active = ((W)w)->active; 2462 int active = ev_active (w);
2264 2463
2265 idles [ABSPRI (w)][active - 1] = idles [ABSPRI (w)][--idlecnt [ABSPRI (w)]]; 2464 idles [ABSPRI (w)][active - 1] = idles [ABSPRI (w)][--idlecnt [ABSPRI (w)]];
2266 ((W)idles [ABSPRI (w)][active - 1])->active = active; 2465 ev_active (idles [ABSPRI (w)][active - 1]) = active;
2267 2466
2268 ev_stop (EV_A_ (W)w); 2467 ev_stop (EV_A_ (W)w);
2269 --idleall; 2468 --idleall;
2270 } 2469 }
2271} 2470}
2288 clear_pending (EV_A_ (W)w); 2487 clear_pending (EV_A_ (W)w);
2289 if (expect_false (!ev_is_active (w))) 2488 if (expect_false (!ev_is_active (w)))
2290 return; 2489 return;
2291 2490
2292 { 2491 {
2293 int active = ((W)w)->active; 2492 int active = ev_active (w);
2493
2294 prepares [active - 1] = prepares [--preparecnt]; 2494 prepares [active - 1] = prepares [--preparecnt];
2295 ((W)prepares [active - 1])->active = active; 2495 ev_active (prepares [active - 1]) = active;
2296 } 2496 }
2297 2497
2298 ev_stop (EV_A_ (W)w); 2498 ev_stop (EV_A_ (W)w);
2299} 2499}
2300 2500
2315 clear_pending (EV_A_ (W)w); 2515 clear_pending (EV_A_ (W)w);
2316 if (expect_false (!ev_is_active (w))) 2516 if (expect_false (!ev_is_active (w)))
2317 return; 2517 return;
2318 2518
2319 { 2519 {
2320 int active = ((W)w)->active; 2520 int active = ev_active (w);
2521
2321 checks [active - 1] = checks [--checkcnt]; 2522 checks [active - 1] = checks [--checkcnt];
2322 ((W)checks [active - 1])->active = active; 2523 ev_active (checks [active - 1]) = active;
2323 } 2524 }
2324 2525
2325 ev_stop (EV_A_ (W)w); 2526 ev_stop (EV_A_ (W)w);
2326} 2527}
2327 2528
2423 clear_pending (EV_A_ (W)w); 2624 clear_pending (EV_A_ (W)w);
2424 if (expect_false (!ev_is_active (w))) 2625 if (expect_false (!ev_is_active (w)))
2425 return; 2626 return;
2426 2627
2427 { 2628 {
2428 int active = ((W)w)->active; 2629 int active = ev_active (w);
2630
2429 forks [active - 1] = forks [--forkcnt]; 2631 forks [active - 1] = forks [--forkcnt];
2430 ((W)forks [active - 1])->active = active; 2632 ev_active (forks [active - 1]) = active;
2431 } 2633 }
2432 2634
2433 ev_stop (EV_A_ (W)w); 2635 ev_stop (EV_A_ (W)w);
2434} 2636}
2435#endif 2637#endif
2454 clear_pending (EV_A_ (W)w); 2656 clear_pending (EV_A_ (W)w);
2455 if (expect_false (!ev_is_active (w))) 2657 if (expect_false (!ev_is_active (w)))
2456 return; 2658 return;
2457 2659
2458 { 2660 {
2459 int active = ((W)w)->active; 2661 int active = ev_active (w);
2662
2460 asyncs [active - 1] = asyncs [--asynccnt]; 2663 asyncs [active - 1] = asyncs [--asynccnt];
2461 ((W)asyncs [active - 1])->active = active; 2664 ev_active (asyncs [active - 1]) = active;
2462 } 2665 }
2463 2666
2464 ev_stop (EV_A_ (W)w); 2667 ev_stop (EV_A_ (W)w);
2465} 2668}
2466 2669
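
Async watchers are what ties the evpipe/eventfd machinery to user code: ev_async_send may be called from another thread or a signal handler and makes the loop wake up and run the async callback. A hedged usage sketch (assumes the ev.h from this tree with EV_ASYNC_ENABLE; the send happens inline here only to keep the example self-contained):

#include <stdio.h>
#include "ev.h"

static ev_async async_watcher;

static void
async_cb (EV_P_ ev_async *w, int revents)
{
  printf ("woken up via ev_async_send\n");
  ev_unloop (EV_A_ EVUNLOOP_ALL);
}

int
main (void)
{
  struct ev_loop *loop = ev_default_loop (0);

  ev_async_init (&async_watcher, async_cb);
  ev_async_start (loop, &async_watcher);

  ev_async_send (loop, &async_watcher);   /* normally from another thread */

  ev_loop (loop, 0);
  return 0;
}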
