
Comparing libev/ev.c (file contents):
Revision 1.213 by root, Tue Feb 19 19:13:50 2008 UTC vs.
Revision 1.245 by root, Wed May 21 00:26:01 2008 UTC

39 39
40#ifdef __cplusplus 40#ifdef __cplusplus
41extern "C" { 41extern "C" {
42#endif 42#endif
43 43
44/* this big block deduces configuration from config.h */
44#ifndef EV_STANDALONE 45#ifndef EV_STANDALONE
45# ifdef EV_CONFIG_H 46# ifdef EV_CONFIG_H
46# include EV_CONFIG_H 47# include EV_CONFIG_H
47# else 48# else
48# include "config.h" 49# include "config.h"
118# else 119# else
119# define EV_USE_INOTIFY 0 120# define EV_USE_INOTIFY 0
120# endif 121# endif
121# endif 122# endif
122 123
124# ifndef EV_USE_EVENTFD
125# if HAVE_EVENTFD
126# define EV_USE_EVENTFD 1
127# else
128# define EV_USE_EVENTFD 0
129# endif
130# endif
131
123#endif 132#endif
124 133
125#include <math.h> 134#include <math.h>
126#include <stdlib.h> 135#include <stdlib.h>
127#include <fcntl.h> 136#include <fcntl.h>
152# ifndef EV_SELECT_IS_WINSOCKET 161# ifndef EV_SELECT_IS_WINSOCKET
153# define EV_SELECT_IS_WINSOCKET 1 162# define EV_SELECT_IS_WINSOCKET 1
154# endif 163# endif
155#endif 164#endif
156 165
157/**/ 166/* this block tries to deduce configuration from header-defined symbols and defaults */
158 167
159#ifndef EV_USE_MONOTONIC 168#ifndef EV_USE_MONOTONIC
160# define EV_USE_MONOTONIC 0 169# define EV_USE_MONOTONIC 0
161#endif 170#endif
162 171
179# define EV_USE_POLL 1 188# define EV_USE_POLL 1
180# endif 189# endif
181#endif 190#endif
182 191
183#ifndef EV_USE_EPOLL 192#ifndef EV_USE_EPOLL
193# if __linux && (__GLIBC__ > 2 || (__GLIBC__ == 2 && __GLIBC_MINOR__ >= 4))
194# define EV_USE_EPOLL 1
195# else
184# define EV_USE_EPOLL 0 196# define EV_USE_EPOLL 0
197# endif
185#endif 198#endif
186 199
187#ifndef EV_USE_KQUEUE 200#ifndef EV_USE_KQUEUE
188# define EV_USE_KQUEUE 0 201# define EV_USE_KQUEUE 0
189#endif 202#endif
191#ifndef EV_USE_PORT 204#ifndef EV_USE_PORT
192# define EV_USE_PORT 0 205# define EV_USE_PORT 0
193#endif 206#endif
194 207
195#ifndef EV_USE_INOTIFY 208#ifndef EV_USE_INOTIFY
209# if __linux && (__GLIBC__ > 2 || (__GLIBC__ == 2 && __GLIBC_MINOR__ >= 4))
210# define EV_USE_INOTIFY 1
211# else
196# define EV_USE_INOTIFY 0 212# define EV_USE_INOTIFY 0
213# endif
197#endif 214#endif
198 215
199#ifndef EV_PID_HASHSIZE 216#ifndef EV_PID_HASHSIZE
200# if EV_MINIMAL 217# if EV_MINIMAL
201# define EV_PID_HASHSIZE 1 218# define EV_PID_HASHSIZE 1
210# else 227# else
211# define EV_INOTIFY_HASHSIZE 16 228# define EV_INOTIFY_HASHSIZE 16
212# endif 229# endif
213#endif 230#endif
214 231
215/**/ 232#ifndef EV_USE_EVENTFD
233# if __linux && (__GLIBC__ > 2 || (__GLIBC__ == 2 && __GLIBC_MINOR__ >= 7))
234# define EV_USE_EVENTFD 1
235# else
236# define EV_USE_EVENTFD 0
237# endif
238#endif
239
240#ifndef EV_USE_4HEAP
241# define EV_USE_4HEAP !EV_MINIMAL
242#endif
243
244#ifndef EV_HEAP_CACHE_AT
245# define EV_HEAP_CACHE_AT !EV_MINIMAL
246#endif
247
248/* this block fixes any misconfiguration where we know we run into trouble otherwise */
216 249
217#ifndef CLOCK_MONOTONIC 250#ifndef CLOCK_MONOTONIC
218# undef EV_USE_MONOTONIC 251# undef EV_USE_MONOTONIC
219# define EV_USE_MONOTONIC 0 252# define EV_USE_MONOTONIC 0
220#endif 253#endif
239# include <sys/inotify.h> 272# include <sys/inotify.h>
240#endif 273#endif
241 274
242#if EV_SELECT_IS_WINSOCKET 275#if EV_SELECT_IS_WINSOCKET
243# include <winsock.h> 276# include <winsock.h>
277#endif
278
279#if EV_USE_EVENTFD
280/* our minimum requirement is glibc 2.7 which has the stub, but not the header */
281# include <stdint.h>
282# ifdef __cplusplus
283extern "C" {
284# endif
285int eventfd (unsigned int initval, int flags);
286# ifdef __cplusplus
287}
288# endif
244#endif 289#endif
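
The block above hand-declares the eventfd prototype because glibc 2.7 ships the system call stub but not <sys/eventfd.h>. As a rough standalone sketch (not part of ev.c; Linux-only, error handling mostly omitted), this is the counter behaviour the later pipe replacement relies on: a write adds to an in-kernel 64-bit counter and makes the descriptor readable, a read returns the accumulated value and resets it.

    #include <stdint.h>
    #include <unistd.h>

    int eventfd (unsigned int initval, int flags); /* glibc >= 2.7 stub, as declared above */

    static void
    eventfd_wakeup_sketch (void)
    {
      uint64_t counter;
      int fd = eventfd (0, 0);                /* counter starts at initval == 0 */

      if (fd < 0)
        return;                               /* kernel/libc too old */

      counter = 1;
      write (fd, &counter, sizeof (counter)); /* add 1; fd becomes readable */
      read  (fd, &counter, sizeof (counter)); /* returns the count, resets it to 0 */

      close (fd);
    }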
245 290
246/**/ 291/**/
247 292
248/* 293/*
263# define expect(expr,value) __builtin_expect ((expr),(value)) 308# define expect(expr,value) __builtin_expect ((expr),(value))
264# define noinline __attribute__ ((noinline)) 309# define noinline __attribute__ ((noinline))
265#else 310#else
266# define expect(expr,value) (expr) 311# define expect(expr,value) (expr)
267# define noinline 312# define noinline
268# if __STDC_VERSION__ < 199901L 313# if __STDC_VERSION__ < 199901L && __GNUC__ < 2
269# define inline 314# define inline
270# endif 315# endif
271#endif 316#endif
272 317
273#define expect_false(expr) expect ((expr) != 0, 0) 318#define expect_false(expr) expect ((expr) != 0, 0)
288 333
289typedef ev_watcher *W; 334typedef ev_watcher *W;
290typedef ev_watcher_list *WL; 335typedef ev_watcher_list *WL;
291typedef ev_watcher_time *WT; 336typedef ev_watcher_time *WT;
292 337
338#define ev_active(w) ((W)(w))->active
339#define ev_at(w) ((WT)(w))->at
340
293#if EV_USE_MONOTONIC 341#if EV_USE_MONOTONIC
294/* sig_atomic_t is used to avoid per-thread variables or locking but still */ 342/* sig_atomic_t is used to avoid per-thread variables or locking but still */
295/* giving it a reasonably high chance of working on typical architectures */ 343/* giving it a reasonably high chance of working on typical architectures */
296static EV_ATOMIC_T have_monotonic; /* did clock_gettime (CLOCK_MONOTONIC) work? */ 344static EV_ATOMIC_T have_monotonic; /* did clock_gettime (CLOCK_MONOTONIC) work? */
297#endif 345#endif
323 perror (msg); 371 perror (msg);
324 abort (); 372 abort ();
325 } 373 }
326} 374}
327 375
376static void *
377ev_realloc_emul (void *ptr, long size)
378{
379 /* some systems, notably openbsd and darwin, fail to properly
380 * implement realloc (x, 0) (as required by both ansi c-98 and
381 * the single unix specification), so work around them here.
382 */
383
384 if (size)
385 return realloc (ptr, size);
386
387 free (ptr);
388 return 0;
389}
390
328static void *(*alloc)(void *ptr, long size); 391static void *(*alloc)(void *ptr, long size) = ev_realloc_emul;
329 392
330void 393void
331ev_set_allocator (void *(*cb)(void *ptr, long size)) 394ev_set_allocator (void *(*cb)(void *ptr, long size))
332{ 395{
333 alloc = cb; 396 alloc = cb;
334} 397}
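
Because alloc now defaults to ev_realloc_emul rather than a null pointer, ev_realloc below can call it unconditionally. User code can still override it with ev_set_allocator; the retry-forever callback here is only a sketch (the name persistent_realloc and the 60-second retry are illustrative, not part of ev.c):

    #include <stdlib.h>
    #include <unistd.h>

    static void *
    persistent_realloc (void *ptr, long size)
    {
      if (!size)
        {
          free (ptr);      /* mirror the realloc (x, 0) workaround above */
          return 0;
        }

      for (;;)
        {
          void *newptr = realloc (ptr, size);

          if (newptr)
            return newptr;

          sleep (60);      /* out of memory: wait and retry instead of aborting */
        }
    }

    /* ev_set_allocator (persistent_realloc); */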
335 398
336inline_speed void * 399inline_speed void *
337ev_realloc (void *ptr, long size) 400ev_realloc (void *ptr, long size)
338{ 401{
339 ptr = alloc ? alloc (ptr, size) : realloc (ptr, size); 402 ptr = alloc (ptr, size);
340 403
341 if (!ptr && size) 404 if (!ptr && size)
342 { 405 {
343 fprintf (stderr, "libev: cannot allocate %ld bytes, aborting.", size); 406 fprintf (stderr, "libev: cannot allocate %ld bytes, aborting.", size);
344 abort (); 407 abort ();
367 W w; 430 W w;
368 int events; 431 int events;
369} ANPENDING; 432} ANPENDING;
370 433
371#if EV_USE_INOTIFY 434#if EV_USE_INOTIFY
435/* hash table entry per inotify-id */
372typedef struct 436typedef struct
373{ 437{
374 WL head; 438 WL head;
375} ANFS; 439} ANFS;
440#endif
441
442/* Heap Entry */
443#if EV_HEAP_CACHE_AT
444 typedef struct {
445 ev_tstamp at;
446 WT w;
447 } ANHE;
448
449 #define ANHE_w(he) (he).w /* access watcher, read-write */
450 #define ANHE_at(he) (he).at /* access cached at, read-only */
451 #define ANHE_at_set(he) (he).at = (he).w->at /* update at from watcher */
452#else
453 typedef WT ANHE;
454
455 #define ANHE_w(he) (he)
456 #define ANHE_at(he) (he)->at
457 #define ANHE_at_set(he)
376#endif 458#endif
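
A small sketch of how the ANHE accessors are meant to be used (assuming the typedefs and macros above; the helper name is made up). With EV_HEAP_CACHE_AT enabled the comparison touches only the heap array; with it disabled the same source compiles down to a dereference of the watcher:

    static int
    anhe_earlier (ANHE a, ANHE b)
    {
      return ANHE_at (a) < ANHE_at (b); /* no pointer chase into the watcher when caching is on */
    }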
377 459
378#if EV_MULTIPLICITY 460#if EV_MULTIPLICITY
379 461
380 struct ev_loop 462 struct ev_loop
451 ts.tv_sec = (time_t)delay; 533 ts.tv_sec = (time_t)delay;
452 ts.tv_nsec = (long)((delay - (ev_tstamp)(ts.tv_sec)) * 1e9); 534 ts.tv_nsec = (long)((delay - (ev_tstamp)(ts.tv_sec)) * 1e9);
453 535
454 nanosleep (&ts, 0); 536 nanosleep (&ts, 0);
455#elif defined(_WIN32) 537#elif defined(_WIN32)
456 Sleep (delay * 1e3); 538 Sleep ((unsigned long)(delay * 1e3));
457#else 539#else
458 struct timeval tv; 540 struct timeval tv;
459 541
460 tv.tv_sec = (time_t)delay; 542 tv.tv_sec = (time_t)delay;
461 tv.tv_usec = (long)((delay - (ev_tstamp)(tv.tv_sec)) * 1e6); 543 tv.tv_usec = (long)((delay - (ev_tstamp)(tv.tv_sec)) * 1e6);
464#endif 546#endif
465 } 547 }
466} 548}
467 549
468/*****************************************************************************/ 550/*****************************************************************************/
551
552#define MALLOC_ROUND 4096 /* prefer to allocate in chunks of this size, must be 2**n and >> 4 longs */
469 553
470int inline_size 554int inline_size
471array_nextsize (int elem, int cur, int cnt) 555array_nextsize (int elem, int cur, int cnt)
472{ 556{
473 int ncur = cur + 1; 557 int ncur = cur + 1;
474 558
475 do 559 do
476 ncur <<= 1; 560 ncur <<= 1;
477 while (cnt > ncur); 561 while (cnt > ncur);
478 562
479 /* if size > 4096, round to 4096 - 4 * longs to accommodate malloc overhead */ 563 /* if size is large, round to MALLOC_ROUND - 4 * longs to accommodate malloc overhead */
480 if (elem * ncur > 4096) 564 if (elem * ncur > MALLOC_ROUND - sizeof (void *) * 4)
481 { 565 {
482 ncur *= elem; 566 ncur *= elem;
483 ncur = (ncur + elem + 4095 + sizeof (void *) * 4) & ~4095; 567 ncur = (ncur + elem + (MALLOC_ROUND - 1) + sizeof (void *) * 4) & ~(MALLOC_ROUND - 1);
484 ncur = ncur - sizeof (void *) * 4; 568 ncur = ncur - sizeof (void *) * 4;
485 ncur /= elem; 569 ncur /= elem;
486 } 570 }
487 571
488 return ncur; 572 return ncur;
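
A worked example of the rounding above, with illustrative numbers (the element size and count are made up; the four pointers of malloc bookkeeping are the assumption encoded in sizeof (void *) * 4):

    /* elem = 16 bytes, the doubling step reaches ncur = 512 -> 8192 bytes,
     * which exceeds MALLOC_ROUND - 32, so:
     *   ncur = ((8192 + 16 + 4095 + 32) & ~4095) - 32 = 12288 - 32 = 12256 bytes
     *   ncur / 16 = 766 elements
     * i.e. payload plus the assumed allocator overhead fills exactly three
     * 4096-byte chunks instead of spilling a few bytes into a fourth. */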
702 } 786 }
703} 787}
704 788
705/*****************************************************************************/ 789/*****************************************************************************/
706 790
791/*
792 * the heap functions want a real array index. array index 0 is guaranteed to not
793 * be in-use at any time. the first heap entry is at array [HEAP0]. DHEAP gives
794 * the branching factor of the d-tree.
795 */
796
797/*
798 * at the moment we allow libev the luxury of two heaps,
799 * a small-code-size 2-heap one and a ~1.5kb larger 4-heap
800 * which is more cache-efficient.
801 * the difference is about 5% with 50000+ watchers.
802 */
803#if EV_USE_4HEAP
804
805#define DHEAP 4
806#define HEAP0 (DHEAP - 1) /* index of first element in heap */
807
808/* towards the root */
707void inline_speed 809void inline_speed
708upheap (WT *heap, int k) 810upheap (ANHE *heap, int k)
709{ 811{
710 WT w = heap [k]; 812 ANHE he = heap [k];
711 813
712 while (k) 814 for (;;)
713 { 815 {
714 int p = (k - 1) >> 1; 816 int p = ((k - HEAP0 - 1) / DHEAP) + HEAP0;
715 817
716 if (heap [p]->at <= w->at) 818 if (p == k || ANHE_at (heap [p]) <= ANHE_at (he))
717 break; 819 break;
718 820
719 heap [k] = heap [p]; 821 heap [k] = heap [p];
720 ((W)heap [k])->active = k + 1; 822 ev_active (ANHE_w (heap [k])) = k;
721 k = p; 823 k = p;
722 } 824 }
723 825
826 ev_active (ANHE_w (he)) = k;
724 heap [k] = w; 827 heap [k] = he;
725 ((W)heap [k])->active = k + 1;
726} 828}
727 829
830/* away from the root */
728void inline_speed 831void inline_speed
729downheap (WT *heap, int N, int k) 832downheap (ANHE *heap, int N, int k)
730{ 833{
731 WT w = heap [k]; 834 ANHE he = heap [k];
835 ANHE *E = heap + N + HEAP0;
732 836
733 for (;;) 837 for (;;)
734 { 838 {
735 int c = (k << 1) + 1; 839 ev_tstamp minat;
840 ANHE *minpos;
841 ANHE *pos = heap + DHEAP * (k - HEAP0) + HEAP0;
736 842
737 if (c >= N) 843 // find minimum child
844 if (expect_true (pos + DHEAP - 1 < E))
845 {
846 /* fast path */ (minpos = pos + 0), (minat = ANHE_at (*minpos));
847 if ( ANHE_at (pos [1]) < minat) (minpos = pos + 1), (minat = ANHE_at (*minpos));
848 if ( ANHE_at (pos [2]) < minat) (minpos = pos + 2), (minat = ANHE_at (*minpos));
849 if ( ANHE_at (pos [3]) < minat) (minpos = pos + 3), (minat = ANHE_at (*minpos));
850 }
851 else if (pos < E)
852 {
853 /* slow path */ (minpos = pos + 0), (minat = ANHE_at (*minpos));
854 if (pos + 1 < E && ANHE_at (pos [1]) < minat) (minpos = pos + 1), (minat = ANHE_at (*minpos));
855 if (pos + 2 < E && ANHE_at (pos [2]) < minat) (minpos = pos + 2), (minat = ANHE_at (*minpos));
856 if (pos + 3 < E && ANHE_at (pos [3]) < minat) (minpos = pos + 3), (minat = ANHE_at (*minpos));
857 }
858 else
738 break; 859 break;
739 860
861 if (ANHE_at (he) <= minat)
862 break;
863
864 ev_active (ANHE_w (*minpos)) = k;
865 heap [k] = *minpos;
866
867 k = minpos - heap;
868 }
869
870 ev_active (ANHE_w (he)) = k;
871 heap [k] = he;
872}
873
874#else // 4HEAP
875
876#define HEAP0 1
877
878/* towards the root */
879void inline_speed
880upheap (ANHE *heap, int k)
881{
882 ANHE he = heap [k];
883
884 for (;;)
885 {
886 int p = k >> 1;
887
888 /* maybe we could use a dummy element at heap [0]? */
889 if (!p || ANHE_at (heap [p]) <= ANHE_at (he))
890 break;
891
892 heap [k] = heap [p];
893 ev_active (ANHE_w (heap [k])) = k;
894 k = p;
895 }
896
897 heap [k] = he;
898 ev_active (ANHE_w (heap [k])) = k;
899}
900
901/* away from the root */
902void inline_speed
903downheap (ANHE *heap, int N, int k)
904{
905 ANHE he = heap [k];
906
907 for (;;)
908 {
909 int c = k << 1;
910
911 if (c > N)
912 break;
913
740 c += c + 1 < N && heap [c]->at > heap [c + 1]->at 914 c += c + 1 < N && ANHE_at (heap [c]) > ANHE_at (heap [c + 1])
741 ? 1 : 0; 915 ? 1 : 0;
742 916
743 if (w->at <= heap [c]->at) 917 if (ANHE_at (he) <= ANHE_at (heap [c]))
744 break; 918 break;
745 919
746 heap [k] = heap [c]; 920 heap [k] = heap [c];
747 ((W)heap [k])->active = k + 1; 921 ev_active (ANHE_w (heap [k])) = k;
748 922
749 k = c; 923 k = c;
750 } 924 }
751 925
752 heap [k] = w; 926 heap [k] = he;
753 ((W)heap [k])->active = k + 1; 927 ev_active (ANHE_w (he)) = k;
754} 928}
929#endif
755 930
756void inline_size 931void inline_size
757adjustheap (WT *heap, int N, int k) 932adjustheap (ANHE *heap, int N, int k)
758{ 933{
759 upheap (heap, k); 934 upheap (heap, k);
760 downheap (heap, N, k); 935 downheap (heap, N, k);
761} 936}
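
For the 2-heap branch the index arithmetic is the classic 1-based one; a minimal sketch of the relations upheap/downheap rely on (helper names are made up). The 4-heap branch generalizes this to DHEAP children per node, giving a shallower and more cache-friendly tree at the cost of slightly more work per level:

    /* 2-heap, root at HEAP0 == 1 */
    static int heap2_parent (int k) { return k >> 1; } /* 0 means "above the root" */
    static int heap2_child  (int k) { return k << 1; } /* first child; the second is (k << 1) + 1 */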
762 937
802static void noinline 977static void noinline
803evpipe_init (EV_P) 978evpipe_init (EV_P)
804{ 979{
805 if (!ev_is_active (&pipeev)) 980 if (!ev_is_active (&pipeev))
806 { 981 {
982#if EV_USE_EVENTFD
983 if ((evfd = eventfd (0, 0)) >= 0)
984 {
985 evpipe [0] = -1;
986 fd_intern (evfd);
987 ev_io_set (&pipeev, evfd, EV_READ);
988 }
989 else
990#endif
991 {
807 while (pipe (evpipe)) 992 while (pipe (evpipe))
808 syserr ("(libev) error creating signal/async pipe"); 993 syserr ("(libev) error creating signal/async pipe");
809 994
810 fd_intern (evpipe [0]); 995 fd_intern (evpipe [0]);
811 fd_intern (evpipe [1]); 996 fd_intern (evpipe [1]);
812
813 ev_io_set (&pipeev, evpipe [0], EV_READ); 997 ev_io_set (&pipeev, evpipe [0], EV_READ);
998 }
999
814 ev_io_start (EV_A_ &pipeev); 1000 ev_io_start (EV_A_ &pipeev);
815 ev_unref (EV_A); /* watcher should not keep loop alive */ 1001 ev_unref (EV_A); /* watcher should not keep loop alive */
816 } 1002 }
817} 1003}
818 1004
819void inline_size 1005void inline_size
820evpipe_write (EV_P_ int sig, int async) 1006evpipe_write (EV_P_ EV_ATOMIC_T *flag)
821{ 1007{
822 int sent = gotasync || gotsig; 1008 if (!*flag)
823
824 if (sig) gotsig = 1;
825 if (async) gotasync = 1;
826
827 if (!sent)
828 { 1009 {
829 int old_errno = errno; /* save errno becaue write might clobber it */ 1010 int old_errno = errno; /* save errno because write might clobber it */
1011
1012 *flag = 1;
1013
1014#if EV_USE_EVENTFD
1015 if (evfd >= 0)
1016 {
1017 uint64_t counter = 1;
1018 write (evfd, &counter, sizeof (uint64_t));
1019 }
1020 else
1021#endif
830 write (evpipe [1], &old_errno, 1); 1022 write (evpipe [1], &old_errno, 1);
1023
831 errno = old_errno; 1024 errno = old_errno;
832 } 1025 }
833} 1026}
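
evpipe_write now takes a pointer to the flag it guards (gotsig or gotasync), so any number of signals or async sends between two loop iterations results in at most one write. The same check-then-notify pattern in isolation (the names and the plain pipe write are illustrative):

    #include <signal.h>
    #include <unistd.h>

    static volatile sig_atomic_t pending_flag; /* stands in for gotsig / gotasync */

    static void
    notify_once (int wakeup_fd)
    {
      if (!pending_flag)
        {
          pending_flag = 1;
          write (wakeup_fd, "", 1); /* a single byte suffices to wake the loop */
        }
    }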
834 1027
835static void 1028static void
836pipecb (EV_P_ ev_io *iow, int revents) 1029pipecb (EV_P_ ev_io *iow, int revents)
837{ 1030{
1031#if EV_USE_EVENTFD
1032 if (evfd >= 0)
838 { 1033 {
839 int dummy; 1034 uint64_t counter;
1035 read (evfd, &counter, sizeof (uint64_t));
1036 }
1037 else
1038#endif
1039 {
1040 char dummy;
840 read (evpipe [0], &dummy, 1); 1041 read (evpipe [0], &dummy, 1);
841 } 1042 }
842 1043
843 if (gotsig && ev_is_default_loop (EV_A)) 1044 if (gotsig && ev_is_default_loop (EV_A))
844 { 1045 {
845 int signum; 1046 int signum;
846 gotsig = 0; 1047 gotsig = 0;
867} 1068}
868 1069
869/*****************************************************************************/ 1070/*****************************************************************************/
870 1071
871static void 1072static void
872sighandler (int signum) 1073ev_sighandler (int signum)
873{ 1074{
874#if EV_MULTIPLICITY 1075#if EV_MULTIPLICITY
875 struct ev_loop *loop = &default_loop_struct; 1076 struct ev_loop *loop = &default_loop_struct;
876#endif 1077#endif
877 1078
878#if _WIN32 1079#if _WIN32
879 signal (signum, sighandler); 1080 signal (signum, ev_sighandler);
880#endif 1081#endif
881 1082
882 signals [signum - 1].gotsig = 1; 1083 signals [signum - 1].gotsig = 1;
883 evpipe_write (EV_A_ 1, 0); 1084 evpipe_write (EV_A_ &gotsig);
884} 1085}
885 1086
886void noinline 1087void noinline
887ev_feed_signal_event (EV_P_ int signum) 1088ev_feed_signal_event (EV_P_ int signum)
888{ 1089{
914#ifndef WIFCONTINUED 1115#ifndef WIFCONTINUED
915# define WIFCONTINUED(status) 0 1116# define WIFCONTINUED(status) 0
916#endif 1117#endif
917 1118
918void inline_speed 1119void inline_speed
919child_reap (EV_P_ ev_signal *sw, int chain, int pid, int status) 1120child_reap (EV_P_ int chain, int pid, int status)
920{ 1121{
921 ev_child *w; 1122 ev_child *w;
922 int traced = WIFSTOPPED (status) || WIFCONTINUED (status); 1123 int traced = WIFSTOPPED (status) || WIFCONTINUED (status);
923 1124
924 for (w = (ev_child *)childs [chain & (EV_PID_HASHSIZE - 1)]; w; w = (ev_child *)((WL)w)->next) 1125 for (w = (ev_child *)childs [chain & (EV_PID_HASHSIZE - 1)]; w; w = (ev_child *)((WL)w)->next)
925 { 1126 {
926 if ((w->pid == pid || !w->pid) 1127 if ((w->pid == pid || !w->pid)
927 && (!traced || (w->flags & 1))) 1128 && (!traced || (w->flags & 1)))
928 { 1129 {
929 ev_set_priority (w, ev_priority (sw)); /* need to do it *now* */ 1130 ev_set_priority (w, EV_MAXPRI); /* need to do it *now*, this *must* be the same prio as the signal watcher itself */
930 w->rpid = pid; 1131 w->rpid = pid;
931 w->rstatus = status; 1132 w->rstatus = status;
932 ev_feed_event (EV_A_ (W)w, EV_CHILD); 1133 ev_feed_event (EV_A_ (W)w, EV_CHILD);
933 } 1134 }
934 } 1135 }
948 if (!WCONTINUED 1149 if (!WCONTINUED
949 || errno != EINVAL 1150 || errno != EINVAL
950 || 0 >= (pid = waitpid (-1, &status, WNOHANG | WUNTRACED))) 1151 || 0 >= (pid = waitpid (-1, &status, WNOHANG | WUNTRACED)))
951 return; 1152 return;
952 1153
953 /* make sure we are called again until all childs have been reaped */ 1154 /* make sure we are called again until all children have been reaped */
954 /* we need to do it this way so that the callback gets called before we continue */ 1155 /* we need to do it this way so that the callback gets called before we continue */
955 ev_feed_event (EV_A_ (W)sw, EV_SIGNAL); 1156 ev_feed_event (EV_A_ (W)sw, EV_SIGNAL);
956 1157
957 child_reap (EV_A_ sw, pid, pid, status); 1158 child_reap (EV_A_ pid, pid, status);
958 if (EV_PID_HASHSIZE > 1) 1159 if (EV_PID_HASHSIZE > 1)
959 child_reap (EV_A_ sw, 0, pid, status); /* this might trigger a watcher twice, but feed_event catches that */ 1160 child_reap (EV_A_ 0, pid, status); /* this might trigger a watcher twice, but feed_event catches that */
960} 1161}
961 1162
962#endif 1163#endif
963 1164
964/*****************************************************************************/ 1165/*****************************************************************************/
1107 if (!(flags & EVFLAG_NOENV) 1308 if (!(flags & EVFLAG_NOENV)
1108 && !enable_secure () 1309 && !enable_secure ()
1109 && getenv ("LIBEV_FLAGS")) 1310 && getenv ("LIBEV_FLAGS"))
1110 flags = atoi (getenv ("LIBEV_FLAGS")); 1311 flags = atoi (getenv ("LIBEV_FLAGS"));
1111 1312
1112 if (!(flags & 0x0000ffffUL)) 1313 if (!(flags & 0x0000ffffU))
1113 flags |= ev_recommended_backends (); 1314 flags |= ev_recommended_backends ();
1114 1315
1115#if EV_USE_PORT 1316#if EV_USE_PORT
1116 if (!backend && (flags & EVBACKEND_PORT )) backend = port_init (EV_A_ flags); 1317 if (!backend && (flags & EVBACKEND_PORT )) backend = port_init (EV_A_ flags);
1117#endif 1318#endif
1141 if (ev_is_active (&pipeev)) 1342 if (ev_is_active (&pipeev))
1142 { 1343 {
1143 ev_ref (EV_A); /* signal watcher */ 1344 ev_ref (EV_A); /* signal watcher */
1144 ev_io_stop (EV_A_ &pipeev); 1345 ev_io_stop (EV_A_ &pipeev);
1145 1346
1146 close (evpipe [0]); evpipe [0] = 0; 1347#if EV_USE_EVENTFD
1147 close (evpipe [1]); evpipe [1] = 0; 1348 if (evfd >= 0)
1349 close (evfd);
1350#endif
1351
1352 if (evpipe [0] >= 0)
1353 {
1354 close (evpipe [0]);
1355 close (evpipe [1]);
1356 }
1148 } 1357 }
1149 1358
1150#if EV_USE_INOTIFY 1359#if EV_USE_INOTIFY
1151 if (fs_fd >= 0) 1360 if (fs_fd >= 0)
1152 close (fs_fd); 1361 close (fs_fd);
1197#endif 1406#endif
1198 1407
1199 backend = 0; 1408 backend = 0;
1200} 1409}
1201 1410
1411#if EV_USE_INOTIFY
1202void inline_size infy_fork (EV_P); 1412void inline_size infy_fork (EV_P);
1413#endif
1203 1414
1204void inline_size 1415void inline_size
1205loop_fork (EV_P) 1416loop_fork (EV_P)
1206{ 1417{
1207#if EV_USE_PORT 1418#if EV_USE_PORT
1226 gotasync = 1; 1437 gotasync = 1;
1227#endif 1438#endif
1228 1439
1229 ev_ref (EV_A); 1440 ev_ref (EV_A);
1230 ev_io_stop (EV_A_ &pipeev); 1441 ev_io_stop (EV_A_ &pipeev);
1442
1443#if EV_USE_EVENTFD
1444 if (evfd >= 0)
1445 close (evfd);
1446#endif
1447
1448 if (evpipe [0] >= 0)
1449 {
1231 close (evpipe [0]); 1450 close (evpipe [0]);
1232 close (evpipe [1]); 1451 close (evpipe [1]);
1452 }
1233 1453
1234 evpipe_init (EV_A); 1454 evpipe_init (EV_A);
1235 /* now iterate over everything, in case we missed something */ 1455 /* now iterate over everything, in case we missed something */
1236 pipecb (EV_A_ &pipeev, EV_READ); 1456 pipecb (EV_A_ &pipeev, EV_READ);
1237 } 1457 }
1265void 1485void
1266ev_loop_fork (EV_P) 1486ev_loop_fork (EV_P)
1267{ 1487{
1268 postfork = 1; /* must be in line with ev_default_fork */ 1488 postfork = 1; /* must be in line with ev_default_fork */
1269} 1489}
1270
1271#endif 1490#endif
1272 1491
1273#if EV_MULTIPLICITY 1492#if EV_MULTIPLICITY
1274struct ev_loop * 1493struct ev_loop *
1275ev_default_loop_init (unsigned int flags) 1494ev_default_loop_init (unsigned int flags)
1356 EV_CB_INVOKE (p->w, p->events); 1575 EV_CB_INVOKE (p->w, p->events);
1357 } 1576 }
1358 } 1577 }
1359} 1578}
1360 1579
1361void inline_size
1362timers_reify (EV_P)
1363{
1364 while (timercnt && ((WT)timers [0])->at <= mn_now)
1365 {
1366 ev_timer *w = (ev_timer *)timers [0];
1367
1368 /*assert (("inactive timer on timer heap detected", ev_is_active (w)));*/
1369
1370 /* first reschedule or stop timer */
1371 if (w->repeat)
1372 {
1373 assert (("negative ev_timer repeat value found while processing timers", w->repeat > 0.));
1374
1375 ((WT)w)->at += w->repeat;
1376 if (((WT)w)->at < mn_now)
1377 ((WT)w)->at = mn_now;
1378
1379 downheap (timers, timercnt, 0);
1380 }
1381 else
1382 ev_timer_stop (EV_A_ w); /* nonrepeating: stop timer */
1383
1384 ev_feed_event (EV_A_ (W)w, EV_TIMEOUT);
1385 }
1386}
1387
1388#if EV_PERIODIC_ENABLE
1389void inline_size
1390periodics_reify (EV_P)
1391{
1392 while (periodiccnt && ((WT)periodics [0])->at <= ev_rt_now)
1393 {
1394 ev_periodic *w = (ev_periodic *)periodics [0];
1395
1396 /*assert (("inactive timer on periodic heap detected", ev_is_active (w)));*/
1397
1398 /* first reschedule or stop timer */
1399 if (w->reschedule_cb)
1400 {
1401 ((WT)w)->at = w->reschedule_cb (w, ev_rt_now + TIME_EPSILON);
1402 assert (("ev_periodic reschedule callback returned time in the past", ((WT)w)->at > ev_rt_now));
1403 downheap (periodics, periodiccnt, 0);
1404 }
1405 else if (w->interval)
1406 {
1407 ((WT)w)->at = w->offset + ceil ((ev_rt_now - w->offset) / w->interval) * w->interval;
1408 if (((WT)w)->at - ev_rt_now <= TIME_EPSILON) ((WT)w)->at += w->interval;
1409 assert (("ev_periodic timeout in the past detected while processing timers, negative interval?", ((WT)w)->at > ev_rt_now));
1410 downheap (periodics, periodiccnt, 0);
1411 }
1412 else
1413 ev_periodic_stop (EV_A_ w); /* nonrepeating: stop timer */
1414
1415 ev_feed_event (EV_A_ (W)w, EV_PERIODIC);
1416 }
1417}
1418
1419static void noinline
1420periodics_reschedule (EV_P)
1421{
1422 int i;
1423
1424 /* adjust periodics after time jump */
1425 for (i = 0; i < periodiccnt; ++i)
1426 {
1427 ev_periodic *w = (ev_periodic *)periodics [i];
1428
1429 if (w->reschedule_cb)
1430 ((WT)w)->at = w->reschedule_cb (w, ev_rt_now);
1431 else if (w->interval)
1432 ((WT)w)->at = w->offset + ceil ((ev_rt_now - w->offset) / w->interval) * w->interval;
1433 }
1434
1435 /* now rebuild the heap */
1436 for (i = periodiccnt >> 1; i--; )
1437 downheap (periodics, periodiccnt, i);
1438}
1439#endif
1440
1441#if EV_IDLE_ENABLE 1580#if EV_IDLE_ENABLE
1442void inline_size 1581void inline_size
1443idle_reify (EV_P) 1582idle_reify (EV_P)
1444{ 1583{
1445 if (expect_false (idleall)) 1584 if (expect_false (idleall))
1456 queue_events (EV_A_ (W *)idles [pri], idlecnt [pri], EV_IDLE); 1595 queue_events (EV_A_ (W *)idles [pri], idlecnt [pri], EV_IDLE);
1457 break; 1596 break;
1458 } 1597 }
1459 } 1598 }
1460 } 1599 }
1600}
1601#endif
1602
1603void inline_size
1604timers_reify (EV_P)
1605{
1606 while (timercnt && ANHE_at (timers [HEAP0]) < mn_now)
1607 {
1608 ev_timer *w = (ev_timer *)ANHE_w (timers [HEAP0]);
1609
1610 /*assert (("inactive timer on timer heap detected", ev_is_active (w)));*/
1611
1612 /* first reschedule or stop timer */
1613 if (w->repeat)
1614 {
1615 ev_at (w) += w->repeat;
1616 if (ev_at (w) < mn_now)
1617 ev_at (w) = mn_now;
1618
1619 assert (("negative ev_timer repeat value found while processing timers", w->repeat > 0.));
1620
1621 ANHE_at_set (timers [HEAP0]);
1622 downheap (timers, timercnt, HEAP0);
1623 }
1624 else
1625 ev_timer_stop (EV_A_ w); /* nonrepeating: stop timer */
1626
1627 ev_feed_event (EV_A_ (W)w, EV_TIMEOUT);
1628 }
1629}
1630
1631#if EV_PERIODIC_ENABLE
1632void inline_size
1633periodics_reify (EV_P)
1634{
1635 while (periodiccnt && ANHE_at (periodics [HEAP0]) < ev_rt_now)
1636 {
1637 ev_periodic *w = (ev_periodic *)ANHE_w (periodics [HEAP0]);
1638
1639 /*assert (("inactive timer on periodic heap detected", ev_is_active (w)));*/
1640
1641 /* first reschedule or stop timer */
1642 if (w->reschedule_cb)
1643 {
1644 ev_at (w) = w->reschedule_cb (w, ev_rt_now);
1645
1646 assert (("ev_periodic reschedule callback returned time in the past", ev_at (w) >= ev_rt_now));
1647
1648 ANHE_at_set (periodics [HEAP0]);
1649 downheap (periodics, periodiccnt, HEAP0);
1650 }
1651 else if (w->interval)
1652 {
1653 ev_at (w) = w->offset + ceil ((ev_rt_now - w->offset) / w->interval) * w->interval;
1654 if (ev_at (w) - ev_rt_now <= TIME_EPSILON) ev_at (w) += w->interval;
1655
1656 assert (("ev_periodic timeout in the past detected while processing timers, negative interval?", ev_at (w) >= ev_rt_now));
1657
1658 ANHE_at_set (periodics [HEAP0]);
1659 downheap (periodics, periodiccnt, HEAP0);
1660 }
1661 else
1662 ev_periodic_stop (EV_A_ w); /* nonrepeating: stop timer */
1663
1664 ev_feed_event (EV_A_ (W)w, EV_PERIODIC);
1665 }
1666}
1667
1668static void noinline
1669periodics_reschedule (EV_P)
1670{
1671 int i;
1672
1673 /* adjust periodics after time jump */
1674 for (i = HEAP0; i < periodiccnt + HEAP0; ++i)
1675 {
1676 ev_periodic *w = (ev_periodic *)ANHE_w (periodics [i]);
1677
1678 if (w->reschedule_cb)
1679 ev_at (w) = w->reschedule_cb (w, ev_rt_now);
1680 else if (w->interval)
1681 ev_at (w) = w->offset + ceil ((ev_rt_now - w->offset) / w->interval) * w->interval;
1682
1683 ANHE_at_set (periodics [i]);
1684 }
1685
1686 /* we don't use floyd's algorithm, upheap is simpler and more cache-efficient */
1687 /* also, this is easy and correct for both 2-heaps and 4-heaps */
1688 for (i = 0; i < periodiccnt; ++i)
1689 upheap (periodics, i + HEAP0);
1461} 1690}
1462#endif 1691#endif
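
The alignment formula used in periodics_reify and ev_periodic_start snaps the next timestamp onto the offset + N * interval grid. A worked example with made-up numbers:

    /* offset = 0., interval = 3600. (hourly), ev_rt_now = 5000.:
     *   at = 0. + ceil ((5000. - 0.) / 3600.) * 3600.
     *      = ceil (1.3889) * 3600.
     *      = 2 * 3600. = 7200.
     * the watcher next fires at 7200., the first grid point after "now";
     * the TIME_EPSILON check above pushes it one interval further when
     * rounding lands (almost) exactly on ev_rt_now. */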
1463 1692
1464void inline_speed 1693void inline_speed
1465time_update (EV_P_ ev_tstamp max_block) 1694time_update (EV_P_ ev_tstamp max_block)
1494 */ 1723 */
1495 for (i = 4; --i; ) 1724 for (i = 4; --i; )
1496 { 1725 {
1497 rtmn_diff = ev_rt_now - mn_now; 1726 rtmn_diff = ev_rt_now - mn_now;
1498 1727
1499 if (fabs (odiff - rtmn_diff) < MIN_TIMEJUMP) 1728 if (expect_true (fabs (odiff - rtmn_diff) < MIN_TIMEJUMP))
1500 return; /* all is well */ 1729 return; /* all is well */
1501 1730
1502 ev_rt_now = ev_time (); 1731 ev_rt_now = ev_time ();
1503 mn_now = get_clock (); 1732 mn_now = get_clock ();
1504 now_floor = mn_now; 1733 now_floor = mn_now;
1520#if EV_PERIODIC_ENABLE 1749#if EV_PERIODIC_ENABLE
1521 periodics_reschedule (EV_A); 1750 periodics_reschedule (EV_A);
1522#endif 1751#endif
1523 /* adjust timers. this is easy, as the offset is the same for all of them */ 1752 /* adjust timers. this is easy, as the offset is the same for all of them */
1524 for (i = 0; i < timercnt; ++i) 1753 for (i = 0; i < timercnt; ++i)
1754 {
1755 ANHE *he = timers + i + HEAP0;
1525 ((WT)timers [i])->at += ev_rt_now - mn_now; 1756 ANHE_w (*he)->at += ev_rt_now - mn_now;
1757 ANHE_at_set (*he);
1758 }
1526 } 1759 }
1527 1760
1528 mn_now = ev_rt_now; 1761 mn_now = ev_rt_now;
1529 } 1762 }
1530} 1763}
1544static int loop_done; 1777static int loop_done;
1545 1778
1546void 1779void
1547ev_loop (EV_P_ int flags) 1780ev_loop (EV_P_ int flags)
1548{ 1781{
1549 loop_done = flags & (EVLOOP_ONESHOT | EVLOOP_NONBLOCK) 1782 loop_done = EVUNLOOP_CANCEL;
1550 ? EVUNLOOP_ONE
1551 : EVUNLOOP_CANCEL;
1552 1783
1553 call_pending (EV_A); /* in case we recurse, ensure ordering stays nice and clean */ 1784 call_pending (EV_A); /* in case we recurse, ensure ordering stays nice and clean */
1554 1785
1555 do 1786 do
1556 { 1787 {
1602 1833
1603 waittime = MAX_BLOCKTIME; 1834 waittime = MAX_BLOCKTIME;
1604 1835
1605 if (timercnt) 1836 if (timercnt)
1606 { 1837 {
1607 ev_tstamp to = ((WT)timers [0])->at - mn_now + backend_fudge; 1838 ev_tstamp to = ANHE_at (timers [HEAP0]) - mn_now + backend_fudge;
1608 if (waittime > to) waittime = to; 1839 if (waittime > to) waittime = to;
1609 } 1840 }
1610 1841
1611#if EV_PERIODIC_ENABLE 1842#if EV_PERIODIC_ENABLE
1612 if (periodiccnt) 1843 if (periodiccnt)
1613 { 1844 {
1614 ev_tstamp to = ((WT)periodics [0])->at - ev_rt_now + backend_fudge; 1845 ev_tstamp to = ANHE_at (periodics [HEAP0]) - ev_rt_now + backend_fudge;
1615 if (waittime > to) waittime = to; 1846 if (waittime > to) waittime = to;
1616 } 1847 }
1617#endif 1848#endif
1618 1849
1619 if (expect_false (waittime < timeout_blocktime)) 1850 if (expect_false (waittime < timeout_blocktime))
1652 /* queue check watchers, to be executed first */ 1883 /* queue check watchers, to be executed first */
1653 if (expect_false (checkcnt)) 1884 if (expect_false (checkcnt))
1654 queue_events (EV_A_ (W *)checks, checkcnt, EV_CHECK); 1885 queue_events (EV_A_ (W *)checks, checkcnt, EV_CHECK);
1655 1886
1656 call_pending (EV_A); 1887 call_pending (EV_A);
1657
1658 } 1888 }
1659 while (expect_true (activecnt && !loop_done)); 1889 while (expect_true (
1890 activecnt
1891 && !loop_done
1892 && !(flags & (EVLOOP_ONESHOT | EVLOOP_NONBLOCK))
1893 ));
1660 1894
1661 if (loop_done == EVUNLOOP_ONE) 1895 if (loop_done == EVUNLOOP_ONE)
1662 loop_done = EVUNLOOP_CANCEL; 1896 loop_done = EVUNLOOP_CANCEL;
1663} 1897}
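
With the oneshot/nonblock handling moved into the loop condition, loop_done is only ever set by ev_unloop. For reference, the usual call patterns from user code (not part of ev.c):

    /* ev_loop (EV_A_ 0);                  -- run until ev_unloop or no active watchers
     * ev_loop (EV_A_ EVLOOP_NONBLOCK);    -- poll for events once, do not block
     * ev_loop (EV_A_ EVLOOP_ONESHOT);     -- block once, handle events, return
     *
     * inside a callback, ev_unloop (EV_A_ EVUNLOOP_ONE) returns from the
     * innermost ev_loop call, EVUNLOOP_ALL from all nested ones. */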
1664 1898
1768{ 2002{
1769 clear_pending (EV_A_ (W)w); 2003 clear_pending (EV_A_ (W)w);
1770 if (expect_false (!ev_is_active (w))) 2004 if (expect_false (!ev_is_active (w)))
1771 return; 2005 return;
1772 2006
1773 assert (("ev_io_start called with illegal fd (must stay constant after start!)", w->fd >= 0 && w->fd < anfdmax)); 2007 assert (("ev_io_stop called with illegal fd (must stay constant after start!)", w->fd >= 0 && w->fd < anfdmax));
1774 2008
1775 wlist_del (&anfds[w->fd].head, (WL)w); 2009 wlist_del (&anfds[w->fd].head, (WL)w);
1776 ev_stop (EV_A_ (W)w); 2010 ev_stop (EV_A_ (W)w);
1777 2011
1778 fd_change (EV_A_ w->fd, 1); 2012 fd_change (EV_A_ w->fd, 1);
1782ev_timer_start (EV_P_ ev_timer *w) 2016ev_timer_start (EV_P_ ev_timer *w)
1783{ 2017{
1784 if (expect_false (ev_is_active (w))) 2018 if (expect_false (ev_is_active (w)))
1785 return; 2019 return;
1786 2020
1787 ((WT)w)->at += mn_now; 2021 ev_at (w) += mn_now;
1788 2022
1789 assert (("ev_timer_start called with negative timer repeat value", w->repeat >= 0.)); 2023 assert (("ev_timer_start called with negative timer repeat value", w->repeat >= 0.));
1790 2024
1791 ev_start (EV_A_ (W)w, ++timercnt); 2025 ev_start (EV_A_ (W)w, ++timercnt + HEAP0 - 1);
1792 array_needsize (WT, timers, timermax, timercnt, EMPTY2); 2026 array_needsize (ANHE, timers, timermax, ev_active (w) + 1, EMPTY2);
1793 timers [timercnt - 1] = (WT)w; 2027 ANHE_w (timers [ev_active (w)]) = (WT)w;
1794 upheap (timers, timercnt - 1); 2028 ANHE_at_set (timers [ev_active (w)]);
2029 upheap (timers, ev_active (w));
1795 2030
1796 /*assert (("internal timer heap corruption", timers [((W)w)->active - 1] == w));*/ 2031 /*assert (("internal timer heap corruption", timers [ev_active (w)] == (WT)w));*/
1797} 2032}
1798 2033
1799void noinline 2034void noinline
1800ev_timer_stop (EV_P_ ev_timer *w) 2035ev_timer_stop (EV_P_ ev_timer *w)
1801{ 2036{
1802 clear_pending (EV_A_ (W)w); 2037 clear_pending (EV_A_ (W)w);
1803 if (expect_false (!ev_is_active (w))) 2038 if (expect_false (!ev_is_active (w)))
1804 return; 2039 return;
1805 2040
1806 assert (("internal timer heap corruption", timers [((W)w)->active - 1] == (WT)w));
1807
1808 { 2041 {
1809 int active = ((W)w)->active; 2042 int active = ev_active (w);
1810 2043
2044 assert (("internal timer heap corruption", ANHE_w (timers [active]) == (WT)w));
2045
1811 if (expect_true (--active < --timercnt)) 2046 if (expect_true (active < timercnt + HEAP0 - 1))
1812 { 2047 {
1813 timers [active] = timers [timercnt]; 2048 timers [active] = timers [timercnt + HEAP0 - 1];
1814 adjustheap (timers, timercnt, active); 2049 adjustheap (timers, timercnt, active);
1815 } 2050 }
2051
2052 --timercnt;
1816 } 2053 }
1817 2054
1818 ((WT)w)->at -= mn_now; 2055 ev_at (w) -= mn_now;
1819 2056
1820 ev_stop (EV_A_ (W)w); 2057 ev_stop (EV_A_ (W)w);
1821} 2058}
1822 2059
1823void noinline 2060void noinline
1825{ 2062{
1826 if (ev_is_active (w)) 2063 if (ev_is_active (w))
1827 { 2064 {
1828 if (w->repeat) 2065 if (w->repeat)
1829 { 2066 {
1830 ((WT)w)->at = mn_now + w->repeat; 2067 ev_at (w) = mn_now + w->repeat;
2068 ANHE_at_set (timers [ev_active (w)]);
1831 adjustheap (timers, timercnt, ((W)w)->active - 1); 2069 adjustheap (timers, timercnt, ev_active (w));
1832 } 2070 }
1833 else 2071 else
1834 ev_timer_stop (EV_A_ w); 2072 ev_timer_stop (EV_A_ w);
1835 } 2073 }
1836 else if (w->repeat) 2074 else if (w->repeat)
1837 { 2075 {
1838 w->at = w->repeat; 2076 ev_at (w) = w->repeat;
1839 ev_timer_start (EV_A_ w); 2077 ev_timer_start (EV_A_ w);
1840 } 2078 }
1841} 2079}
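
ev_timer_again either re-schedules the running timer in place (now refreshing the cached timestamp before adjustheap) or starts it from its repeat value. The classic use is an inactivity timeout; a sketch with illustrative names:

    #include "ev.h"

    static ev_timer timeout_watcher;

    static void
    timeout_cb (EV_P_ ev_timer *w, int revents)
    {
      /* no activity for w->repeat (here 60) seconds */
    }

    /* setup, once:
     *   ev_timer_init (&timeout_watcher, timeout_cb, 0., 60.);
     *   ev_timer_again (EV_A_ &timeout_watcher);
     * and on every bit of activity:
     *   ev_timer_again (EV_A_ &timeout_watcher);
     * which restarts the 60 second countdown via the code above,
     * without a full stop + start. */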
1842 2080
1843#if EV_PERIODIC_ENABLE 2081#if EV_PERIODIC_ENABLE
1846{ 2084{
1847 if (expect_false (ev_is_active (w))) 2085 if (expect_false (ev_is_active (w)))
1848 return; 2086 return;
1849 2087
1850 if (w->reschedule_cb) 2088 if (w->reschedule_cb)
1851 ((WT)w)->at = w->reschedule_cb (w, ev_rt_now); 2089 ev_at (w) = w->reschedule_cb (w, ev_rt_now);
1852 else if (w->interval) 2090 else if (w->interval)
1853 { 2091 {
1854 assert (("ev_periodic_start called with negative interval value", w->interval >= 0.)); 2092 assert (("ev_periodic_start called with negative interval value", w->interval >= 0.));
1855 /* this formula differs from the one in periodic_reify because we do not always round up */ 2093 /* this formula differs from the one in periodic_reify because we do not always round up */
1856 ((WT)w)->at = w->offset + ceil ((ev_rt_now - w->offset) / w->interval) * w->interval; 2094 ev_at (w) = w->offset + ceil ((ev_rt_now - w->offset) / w->interval) * w->interval;
1857 } 2095 }
1858 else 2096 else
1859 ((WT)w)->at = w->offset; 2097 ev_at (w) = w->offset;
1860 2098
1861 ev_start (EV_A_ (W)w, ++periodiccnt); 2099 ev_start (EV_A_ (W)w, ++periodiccnt + HEAP0 - 1);
1862 array_needsize (WT, periodics, periodicmax, periodiccnt, EMPTY2); 2100 array_needsize (ANHE, periodics, periodicmax, ev_active (w) + 1, EMPTY2);
1863 periodics [periodiccnt - 1] = (WT)w; 2101 ANHE_w (periodics [ev_active (w)]) = (WT)w;
1864 upheap (periodics, periodiccnt - 1); 2102 ANHE_at_set (periodics [ev_active (w)]);
2103 upheap (periodics, ev_active (w));
1865 2104
1866 /*assert (("internal periodic heap corruption", periodics [((W)w)->active - 1] == w));*/ 2105 /*assert (("internal periodic heap corruption", ANHE_w (periodics [ev_active (w)]) == (WT)w));*/
1867} 2106}
1868 2107
1869void noinline 2108void noinline
1870ev_periodic_stop (EV_P_ ev_periodic *w) 2109ev_periodic_stop (EV_P_ ev_periodic *w)
1871{ 2110{
1872 clear_pending (EV_A_ (W)w); 2111 clear_pending (EV_A_ (W)w);
1873 if (expect_false (!ev_is_active (w))) 2112 if (expect_false (!ev_is_active (w)))
1874 return; 2113 return;
1875 2114
1876 assert (("internal periodic heap corruption", periodics [((W)w)->active - 1] == (WT)w));
1877
1878 { 2115 {
1879 int active = ((W)w)->active; 2116 int active = ev_active (w);
1880 2117
2118 assert (("internal periodic heap corruption", ANHE_w (periodics [active]) == (WT)w));
2119
1881 if (expect_true (--active < --periodiccnt)) 2120 if (expect_true (active < periodiccnt + HEAP0 - 1))
1882 { 2121 {
1883 periodics [active] = periodics [periodiccnt]; 2122 periodics [active] = periodics [periodiccnt + HEAP0 - 1];
1884 adjustheap (periodics, periodiccnt, active); 2123 adjustheap (periodics, periodiccnt, active);
1885 } 2124 }
2125
2126 --periodiccnt;
1886 } 2127 }
1887 2128
1888 ev_stop (EV_A_ (W)w); 2129 ev_stop (EV_A_ (W)w);
1889} 2130}
1890 2131
1932 wlist_add (&signals [w->signum - 1].head, (WL)w); 2173 wlist_add (&signals [w->signum - 1].head, (WL)w);
1933 2174
1934 if (!((WL)w)->next) 2175 if (!((WL)w)->next)
1935 { 2176 {
1936#if _WIN32 2177#if _WIN32
1937 signal (w->signum, sighandler); 2178 signal (w->signum, ev_sighandler);
1938#else 2179#else
1939 struct sigaction sa; 2180 struct sigaction sa;
1940 sa.sa_handler = sighandler; 2181 sa.sa_handler = ev_sighandler;
1941 sigfillset (&sa.sa_mask); 2182 sigfillset (&sa.sa_mask);
1942 sa.sa_flags = SA_RESTART; /* if restarting works we save one iteration */ 2183 sa.sa_flags = SA_RESTART; /* if restarting works we save one iteration */
1943 sigaction (w->signum, &sa, 0); 2184 sigaction (w->signum, &sa, 0);
1944#endif 2185#endif
1945 } 2186 }
2006 if (w->wd < 0) 2247 if (w->wd < 0)
2007 { 2248 {
2008 ev_timer_start (EV_A_ &w->timer); /* this is not race-free, so we still need to recheck periodically */ 2249 ev_timer_start (EV_A_ &w->timer); /* this is not race-free, so we still need to recheck periodically */
2009 2250
2010 /* monitor some parent directory for speedup hints */ 2251 /* monitor some parent directory for speedup hints */
2252 /* note that exceeding the hardcoded limit is not a correctness issue, */
2253 /* but an efficiency issue only */
2011 if ((errno == ENOENT || errno == EACCES) && strlen (w->path) < 4096) 2254 if ((errno == ENOENT || errno == EACCES) && strlen (w->path) < 4096)
2012 { 2255 {
2013 char path [4096]; 2256 char path [4096];
2014 strcpy (path, w->path); 2257 strcpy (path, w->path);
2015 2258
2260 clear_pending (EV_A_ (W)w); 2503 clear_pending (EV_A_ (W)w);
2261 if (expect_false (!ev_is_active (w))) 2504 if (expect_false (!ev_is_active (w)))
2262 return; 2505 return;
2263 2506
2264 { 2507 {
2265 int active = ((W)w)->active; 2508 int active = ev_active (w);
2266 2509
2267 idles [ABSPRI (w)][active - 1] = idles [ABSPRI (w)][--idlecnt [ABSPRI (w)]]; 2510 idles [ABSPRI (w)][active - 1] = idles [ABSPRI (w)][--idlecnt [ABSPRI (w)]];
2268 ((W)idles [ABSPRI (w)][active - 1])->active = active; 2511 ev_active (idles [ABSPRI (w)][active - 1]) = active;
2269 2512
2270 ev_stop (EV_A_ (W)w); 2513 ev_stop (EV_A_ (W)w);
2271 --idleall; 2514 --idleall;
2272 } 2515 }
2273} 2516}
2290 clear_pending (EV_A_ (W)w); 2533 clear_pending (EV_A_ (W)w);
2291 if (expect_false (!ev_is_active (w))) 2534 if (expect_false (!ev_is_active (w)))
2292 return; 2535 return;
2293 2536
2294 { 2537 {
2295 int active = ((W)w)->active; 2538 int active = ev_active (w);
2539
2296 prepares [active - 1] = prepares [--preparecnt]; 2540 prepares [active - 1] = prepares [--preparecnt];
2297 ((W)prepares [active - 1])->active = active; 2541 ev_active (prepares [active - 1]) = active;
2298 } 2542 }
2299 2543
2300 ev_stop (EV_A_ (W)w); 2544 ev_stop (EV_A_ (W)w);
2301} 2545}
2302 2546
2317 clear_pending (EV_A_ (W)w); 2561 clear_pending (EV_A_ (W)w);
2318 if (expect_false (!ev_is_active (w))) 2562 if (expect_false (!ev_is_active (w)))
2319 return; 2563 return;
2320 2564
2321 { 2565 {
2322 int active = ((W)w)->active; 2566 int active = ev_active (w);
2567
2323 checks [active - 1] = checks [--checkcnt]; 2568 checks [active - 1] = checks [--checkcnt];
2324 ((W)checks [active - 1])->active = active; 2569 ev_active (checks [active - 1]) = active;
2325 } 2570 }
2326 2571
2327 ev_stop (EV_A_ (W)w); 2572 ev_stop (EV_A_ (W)w);
2328} 2573}
2329 2574
2425 clear_pending (EV_A_ (W)w); 2670 clear_pending (EV_A_ (W)w);
2426 if (expect_false (!ev_is_active (w))) 2671 if (expect_false (!ev_is_active (w)))
2427 return; 2672 return;
2428 2673
2429 { 2674 {
2430 int active = ((W)w)->active; 2675 int active = ev_active (w);
2676
2431 forks [active - 1] = forks [--forkcnt]; 2677 forks [active - 1] = forks [--forkcnt];
2432 ((W)forks [active - 1])->active = active; 2678 ev_active (forks [active - 1]) = active;
2433 } 2679 }
2434 2680
2435 ev_stop (EV_A_ (W)w); 2681 ev_stop (EV_A_ (W)w);
2436} 2682}
2437#endif 2683#endif
2456 clear_pending (EV_A_ (W)w); 2702 clear_pending (EV_A_ (W)w);
2457 if (expect_false (!ev_is_active (w))) 2703 if (expect_false (!ev_is_active (w)))
2458 return; 2704 return;
2459 2705
2460 { 2706 {
2461 int active = ((W)w)->active; 2707 int active = ev_active (w);
2708
2462 asyncs [active - 1] = asyncs [--asynccnt]; 2709 asyncs [active - 1] = asyncs [--asynccnt];
2463 ((W)asyncs [active - 1])->active = active; 2710 ev_active (asyncs [active - 1]) = active;
2464 } 2711 }
2465 2712
2466 ev_stop (EV_A_ (W)w); 2713 ev_stop (EV_A_ (W)w);
2467} 2714}
2468 2715
2469void 2716void
2470ev_async_send (EV_P_ ev_async *w) 2717ev_async_send (EV_P_ ev_async *w)
2471{ 2718{
2472 w->sent = 1; 2719 w->sent = 1;
2473 evpipe_write (EV_A_ 0, 1); 2720 evpipe_write (EV_A_ &gotasync);
2474} 2721}
2475#endif 2722#endif
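
ev_async_send now funnels through evpipe_write with the gotasync flag, so it stays safe to call from other threads while coalescing repeated sends. A minimal usage sketch (watcher and callback names are illustrative):

    #include "ev.h"

    static ev_async async_watcher;

    static void
    async_cb (EV_P_ ev_async *w, int revents)
    {
      /* runs in the loop thread, once, however many times send was called */
    }

    /* in the loop thread:
     *   ev_async_init (&async_watcher, async_cb);
     *   ev_async_start (EV_A_ &async_watcher);
     * from any other thread:
     *   ev_async_send (EV_A_ &async_watcher);
     * multiple sends before the loop gets around to the callback are
     * merged into a single invocation (w->sent plus the gotasync flag). */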
2476 2723
2477/*****************************************************************************/ 2724/*****************************************************************************/
2478 2725
