… | |
… | |
#define MIN_TIMEJUMP  1. /* minimum timejump that gets detected (if monotonic clock available) */
#define MAX_BLOCKTIME 59.731 /* never wait longer than this time (to detect time jumps) */
#define PID_HASHSIZE  16 /* size of pid hash table, must be power of two */
/* #define CLEANUP_INTERVAL 300.  -- how often to try to free memory and re-check fds */
130 | |
130 | |
|
|
131 | #ifdef EV_H |
|
|
132 | # include EV_H |
|
|
133 | #else |
131 | #include "ev.h" |
134 | # include "ev.h" |
|
|
135 | #endif |
132 | |
136 | |
133 | #if __GNUC__ >= 3 |
137 | #if __GNUC__ >= 3 |
134 | # define expect(expr,value) __builtin_expect ((expr),(value)) |
138 | # define expect(expr,value) __builtin_expect ((expr),(value)) |
135 | # define inline inline |
139 | # define inline inline |
136 | #else |
140 | #else |
… | |
… | |
215 | int events; |
219 | int events; |
216 | } ANPENDING; |
220 | } ANPENDING; |
217 | |
221 | |
218 | #if EV_MULTIPLICITY |
222 | #if EV_MULTIPLICITY |
219 | |
223 | |
220 | struct ev_loop |
224 | struct ev_loop |
221 | { |
225 | { |
222 | # define VAR(name,decl) decl; |
226 | #define VAR(name,decl) decl; |
223 | # include "ev_vars.h" |
227 | #include "ev_vars.h" |
224 | }; |
|
|
225 | # undef VAR |
228 | #undef VAR |
|
|
229 | }; |
226 | # include "ev_wrap.h" |
230 | #include "ev_wrap.h" |
|
|
231 | |
|
|
232 | struct ev_loop default_loop_struct; |
|
|
233 | static struct ev_loop *default_loop; |
227 | |
234 | |
228 | #else |
235 | #else |
229 | |
236 | |
230 | # define VAR(name,decl) static decl; |
237 | #define VAR(name,decl) static decl; |
231 | # include "ev_vars.h" |
238 | #include "ev_vars.h" |
232 | # undef VAR |
239 | #undef VAR |
|
|
240 | |
|
|
241 | static int default_loop; |
233 | |
242 | |
234 | #endif |
243 | #endif |
235 | |
244 | |
236 | /*****************************************************************************/ |
245 | /*****************************************************************************/ |
237 | |
246 | |
… | |
… | |
268 | ev_now (EV_P) |
277 | ev_now (EV_P) |
269 | { |
278 | { |
270 | return rt_now; |
279 | return rt_now; |
271 | } |
280 | } |
272 | |
281 | |
/* round a requested element count up to at least 4 and a multiple of 4.
 * NOTE: the earlier form "(n) | 4 & ~3" parsed as "(n) | (4 & ~3)" because
 * & binds tighter than |, leaving the "& ~3" dead; parenthesised as upstream. */
#define array_roundsize(type,n) (((n) | 4) & ~3)

/* grow the dynamic array <base> (current capacity <cur>) so it can hold
 * <cnt> elements, doubling until large enough, then run <init> on the
 * newly added tail and update <cur>.  <init> may be "(void)" for no-op. */
#define array_needsize(type,base,cur,cnt,init)                  \
  if (expect_false ((cnt) > cur))                               \
    {                                                           \
      int newcnt = cur;                                         \
      do                                                        \
        {                                                       \
          newcnt = array_roundsize (type, newcnt << 1);         \
        }                                                       \
      while ((cnt) > newcnt);                                   \
                                                                \
      base = (type *)ev_realloc (base, sizeof (type) * (newcnt));\
      init (base + cur, newcnt - cur);                          \
      cur = newcnt;                                             \
    }

/* shrink stem##max/base when usage drops well below capacity.
 * NOTE: array_roundsize takes two arguments; the previous one-argument
 * calls here could not have expanded.  Debug fprintf marked /\*D*\/. */
#define array_slim(type,stem)                                   \
  if (stem ## max < array_roundsize (type, stem ## cnt >> 2))   \
    {                                                           \
      stem ## max = array_roundsize (type, stem ## cnt >> 1);   \
      base = (type *)ev_realloc (base, sizeof (type) * (stem ## max));\
      fprintf (stderr, "slimmed down " # stem " to %d\n", stem ## max);/*D*/\
    }
297 | |
306 | |
298 | /* microsoft's pseudo-c is quite far from C as the rest of the world and the standard knows it */ |
307 | /* microsoft's pseudo-c is quite far from C as the rest of the world and the standard knows it */ |
299 | /* bringing us everlasting joy in form of stupid extra macros that are not required in C */ |
308 | /* bringing us everlasting joy in form of stupid extra macros that are not required in C */ |
… | |
… | |
316 | |
325 | |
317 | ++base; |
326 | ++base; |
318 | } |
327 | } |
319 | } |
328 | } |
320 | |
329 | |
321 | static void |
330 | void |
322 | event (EV_P_ W w, int events) |
331 | ev_feed_event (EV_P_ void *w, int revents) |
323 | { |
332 | { |
|
|
333 | W w_ = (W)w; |
|
|
334 | |
324 | if (w->pending) |
335 | if (w_->pending) |
325 | { |
336 | { |
326 | pendings [ABSPRI (w)][w->pending - 1].events |= events; |
337 | pendings [ABSPRI (w_)][w_->pending - 1].events |= revents; |
327 | return; |
338 | return; |
328 | } |
339 | } |
329 | |
340 | |
330 | w->pending = ++pendingcnt [ABSPRI (w)]; |
341 | w_->pending = ++pendingcnt [ABSPRI (w_)]; |
331 | array_needsize (pendings [ABSPRI (w)], pendingmax [ABSPRI (w)], pendingcnt [ABSPRI (w)], (void)); |
342 | array_needsize (ANPENDING, pendings [ABSPRI (w_)], pendingmax [ABSPRI (w_)], pendingcnt [ABSPRI (w_)], (void)); |
332 | pendings [ABSPRI (w)][w->pending - 1].w = w; |
343 | pendings [ABSPRI (w_)][w_->pending - 1].w = w_; |
333 | pendings [ABSPRI (w)][w->pending - 1].events = events; |
344 | pendings [ABSPRI (w_)][w_->pending - 1].events = revents; |
334 | } |
345 | } |
335 | |
346 | |
336 | static void |
347 | static void |
337 | queue_events (EV_P_ W *events, int eventcnt, int type) |
348 | queue_events (EV_P_ W *events, int eventcnt, int type) |
338 | { |
349 | { |
339 | int i; |
350 | int i; |
340 | |
351 | |
341 | for (i = 0; i < eventcnt; ++i) |
352 | for (i = 0; i < eventcnt; ++i) |
342 | event (EV_A_ events [i], type); |
353 | ev_feed_event (EV_A_ events [i], type); |
343 | } |
354 | } |
344 | |
355 | |
345 | static void |
356 | inline void |
346 | fd_event (EV_P_ int fd, int events) |
357 | fd_event (EV_P_ int fd, int revents) |
347 | { |
358 | { |
348 | ANFD *anfd = anfds + fd; |
359 | ANFD *anfd = anfds + fd; |
349 | struct ev_io *w; |
360 | struct ev_io *w; |
350 | |
361 | |
351 | for (w = (struct ev_io *)anfd->head; w; w = (struct ev_io *)((WL)w)->next) |
362 | for (w = (struct ev_io *)anfd->head; w; w = (struct ev_io *)((WL)w)->next) |
352 | { |
363 | { |
353 | int ev = w->events & events; |
364 | int ev = w->events & revents; |
354 | |
365 | |
355 | if (ev) |
366 | if (ev) |
356 | event (EV_A_ (W)w, ev); |
367 | ev_feed_event (EV_A_ (W)w, ev); |
357 | } |
368 | } |
|
|
369 | } |
|
|
370 | |
|
|
371 | void |
|
|
372 | ev_feed_fd_event (EV_P_ int fd, int revents) |
|
|
373 | { |
|
|
374 | fd_event (EV_A_ fd, revents); |
358 | } |
375 | } |
359 | |
376 | |
360 | /*****************************************************************************/ |
377 | /*****************************************************************************/ |
361 | |
378 | |
362 | static void |
379 | static void |
… | |
… | |
391 | return; |
408 | return; |
392 | |
409 | |
393 | anfds [fd].reify = 1; |
410 | anfds [fd].reify = 1; |
394 | |
411 | |
395 | ++fdchangecnt; |
412 | ++fdchangecnt; |
396 | array_needsize (fdchanges, fdchangemax, fdchangecnt, (void)); |
413 | array_needsize (int, fdchanges, fdchangemax, fdchangecnt, (void)); |
397 | fdchanges [fdchangecnt - 1] = fd; |
414 | fdchanges [fdchangecnt - 1] = fd; |
398 | } |
415 | } |
399 | |
416 | |
400 | static void |
417 | static void |
401 | fd_kill (EV_P_ int fd) |
418 | fd_kill (EV_P_ int fd) |
… | |
… | |
403 | struct ev_io *w; |
420 | struct ev_io *w; |
404 | |
421 | |
405 | while ((w = (struct ev_io *)anfds [fd].head)) |
422 | while ((w = (struct ev_io *)anfds [fd].head)) |
406 | { |
423 | { |
407 | ev_io_stop (EV_A_ w); |
424 | ev_io_stop (EV_A_ w); |
408 | event (EV_A_ (W)w, EV_ERROR | EV_READ | EV_WRITE); |
425 | ev_feed_event (EV_A_ (W)w, EV_ERROR | EV_READ | EV_WRITE); |
409 | } |
426 | } |
410 | } |
427 | } |
411 | |
428 | |
412 | static int |
429 | static int |
413 | fd_valid (int fd) |
430 | fd_valid (int fd) |
… | |
… | |
541 | |
558 | |
542 | if (!gotsig) |
559 | if (!gotsig) |
543 | { |
560 | { |
544 | int old_errno = errno; |
561 | int old_errno = errno; |
545 | gotsig = 1; |
562 | gotsig = 1; |
|
|
563 | #ifdef WIN32 |
|
|
564 | send (sigpipe [1], &signum, 1, MSG_DONTWAIT); |
|
|
565 | #else |
546 | write (sigpipe [1], &signum, 1); |
566 | write (sigpipe [1], &signum, 1); |
|
|
567 | #endif |
547 | errno = old_errno; |
568 | errno = old_errno; |
548 | } |
569 | } |
549 | } |
570 | } |
550 | |
571 | |
|
|
572 | void |
|
|
573 | ev_feed_signal_event (EV_P_ int signum) |
|
|
574 | { |
|
|
575 | WL w; |
|
|
576 | |
|
|
577 | #if EV_MULTIPLICITY |
|
|
578 | assert (("feeding signal events is only supported in the default loop", loop == default_loop)); |
|
|
579 | #endif |
|
|
580 | |
|
|
581 | --signum; |
|
|
582 | |
|
|
583 | if (signum < 0 || signum >= signalmax) |
|
|
584 | return; |
|
|
585 | |
|
|
586 | signals [signum].gotsig = 0; |
|
|
587 | |
|
|
588 | for (w = signals [signum].head; w; w = w->next) |
|
|
589 | ev_feed_event (EV_A_ (W)w, EV_SIGNAL); |
|
|
590 | } |
|
|
591 | |
551 | static void |
592 | static void |
552 | sigcb (EV_P_ struct ev_io *iow, int revents) |
593 | sigcb (EV_P_ struct ev_io *iow, int revents) |
553 | { |
594 | { |
554 | WL w; |
|
|
555 | int signum; |
595 | int signum; |
556 | |
596 | |
|
|
597 | #ifdef WIN32 |
|
|
598 | recv (sigpipe [0], &revents, 1, MSG_DONTWAIT); |
|
|
599 | #else |
557 | read (sigpipe [0], &revents, 1); |
600 | read (sigpipe [0], &revents, 1); |
|
|
601 | #endif |
558 | gotsig = 0; |
602 | gotsig = 0; |
559 | |
603 | |
560 | for (signum = signalmax; signum--; ) |
604 | for (signum = signalmax; signum--; ) |
561 | if (signals [signum].gotsig) |
605 | if (signals [signum].gotsig) |
562 | { |
606 | ev_feed_signal_event (EV_A_ signum + 1); |
563 | signals [signum].gotsig = 0; |
|
|
564 | |
|
|
565 | for (w = signals [signum].head; w; w = w->next) |
|
|
566 | event (EV_A_ (W)w, EV_SIGNAL); |
|
|
567 | } |
|
|
568 | } |
607 | } |
569 | |
608 | |
570 | static void |
609 | static void |
571 | siginit (EV_P) |
610 | siginit (EV_P) |
572 | { |
611 | { |
… | |
… | |
605 | if (w->pid == pid || !w->pid) |
644 | if (w->pid == pid || !w->pid) |
606 | { |
645 | { |
607 | ev_priority (w) = ev_priority (sw); /* need to do it *now* */ |
646 | ev_priority (w) = ev_priority (sw); /* need to do it *now* */ |
608 | w->rpid = pid; |
647 | w->rpid = pid; |
609 | w->rstatus = status; |
648 | w->rstatus = status; |
610 | event (EV_A_ (W)w, EV_CHILD); |
649 | ev_feed_event (EV_A_ (W)w, EV_CHILD); |
611 | } |
650 | } |
612 | } |
651 | } |
613 | |
652 | |
614 | static void |
653 | static void |
615 | childcb (EV_P_ struct ev_signal *sw, int revents) |
654 | childcb (EV_P_ struct ev_signal *sw, int revents) |
… | |
… | |
617 | int pid, status; |
656 | int pid, status; |
618 | |
657 | |
619 | if (0 < (pid = waitpid (-1, &status, WNOHANG | WUNTRACED | WCONTINUED))) |
658 | if (0 < (pid = waitpid (-1, &status, WNOHANG | WUNTRACED | WCONTINUED))) |
620 | { |
659 | { |
621 | /* make sure we are called again until all childs have been reaped */ |
660 | /* make sure we are called again until all childs have been reaped */ |
622 | event (EV_A_ (W)sw, EV_SIGNAL); |
661 | ev_feed_event (EV_A_ (W)sw, EV_SIGNAL); |
623 | |
662 | |
624 | child_reap (EV_A_ sw, pid, pid, status); |
663 | child_reap (EV_A_ sw, pid, pid, status); |
625 | child_reap (EV_A_ sw, 0, pid, status); /* this might trigger a watcher twice, but event catches that */ |
664 | child_reap (EV_A_ sw, 0, pid, status); /* this might trigger a watcher twice, but event catches that */ |
626 | } |
665 | } |
627 | } |
666 | } |
… | |
… | |
712 | #endif |
751 | #endif |
713 | #if EV_USE_SELECT |
752 | #if EV_USE_SELECT |
714 | if (!method && (methods & EVMETHOD_SELECT)) method = select_init (EV_A_ methods); |
753 | if (!method && (methods & EVMETHOD_SELECT)) method = select_init (EV_A_ methods); |
715 | #endif |
754 | #endif |
716 | |
755 | |
717 | ev_watcher_init (&sigev, sigcb); |
756 | ev_init (&sigev, sigcb); |
718 | ev_set_priority (&sigev, EV_MAXPRI); |
757 | ev_set_priority (&sigev, EV_MAXPRI); |
719 | } |
758 | } |
720 | } |
759 | } |
721 | |
760 | |
722 | void |
761 | void |
… | |
… | |
812 | } |
851 | } |
813 | |
852 | |
814 | #endif |
853 | #endif |
815 | |
854 | |
816 | #if EV_MULTIPLICITY |
855 | #if EV_MULTIPLICITY |
817 | struct ev_loop default_loop_struct; |
|
|
818 | static struct ev_loop *default_loop; |
|
|
819 | |
|
|
820 | struct ev_loop * |
856 | struct ev_loop * |
821 | #else |
857 | #else |
822 | static int default_loop; |
|
|
823 | |
|
|
824 | int |
858 | int |
825 | #endif |
859 | #endif |
826 | ev_default_loop (int methods) |
860 | ev_default_loop (int methods) |
827 | { |
861 | { |
828 | if (sigpipe [0] == sigpipe [1]) |
862 | if (sigpipe [0] == sigpipe [1]) |
… | |
… | |
889 | postfork = 1; |
923 | postfork = 1; |
890 | } |
924 | } |
891 | |
925 | |
892 | /*****************************************************************************/ |
926 | /*****************************************************************************/ |
893 | |
927 | |
|
|
928 | static int |
|
|
929 | any_pending (EV_P) |
|
|
930 | { |
|
|
931 | int pri; |
|
|
932 | |
|
|
933 | for (pri = NUMPRI; pri--; ) |
|
|
934 | if (pendingcnt [pri]) |
|
|
935 | return 1; |
|
|
936 | |
|
|
937 | return 0; |
|
|
938 | } |
|
|
939 | |
894 | static void |
940 | static void |
895 | call_pending (EV_P) |
941 | call_pending (EV_P) |
896 | { |
942 | { |
897 | int pri; |
943 | int pri; |
898 | |
944 | |
… | |
… | |
902 | ANPENDING *p = pendings [pri] + --pendingcnt [pri]; |
948 | ANPENDING *p = pendings [pri] + --pendingcnt [pri]; |
903 | |
949 | |
904 | if (p->w) |
950 | if (p->w) |
905 | { |
951 | { |
906 | p->w->pending = 0; |
952 | p->w->pending = 0; |
907 | p->w->cb (EV_A_ p->w, p->events); |
953 | EV_CB_INVOKE (p->w, p->events); |
908 | } |
954 | } |
909 | } |
955 | } |
910 | } |
956 | } |
911 | |
957 | |
912 | static void |
958 | static void |
… | |
… | |
926 | downheap ((WT *)timers, timercnt, 0); |
972 | downheap ((WT *)timers, timercnt, 0); |
927 | } |
973 | } |
928 | else |
974 | else |
929 | ev_timer_stop (EV_A_ w); /* nonrepeating: stop timer */ |
975 | ev_timer_stop (EV_A_ w); /* nonrepeating: stop timer */ |
930 | |
976 | |
931 | event (EV_A_ (W)w, EV_TIMEOUT); |
977 | ev_feed_event (EV_A_ (W)w, EV_TIMEOUT); |
932 | } |
978 | } |
933 | } |
979 | } |
934 | |
980 | |
935 | static void |
981 | static void |
936 | periodics_reify (EV_P) |
982 | periodics_reify (EV_P) |
… | |
… | |
940 | struct ev_periodic *w = periodics [0]; |
986 | struct ev_periodic *w = periodics [0]; |
941 | |
987 | |
942 | assert (("inactive timer on periodic heap detected", ev_is_active (w))); |
988 | assert (("inactive timer on periodic heap detected", ev_is_active (w))); |
943 | |
989 | |
944 | /* first reschedule or stop timer */ |
990 | /* first reschedule or stop timer */ |
|
|
991 | if (w->reschedule_cb) |
|
|
992 | { |
|
|
993 | ev_tstamp at = ((WT)w)->at = w->reschedule_cb (w, rt_now + 0.0001); |
|
|
994 | |
|
|
995 | assert (("ev_periodic reschedule callback returned time in the past", ((WT)w)->at > rt_now)); |
|
|
996 | downheap ((WT *)periodics, periodiccnt, 0); |
|
|
997 | } |
945 | if (w->interval) |
998 | else if (w->interval) |
946 | { |
999 | { |
947 | ((WT)w)->at += floor ((rt_now - ((WT)w)->at) / w->interval + 1.) * w->interval; |
1000 | ((WT)w)->at += floor ((rt_now - ((WT)w)->at) / w->interval + 1.) * w->interval; |
948 | assert (("ev_periodic timeout in the past detected while processing timers, negative interval?", ((WT)w)->at > rt_now)); |
1001 | assert (("ev_periodic timeout in the past detected while processing timers, negative interval?", ((WT)w)->at > rt_now)); |
949 | downheap ((WT *)periodics, periodiccnt, 0); |
1002 | downheap ((WT *)periodics, periodiccnt, 0); |
950 | } |
1003 | } |
951 | else |
1004 | else |
952 | ev_periodic_stop (EV_A_ w); /* nonrepeating: stop timer */ |
1005 | ev_periodic_stop (EV_A_ w); /* nonrepeating: stop timer */ |
953 | |
1006 | |
954 | event (EV_A_ (W)w, EV_PERIODIC); |
1007 | ev_feed_event (EV_A_ (W)w, EV_PERIODIC); |
955 | } |
1008 | } |
956 | } |
1009 | } |
957 | |
1010 | |
958 | static void |
1011 | static void |
959 | periodics_reschedule (EV_P) |
1012 | periodics_reschedule (EV_P) |
… | |
… | |
963 | /* adjust periodics after time jump */ |
1016 | /* adjust periodics after time jump */ |
964 | for (i = 0; i < periodiccnt; ++i) |
1017 | for (i = 0; i < periodiccnt; ++i) |
965 | { |
1018 | { |
966 | struct ev_periodic *w = periodics [i]; |
1019 | struct ev_periodic *w = periodics [i]; |
967 | |
1020 | |
|
|
1021 | if (w->reschedule_cb) |
|
|
1022 | ((WT)w)->at = w->reschedule_cb (w, rt_now); |
968 | if (w->interval) |
1023 | else if (w->interval) |
969 | { |
|
|
970 | ev_tstamp diff = ceil ((rt_now - ((WT)w)->at) / w->interval) * w->interval; |
1024 | ((WT)w)->at += ceil ((rt_now - ((WT)w)->at) / w->interval) * w->interval; |
971 | |
|
|
972 | if (fabs (diff) >= 1e-4) |
|
|
973 | { |
|
|
974 | ev_periodic_stop (EV_A_ w); |
|
|
975 | ev_periodic_start (EV_A_ w); |
|
|
976 | |
|
|
977 | i = 0; /* restart loop, inefficient, but time jumps should be rare */ |
|
|
978 | } |
|
|
979 | } |
|
|
980 | } |
1025 | } |
|
|
1026 | |
|
|
1027 | /* now rebuild the heap */ |
|
|
1028 | for (i = periodiccnt >> 1; i--; ) |
|
|
1029 | downheap ((WT *)periodics, periodiccnt, i); |
981 | } |
1030 | } |
982 | |
1031 | |
983 | inline int |
1032 | inline int |
984 | time_update_monotonic (EV_P) |
1033 | time_update_monotonic (EV_P) |
985 | { |
1034 | { |
… | |
… | |
1081 | /* update fd-related kernel structures */ |
1130 | /* update fd-related kernel structures */ |
1082 | fd_reify (EV_A); |
1131 | fd_reify (EV_A); |
1083 | |
1132 | |
1084 | /* calculate blocking time */ |
1133 | /* calculate blocking time */ |
1085 | |
1134 | |
1086 | /* we only need this for !monotonic clockor timers, but as we basically |
1135 | /* we only need this for !monotonic clock or timers, but as we basically |
1087 | always have timers, we just calculate it always */ |
1136 | always have timers, we just calculate it always */ |
1088 | #if EV_USE_MONOTONIC |
1137 | #if EV_USE_MONOTONIC |
1089 | if (expect_true (have_monotonic)) |
1138 | if (expect_true (have_monotonic)) |
1090 | time_update_monotonic (EV_A); |
1139 | time_update_monotonic (EV_A); |
1091 | else |
1140 | else |
… | |
… | |
1124 | /* queue pending timers and reschedule them */ |
1173 | /* queue pending timers and reschedule them */ |
1125 | timers_reify (EV_A); /* relative timers called last */ |
1174 | timers_reify (EV_A); /* relative timers called last */ |
1126 | periodics_reify (EV_A); /* absolute timers called first */ |
1175 | periodics_reify (EV_A); /* absolute timers called first */ |
1127 | |
1176 | |
1128 | /* queue idle watchers unless io or timers are pending */ |
1177 | /* queue idle watchers unless io or timers are pending */ |
1129 | if (!pendingcnt) |
1178 | if (idlecnt && !any_pending (EV_A)) |
1130 | queue_events (EV_A_ (W *)idles, idlecnt, EV_IDLE); |
1179 | queue_events (EV_A_ (W *)idles, idlecnt, EV_IDLE); |
1131 | |
1180 | |
1132 | /* queue check watchers, to be executed first */ |
1181 | /* queue check watchers, to be executed first */ |
1133 | if (checkcnt) |
1182 | if (checkcnt) |
1134 | queue_events (EV_A_ (W *)checks, checkcnt, EV_CHECK); |
1183 | queue_events (EV_A_ (W *)checks, checkcnt, EV_CHECK); |
… | |
… | |
1209 | return; |
1258 | return; |
1210 | |
1259 | |
1211 | assert (("ev_io_start called with negative fd", fd >= 0)); |
1260 | assert (("ev_io_start called with negative fd", fd >= 0)); |
1212 | |
1261 | |
1213 | ev_start (EV_A_ (W)w, 1); |
1262 | ev_start (EV_A_ (W)w, 1); |
1214 | array_needsize (anfds, anfdmax, fd + 1, anfds_init); |
1263 | array_needsize (ANFD, anfds, anfdmax, fd + 1, anfds_init); |
1215 | wlist_add ((WL *)&anfds[fd].head, (WL)w); |
1264 | wlist_add ((WL *)&anfds[fd].head, (WL)w); |
1216 | |
1265 | |
1217 | fd_change (EV_A_ fd); |
1266 | fd_change (EV_A_ fd); |
1218 | } |
1267 | } |
1219 | |
1268 | |
… | |
… | |
1239 | ((WT)w)->at += mn_now; |
1288 | ((WT)w)->at += mn_now; |
1240 | |
1289 | |
1241 | assert (("ev_timer_start called with negative timer repeat value", w->repeat >= 0.)); |
1290 | assert (("ev_timer_start called with negative timer repeat value", w->repeat >= 0.)); |
1242 | |
1291 | |
1243 | ev_start (EV_A_ (W)w, ++timercnt); |
1292 | ev_start (EV_A_ (W)w, ++timercnt); |
1244 | array_needsize (timers, timermax, timercnt, (void)); |
1293 | array_needsize (struct ev_timer *, timers, timermax, timercnt, (void)); |
1245 | timers [timercnt - 1] = w; |
1294 | timers [timercnt - 1] = w; |
1246 | upheap ((WT *)timers, timercnt - 1); |
1295 | upheap ((WT *)timers, timercnt - 1); |
1247 | |
1296 | |
1248 | assert (("internal timer heap corruption", timers [((W)w)->active - 1] == w)); |
1297 | assert (("internal timer heap corruption", timers [((W)w)->active - 1] == w)); |
1249 | } |
1298 | } |
… | |
… | |
1289 | ev_periodic_start (EV_P_ struct ev_periodic *w) |
1338 | ev_periodic_start (EV_P_ struct ev_periodic *w) |
1290 | { |
1339 | { |
1291 | if (ev_is_active (w)) |
1340 | if (ev_is_active (w)) |
1292 | return; |
1341 | return; |
1293 | |
1342 | |
|
|
1343 | if (w->reschedule_cb) |
|
|
1344 | ((WT)w)->at = w->reschedule_cb (w, rt_now); |
|
|
1345 | else if (w->interval) |
|
|
1346 | { |
1294 | assert (("ev_periodic_start called with negative interval value", w->interval >= 0.)); |
1347 | assert (("ev_periodic_start called with negative interval value", w->interval >= 0.)); |
1295 | |
|
|
1296 | /* this formula differs from the one in periodic_reify because we do not always round up */ |
1348 | /* this formula differs from the one in periodic_reify because we do not always round up */ |
1297 | if (w->interval) |
|
|
1298 | ((WT)w)->at += ceil ((rt_now - ((WT)w)->at) / w->interval) * w->interval; |
1349 | ((WT)w)->at += ceil ((rt_now - ((WT)w)->at) / w->interval) * w->interval; |
|
|
1350 | } |
1299 | |
1351 | |
1300 | ev_start (EV_A_ (W)w, ++periodiccnt); |
1352 | ev_start (EV_A_ (W)w, ++periodiccnt); |
1301 | array_needsize (periodics, periodicmax, periodiccnt, (void)); |
1353 | array_needsize (struct ev_periodic *, periodics, periodicmax, periodiccnt, (void)); |
1302 | periodics [periodiccnt - 1] = w; |
1354 | periodics [periodiccnt - 1] = w; |
1303 | upheap ((WT *)periodics, periodiccnt - 1); |
1355 | upheap ((WT *)periodics, periodiccnt - 1); |
1304 | |
1356 | |
1305 | assert (("internal periodic heap corruption", periodics [((W)w)->active - 1] == w)); |
1357 | assert (("internal periodic heap corruption", periodics [((W)w)->active - 1] == w)); |
1306 | } |
1358 | } |
… | |
… | |
1322 | |
1374 | |
1323 | ev_stop (EV_A_ (W)w); |
1375 | ev_stop (EV_A_ (W)w); |
1324 | } |
1376 | } |
1325 | |
1377 | |
1326 | void |
1378 | void |
|
|
1379 | ev_periodic_again (EV_P_ struct ev_periodic *w) |
|
|
1380 | { |
|
|
1381 | ev_periodic_stop (EV_A_ w); |
|
|
1382 | ev_periodic_start (EV_A_ w); |
|
|
1383 | } |
|
|
1384 | |
|
|
1385 | void |
1327 | ev_idle_start (EV_P_ struct ev_idle *w) |
1386 | ev_idle_start (EV_P_ struct ev_idle *w) |
1328 | { |
1387 | { |
1329 | if (ev_is_active (w)) |
1388 | if (ev_is_active (w)) |
1330 | return; |
1389 | return; |
1331 | |
1390 | |
1332 | ev_start (EV_A_ (W)w, ++idlecnt); |
1391 | ev_start (EV_A_ (W)w, ++idlecnt); |
1333 | array_needsize (idles, idlemax, idlecnt, (void)); |
1392 | array_needsize (struct ev_idle *, idles, idlemax, idlecnt, (void)); |
1334 | idles [idlecnt - 1] = w; |
1393 | idles [idlecnt - 1] = w; |
1335 | } |
1394 | } |
1336 | |
1395 | |
1337 | void |
1396 | void |
1338 | ev_idle_stop (EV_P_ struct ev_idle *w) |
1397 | ev_idle_stop (EV_P_ struct ev_idle *w) |
… | |
… | |
1350 | { |
1409 | { |
1351 | if (ev_is_active (w)) |
1410 | if (ev_is_active (w)) |
1352 | return; |
1411 | return; |
1353 | |
1412 | |
1354 | ev_start (EV_A_ (W)w, ++preparecnt); |
1413 | ev_start (EV_A_ (W)w, ++preparecnt); |
1355 | array_needsize (prepares, preparemax, preparecnt, (void)); |
1414 | array_needsize (struct ev_prepare *, prepares, preparemax, preparecnt, (void)); |
1356 | prepares [preparecnt - 1] = w; |
1415 | prepares [preparecnt - 1] = w; |
1357 | } |
1416 | } |
1358 | |
1417 | |
1359 | void |
1418 | void |
1360 | ev_prepare_stop (EV_P_ struct ev_prepare *w) |
1419 | ev_prepare_stop (EV_P_ struct ev_prepare *w) |
… | |
… | |
1372 | { |
1431 | { |
1373 | if (ev_is_active (w)) |
1432 | if (ev_is_active (w)) |
1374 | return; |
1433 | return; |
1375 | |
1434 | |
1376 | ev_start (EV_A_ (W)w, ++checkcnt); |
1435 | ev_start (EV_A_ (W)w, ++checkcnt); |
1377 | array_needsize (checks, checkmax, checkcnt, (void)); |
1436 | array_needsize (struct ev_check *, checks, checkmax, checkcnt, (void)); |
1378 | checks [checkcnt - 1] = w; |
1437 | checks [checkcnt - 1] = w; |
1379 | } |
1438 | } |
1380 | |
1439 | |
1381 | void |
1440 | void |
1382 | ev_check_stop (EV_P_ struct ev_check *w) |
1441 | ev_check_stop (EV_P_ struct ev_check *w) |
… | |
… | |
1403 | return; |
1462 | return; |
1404 | |
1463 | |
1405 | assert (("ev_signal_start called with illegal signal number", w->signum > 0)); |
1464 | assert (("ev_signal_start called with illegal signal number", w->signum > 0)); |
1406 | |
1465 | |
1407 | ev_start (EV_A_ (W)w, 1); |
1466 | ev_start (EV_A_ (W)w, 1); |
1408 | array_needsize (signals, signalmax, w->signum, signals_init); |
1467 | array_needsize (ANSIG, signals, signalmax, w->signum, signals_init); |
1409 | wlist_add ((WL *)&signals [w->signum - 1].head, (WL)w); |
1468 | wlist_add ((WL *)&signals [w->signum - 1].head, (WL)w); |
1410 | |
1469 | |
1411 | if (!((WL)w)->next) |
1470 | if (!((WL)w)->next) |
1412 | { |
1471 | { |
1413 | #if WIN32 |
1472 | #if WIN32 |
… | |
… | |
1496 | } |
1555 | } |
1497 | |
1556 | |
1498 | void |
1557 | void |
1499 | ev_once (EV_P_ int fd, int events, ev_tstamp timeout, void (*cb)(int revents, void *arg), void *arg) |
1558 | ev_once (EV_P_ int fd, int events, ev_tstamp timeout, void (*cb)(int revents, void *arg), void *arg) |
1500 | { |
1559 | { |
1501 | struct ev_once *once = ev_malloc (sizeof (struct ev_once)); |
1560 | struct ev_once *once = (struct ev_once *)ev_malloc (sizeof (struct ev_once)); |
1502 | |
1561 | |
1503 | if (!once) |
1562 | if (!once) |
1504 | cb (EV_ERROR | EV_READ | EV_WRITE | EV_TIMEOUT, arg); |
1563 | cb (EV_ERROR | EV_READ | EV_WRITE | EV_TIMEOUT, arg); |
1505 | else |
1564 | else |
1506 | { |
1565 | { |
1507 | once->cb = cb; |
1566 | once->cb = cb; |
1508 | once->arg = arg; |
1567 | once->arg = arg; |
1509 | |
1568 | |
1510 | ev_watcher_init (&once->io, once_cb_io); |
1569 | ev_init (&once->io, once_cb_io); |
1511 | if (fd >= 0) |
1570 | if (fd >= 0) |
1512 | { |
1571 | { |
1513 | ev_io_set (&once->io, fd, events); |
1572 | ev_io_set (&once->io, fd, events); |
1514 | ev_io_start (EV_A_ &once->io); |
1573 | ev_io_start (EV_A_ &once->io); |
1515 | } |
1574 | } |
1516 | |
1575 | |
1517 | ev_watcher_init (&once->to, once_cb_to); |
1576 | ev_init (&once->to, once_cb_to); |
1518 | if (timeout >= 0.) |
1577 | if (timeout >= 0.) |
1519 | { |
1578 | { |
1520 | ev_timer_set (&once->to, timeout, 0.); |
1579 | ev_timer_set (&once->to, timeout, 0.); |
1521 | ev_timer_start (EV_A_ &once->to); |
1580 | ev_timer_start (EV_A_ &once->to); |
1522 | } |
1581 | } |