… | |
… | |
/* opaque shorthand pointers used throughout for generic watcher handling */
typedef struct ev_watcher_list *WL;
typedef struct ev_watcher_time *WT;

static int have_monotonic; /* did clock_gettime (CLOCK_MONOTONIC) work? */
152 | |
152 | |
#if WIN32
/* note: the comment below could not be substantiated, but what would I care */
/* MSDN says this is required to handle SIGFPE */
volatile double SIGFPE_REQ = 0.0f;

/* emulate pipe () on win32 via a loopback TCP socket pair, so the
 * resulting handles can be waited upon with select ().
 * returns 0 on success, -1 on failure (all temporaries closed). */
static int
ev_socketpair_tcp (int filedes [2])
{
  struct sockaddr_in addr = { 0 };
  int addr_size = sizeof (addr);
  SOCKET listener;
  SOCKET sock [2] = { INVALID_SOCKET, INVALID_SOCKET };

  if ((listener = socket (AF_INET, SOCK_STREAM, 0)) == INVALID_SOCKET)
    return -1;

  addr.sin_family = AF_INET;
  addr.sin_addr.s_addr = htonl (INADDR_LOOPBACK);
  addr.sin_port = 0; /* let the system pick an ephemeral port */

  if (bind (listener, (struct sockaddr *)&addr, addr_size))
    goto fail;

  /* read back the port that was actually assigned */
  if (getsockname (listener, (struct sockaddr *)&addr, &addr_size))
    goto fail;

  if (listen (listener, 1))
    goto fail;

  if ((sock [0] = socket (AF_INET, SOCK_STREAM, 0)) == INVALID_SOCKET)
    goto fail;

  if (connect (sock [0], (struct sockaddr *)&addr, addr_size))
    goto fail;

  /* BUGFIX: SOCKET is an unsigned type on win32, so the original
   * "< 0" test could never detect a failed accept — compare against
   * INVALID_SOCKET instead */
  if ((sock [1] = accept (listener, 0, 0)) == INVALID_SOCKET)
    goto fail;

  closesocket (listener);

  filedes [0] = sock [0];
  filedes [1] = sock [1];

  return 0;

fail:
  closesocket (listener);

  if (sock [0] != INVALID_SOCKET) closesocket (sock [0]);
  if (sock [1] != INVALID_SOCKET) closesocket (sock [1]);

  return -1;
}

# define ev_pipe(filedes) ev_socketpair_tcp (filedes)
#else
# define ev_pipe(filedes) pipe (filedes)
#endif
|
|
211 | |
154 | |
/*****************************************************************************/

/* user-installable callback for fatal system errors;
   NOTE(review): the setter is not visible in this chunk — presumably an
   ev_set_syserr_cb-style API elsewhere in the file; confirm */
static void (*syserr_cb)(const char *msg);
215 | |
158 | |
… | |
… | |
325 | ev_now (EV_P) |
268 | ev_now (EV_P) |
326 | { |
269 | { |
327 | return rt_now; |
270 | return rt_now; |
328 | } |
271 | } |
329 | |
272 | |
/* round a size/count up for array growth.
   NOTE(review): "&" binds tighter than "|", so this expands to
   (n) | (4 & ~3) == (n) | 4 — it guarantees a nonzero size but does NOT
   round to a multiple of four; kept byte-for-byte since allocation sizes
   are not part of any caller-visible contract. */
#define array_roundsize(type,n) ((n) | 4 & ~3)

/* ensure <base> (a malloc'ed array of <type> with capacity <cur>) can hold
   at least <cnt> elements: double the capacity until it fits, realloc, and
   run <init> on the newly added tail. expands to a bare "if" statement —
   do not place it directly before an "else". */
#define array_needsize(type,base,cur,cnt,init)                   \
  if (expect_false ((cnt) > cur))                                \
    {                                                            \
      int newcnt = cur;                                          \
      do                                                         \
        {                                                        \
          newcnt = array_roundsize (type, newcnt << 1);          \
        }                                                        \
      while ((cnt) > newcnt);                                    \
                                                                 \
      base = (type *)ev_realloc (base, sizeof (type) * (newcnt));\
      init (base + cur, newcnt - cur);                           \
      cur = newcnt;                                              \
    }

/* debug helper: shrink array <stem> once usage drops below a quarter of
   its capacity.
   BUGFIX: array_roundsize takes (type,n) — both invocations below passed
   only one argument and could never have compiled had this macro been
   expanded.
   NOTE(review): "base" is not a parameter here and must exist at the
   expansion site; this macro looks unused/unfinished — confirm before
   relying on it. */
#define array_slim(type,stem)                                    \
  if (stem ## max < array_roundsize (type, stem ## cnt >> 2))    \
    {                                                            \
      stem ## max = array_roundsize (type, stem ## cnt >> 1);    \
      base = (type *)ev_realloc (base, sizeof (type) * (stem ## max));\
      fprintf (stderr, "slimmed down " # stem " to %d\n", stem ## max);/*D*/\
    }
354 | |
297 | |
355 | /* microsoft's pseudo-c is quite far from C as the rest of the world and the standard knows it */ |
298 | /* microsoft's pseudo-c is quite far from C as the rest of the world and the standard knows it */ |
356 | /* bringing us everlasting joy in form of stupid extra macros that are not required in C */ |
299 | /* bringing us everlasting joy in form of stupid extra macros that are not required in C */ |
… | |
… | |
373 | |
316 | |
374 | ++base; |
317 | ++base; |
375 | } |
318 | } |
376 | } |
319 | } |
377 | |
320 | |
378 | static void |
321 | void |
379 | event (EV_P_ W w, int events) |
322 | ev_feed_event (EV_P_ void *w, int revents) |
380 | { |
323 | { |
|
|
324 | W w_ = (W)w; |
|
|
325 | |
381 | if (w->pending) |
326 | if (w_->pending) |
382 | { |
327 | { |
383 | pendings [ABSPRI (w)][w->pending - 1].events |= events; |
328 | pendings [ABSPRI (w_)][w_->pending - 1].events |= revents; |
384 | return; |
329 | return; |
385 | } |
330 | } |
386 | |
331 | |
387 | w->pending = ++pendingcnt [ABSPRI (w)]; |
332 | w_->pending = ++pendingcnt [ABSPRI (w_)]; |
388 | array_needsize (pendings [ABSPRI (w)], pendingmax [ABSPRI (w)], pendingcnt [ABSPRI (w)], (void)); |
333 | array_needsize (ANPENDING, pendings [ABSPRI (w_)], pendingmax [ABSPRI (w_)], pendingcnt [ABSPRI (w_)], (void)); |
389 | pendings [ABSPRI (w)][w->pending - 1].w = w; |
334 | pendings [ABSPRI (w_)][w_->pending - 1].w = w_; |
390 | pendings [ABSPRI (w)][w->pending - 1].events = events; |
335 | pendings [ABSPRI (w_)][w_->pending - 1].events = revents; |
391 | } |
336 | } |
392 | |
337 | |
393 | static void |
338 | static void |
394 | queue_events (EV_P_ W *events, int eventcnt, int type) |
339 | queue_events (EV_P_ W *events, int eventcnt, int type) |
395 | { |
340 | { |
396 | int i; |
341 | int i; |
397 | |
342 | |
398 | for (i = 0; i < eventcnt; ++i) |
343 | for (i = 0; i < eventcnt; ++i) |
399 | event (EV_A_ events [i], type); |
344 | ev_feed_event (EV_A_ events [i], type); |
400 | } |
345 | } |
401 | |
346 | |
402 | static void |
347 | inline void |
403 | fd_event (EV_P_ int fd, int events) |
348 | fd_event (EV_P_ int fd, int revents) |
404 | { |
349 | { |
405 | ANFD *anfd = anfds + fd; |
350 | ANFD *anfd = anfds + fd; |
406 | struct ev_io *w; |
351 | struct ev_io *w; |
407 | |
352 | |
408 | for (w = (struct ev_io *)anfd->head; w; w = (struct ev_io *)((WL)w)->next) |
353 | for (w = (struct ev_io *)anfd->head; w; w = (struct ev_io *)((WL)w)->next) |
409 | { |
354 | { |
410 | int ev = w->events & events; |
355 | int ev = w->events & revents; |
411 | |
356 | |
412 | if (ev) |
357 | if (ev) |
413 | event (EV_A_ (W)w, ev); |
358 | ev_feed_event (EV_A_ (W)w, ev); |
414 | } |
359 | } |
|
|
360 | } |
|
|
361 | |
|
|
362 | void |
|
|
363 | ev_feed_fd_event (EV_P_ int fd, int revents) |
|
|
364 | { |
|
|
365 | fd_event (EV_A_ fd, revents); |
415 | } |
366 | } |
416 | |
367 | |
417 | /*****************************************************************************/ |
368 | /*****************************************************************************/ |
418 | |
369 | |
419 | static void |
370 | static void |
… | |
… | |
448 | return; |
399 | return; |
449 | |
400 | |
450 | anfds [fd].reify = 1; |
401 | anfds [fd].reify = 1; |
451 | |
402 | |
452 | ++fdchangecnt; |
403 | ++fdchangecnt; |
453 | array_needsize (fdchanges, fdchangemax, fdchangecnt, (void)); |
404 | array_needsize (int, fdchanges, fdchangemax, fdchangecnt, (void)); |
454 | fdchanges [fdchangecnt - 1] = fd; |
405 | fdchanges [fdchangecnt - 1] = fd; |
455 | } |
406 | } |
456 | |
407 | |
457 | static void |
408 | static void |
458 | fd_kill (EV_P_ int fd) |
409 | fd_kill (EV_P_ int fd) |
… | |
… | |
460 | struct ev_io *w; |
411 | struct ev_io *w; |
461 | |
412 | |
462 | while ((w = (struct ev_io *)anfds [fd].head)) |
413 | while ((w = (struct ev_io *)anfds [fd].head)) |
463 | { |
414 | { |
464 | ev_io_stop (EV_A_ w); |
415 | ev_io_stop (EV_A_ w); |
465 | event (EV_A_ (W)w, EV_ERROR | EV_READ | EV_WRITE); |
416 | ev_feed_event (EV_A_ (W)w, EV_ERROR | EV_READ | EV_WRITE); |
466 | } |
417 | } |
467 | } |
418 | } |
468 | |
419 | |
469 | static int |
420 | static int |
470 | fd_valid (int fd) |
421 | fd_valid (int fd) |
… | |
… | |
598 | |
549 | |
599 | if (!gotsig) |
550 | if (!gotsig) |
600 | { |
551 | { |
601 | int old_errno = errno; |
552 | int old_errno = errno; |
602 | gotsig = 1; |
553 | gotsig = 1; |
|
|
554 | #ifdef WIN32 |
|
|
555 | send (sigpipe [1], &signum, 1, MSG_DONTWAIT); |
|
|
556 | #else |
603 | write (sigpipe [1], &signum, 1); |
557 | write (sigpipe [1], &signum, 1); |
|
|
558 | #endif |
604 | errno = old_errno; |
559 | errno = old_errno; |
605 | } |
560 | } |
|
|
561 | } |
|
|
562 | |
|
|
563 | void |
|
|
564 | ev_feed_signal_event (EV_P_ int signum) |
|
|
565 | { |
|
|
566 | #if EV_MULTIPLICITY |
|
|
567 | assert (("feeding signal events is only supported in the default loop", loop == default_loop)); |
|
|
568 | #endif |
|
|
569 | |
|
|
570 | --signum; |
|
|
571 | |
|
|
572 | if (signum < 0 || signum >= signalmax) |
|
|
573 | return; |
|
|
574 | |
|
|
575 | signals [signum].gotsig = 0; |
|
|
576 | |
|
|
577 | for (w = signals [signum].head; w; w = w->next) |
|
|
578 | ev_feed_event (EV_A_ (W)w, EV_SIGNAL); |
606 | } |
579 | } |
607 | |
580 | |
608 | static void |
581 | static void |
609 | sigcb (EV_P_ struct ev_io *iow, int revents) |
582 | sigcb (EV_P_ struct ev_io *iow, int revents) |
610 | { |
583 | { |
611 | WL w; |
584 | WL w; |
612 | int signum; |
585 | int signum; |
613 | |
586 | |
|
|
587 | #ifdef WIN32 |
|
|
588 | recv (sigpipe [0], &revents, 1, MSG_DONTWAIT); |
|
|
589 | #else |
614 | read (sigpipe [0], &revents, 1); |
590 | read (sigpipe [0], &revents, 1); |
|
|
591 | #endif |
615 | gotsig = 0; |
592 | gotsig = 0; |
616 | |
593 | |
617 | for (signum = signalmax; signum--; ) |
594 | for (signum = signalmax; signum--; ) |
618 | if (signals [signum].gotsig) |
595 | if (signals [signum].gotsig) |
619 | { |
596 | sigevent (EV_A_ signum + 1); |
620 | signals [signum].gotsig = 0; |
|
|
621 | |
|
|
622 | for (w = signals [signum].head; w; w = w->next) |
|
|
623 | event (EV_A_ (W)w, EV_SIGNAL); |
|
|
624 | } |
|
|
625 | } |
597 | } |
626 | |
598 | |
627 | static void |
599 | static void |
628 | siginit (EV_P) |
600 | siginit (EV_P) |
629 | { |
601 | { |
… | |
… | |
662 | if (w->pid == pid || !w->pid) |
634 | if (w->pid == pid || !w->pid) |
663 | { |
635 | { |
664 | ev_priority (w) = ev_priority (sw); /* need to do it *now* */ |
636 | ev_priority (w) = ev_priority (sw); /* need to do it *now* */ |
665 | w->rpid = pid; |
637 | w->rpid = pid; |
666 | w->rstatus = status; |
638 | w->rstatus = status; |
667 | event (EV_A_ (W)w, EV_CHILD); |
639 | ev_feed_event (EV_A_ (W)w, EV_CHILD); |
668 | } |
640 | } |
669 | } |
641 | } |
670 | |
642 | |
671 | static void |
643 | static void |
672 | childcb (EV_P_ struct ev_signal *sw, int revents) |
644 | childcb (EV_P_ struct ev_signal *sw, int revents) |
… | |
… | |
674 | int pid, status; |
646 | int pid, status; |
675 | |
647 | |
676 | if (0 < (pid = waitpid (-1, &status, WNOHANG | WUNTRACED | WCONTINUED))) |
648 | if (0 < (pid = waitpid (-1, &status, WNOHANG | WUNTRACED | WCONTINUED))) |
677 | { |
649 | { |
678 | /* make sure we are called again until all childs have been reaped */ |
650 | /* make sure we are called again until all childs have been reaped */ |
679 | event (EV_A_ (W)sw, EV_SIGNAL); |
651 | ev_feed_event (EV_A_ (W)sw, EV_SIGNAL); |
680 | |
652 | |
681 | child_reap (EV_A_ sw, pid, pid, status); |
653 | child_reap (EV_A_ sw, pid, pid, status); |
682 | child_reap (EV_A_ sw, 0, pid, status); /* this might trigger a watcher twice, but event catches that */ |
654 | child_reap (EV_A_ sw, 0, pid, status); /* this might trigger a watcher twice, but event catches that */ |
683 | } |
655 | } |
684 | } |
656 | } |
… | |
… | |
828 | ev_ref (EV_A); |
800 | ev_ref (EV_A); |
829 | ev_io_stop (EV_A_ &sigev); |
801 | ev_io_stop (EV_A_ &sigev); |
830 | close (sigpipe [0]); |
802 | close (sigpipe [0]); |
831 | close (sigpipe [1]); |
803 | close (sigpipe [1]); |
832 | |
804 | |
833 | while (ev_pipe (sigpipe)) |
805 | while (pipe (sigpipe)) |
834 | syserr ("(libev) error creating pipe"); |
806 | syserr ("(libev) error creating pipe"); |
835 | |
807 | |
836 | siginit (EV_A); |
808 | siginit (EV_A); |
837 | } |
809 | } |
838 | |
810 | |
… | |
… | |
881 | int |
853 | int |
882 | #endif |
854 | #endif |
883 | ev_default_loop (int methods) |
855 | ev_default_loop (int methods) |
884 | { |
856 | { |
885 | if (sigpipe [0] == sigpipe [1]) |
857 | if (sigpipe [0] == sigpipe [1]) |
886 | if (ev_pipe (sigpipe)) |
858 | if (pipe (sigpipe)) |
887 | return 0; |
859 | return 0; |
888 | |
860 | |
889 | if (!default_loop) |
861 | if (!default_loop) |
890 | { |
862 | { |
891 | #if EV_MULTIPLICITY |
863 | #if EV_MULTIPLICITY |
… | |
… | |
945 | if (method) |
917 | if (method) |
946 | postfork = 1; |
918 | postfork = 1; |
947 | } |
919 | } |
948 | |
920 | |
949 | /*****************************************************************************/ |
921 | /*****************************************************************************/ |
|
|
922 | |
|
|
923 | static int |
|
|
924 | any_pending (EV_P) |
|
|
925 | { |
|
|
926 | int pri; |
|
|
927 | |
|
|
928 | for (pri = NUMPRI; pri--; ) |
|
|
929 | if (pendingcnt [pri]) |
|
|
930 | return 1; |
|
|
931 | |
|
|
932 | return 0; |
|
|
933 | } |
950 | |
934 | |
951 | static void |
935 | static void |
952 | call_pending (EV_P) |
936 | call_pending (EV_P) |
953 | { |
937 | { |
954 | int pri; |
938 | int pri; |
… | |
… | |
983 | downheap ((WT *)timers, timercnt, 0); |
967 | downheap ((WT *)timers, timercnt, 0); |
984 | } |
968 | } |
985 | else |
969 | else |
986 | ev_timer_stop (EV_A_ w); /* nonrepeating: stop timer */ |
970 | ev_timer_stop (EV_A_ w); /* nonrepeating: stop timer */ |
987 | |
971 | |
988 | event (EV_A_ (W)w, EV_TIMEOUT); |
972 | ev_feed_event (EV_A_ (W)w, EV_TIMEOUT); |
989 | } |
973 | } |
990 | } |
974 | } |
991 | |
975 | |
992 | static void |
976 | static void |
993 | periodics_reify (EV_P) |
977 | periodics_reify (EV_P) |
… | |
… | |
997 | struct ev_periodic *w = periodics [0]; |
981 | struct ev_periodic *w = periodics [0]; |
998 | |
982 | |
999 | assert (("inactive timer on periodic heap detected", ev_is_active (w))); |
983 | assert (("inactive timer on periodic heap detected", ev_is_active (w))); |
1000 | |
984 | |
1001 | /* first reschedule or stop timer */ |
985 | /* first reschedule or stop timer */ |
|
|
986 | if (w->reschedule_cb) |
|
|
987 | { |
|
|
988 | ev_tstamp at = ((WT)w)->at = w->reschedule_cb (w, rt_now + 0.0001); |
|
|
989 | |
|
|
990 | assert (("ev_periodic reschedule callback returned time in the past", ((WT)w)->at > rt_now)); |
|
|
991 | downheap ((WT *)periodics, periodiccnt, 0); |
|
|
992 | } |
1002 | if (w->interval) |
993 | else if (w->interval) |
1003 | { |
994 | { |
1004 | ((WT)w)->at += floor ((rt_now - ((WT)w)->at) / w->interval + 1.) * w->interval; |
995 | ((WT)w)->at += floor ((rt_now - ((WT)w)->at) / w->interval + 1.) * w->interval; |
1005 | assert (("ev_periodic timeout in the past detected while processing timers, negative interval?", ((WT)w)->at > rt_now)); |
996 | assert (("ev_periodic timeout in the past detected while processing timers, negative interval?", ((WT)w)->at > rt_now)); |
1006 | downheap ((WT *)periodics, periodiccnt, 0); |
997 | downheap ((WT *)periodics, periodiccnt, 0); |
1007 | } |
998 | } |
1008 | else |
999 | else |
1009 | ev_periodic_stop (EV_A_ w); /* nonrepeating: stop timer */ |
1000 | ev_periodic_stop (EV_A_ w); /* nonrepeating: stop timer */ |
1010 | |
1001 | |
1011 | event (EV_A_ (W)w, EV_PERIODIC); |
1002 | ev_feed_event (EV_A_ (W)w, EV_PERIODIC); |
1012 | } |
1003 | } |
1013 | } |
1004 | } |
1014 | |
1005 | |
1015 | static void |
1006 | static void |
1016 | periodics_reschedule (EV_P) |
1007 | periodics_reschedule (EV_P) |
… | |
… | |
1020 | /* adjust periodics after time jump */ |
1011 | /* adjust periodics after time jump */ |
1021 | for (i = 0; i < periodiccnt; ++i) |
1012 | for (i = 0; i < periodiccnt; ++i) |
1022 | { |
1013 | { |
1023 | struct ev_periodic *w = periodics [i]; |
1014 | struct ev_periodic *w = periodics [i]; |
1024 | |
1015 | |
|
|
1016 | if (w->reschedule_cb) |
|
|
1017 | ((WT)w)->at = w->reschedule_cb (w, rt_now); |
1025 | if (w->interval) |
1018 | else if (w->interval) |
1026 | { |
|
|
1027 | ev_tstamp diff = ceil ((rt_now - ((WT)w)->at) / w->interval) * w->interval; |
1019 | ((WT)w)->at += ceil ((rt_now - ((WT)w)->at) / w->interval) * w->interval; |
1028 | |
|
|
1029 | if (fabs (diff) >= 1e-4) |
|
|
1030 | { |
|
|
1031 | ev_periodic_stop (EV_A_ w); |
|
|
1032 | ev_periodic_start (EV_A_ w); |
|
|
1033 | |
|
|
1034 | i = 0; /* restart loop, inefficient, but time jumps should be rare */ |
|
|
1035 | } |
|
|
1036 | } |
|
|
1037 | } |
1020 | } |
|
|
1021 | |
|
|
1022 | /* now rebuild the heap */ |
|
|
1023 | for (i = periodiccnt >> 1; i--; ) |
|
|
1024 | downheap ((WT *)periodics, periodiccnt, i); |
1038 | } |
1025 | } |
1039 | |
1026 | |
1040 | inline int |
1027 | inline int |
1041 | time_update_monotonic (EV_P) |
1028 | time_update_monotonic (EV_P) |
1042 | { |
1029 | { |
… | |
… | |
1138 | /* update fd-related kernel structures */ |
1125 | /* update fd-related kernel structures */ |
1139 | fd_reify (EV_A); |
1126 | fd_reify (EV_A); |
1140 | |
1127 | |
1141 | /* calculate blocking time */ |
1128 | /* calculate blocking time */ |
1142 | |
1129 | |
1143 | /* we only need this for !monotonic clockor timers, but as we basically |
1130 | /* we only need this for !monotonic clock or timers, but as we basically |
1144 | always have timers, we just calculate it always */ |
1131 | always have timers, we just calculate it always */ |
1145 | #if EV_USE_MONOTONIC |
1132 | #if EV_USE_MONOTONIC |
1146 | if (expect_true (have_monotonic)) |
1133 | if (expect_true (have_monotonic)) |
1147 | time_update_monotonic (EV_A); |
1134 | time_update_monotonic (EV_A); |
1148 | else |
1135 | else |
… | |
… | |
1181 | /* queue pending timers and reschedule them */ |
1168 | /* queue pending timers and reschedule them */ |
1182 | timers_reify (EV_A); /* relative timers called last */ |
1169 | timers_reify (EV_A); /* relative timers called last */ |
1183 | periodics_reify (EV_A); /* absolute timers called first */ |
1170 | periodics_reify (EV_A); /* absolute timers called first */ |
1184 | |
1171 | |
1185 | /* queue idle watchers unless io or timers are pending */ |
1172 | /* queue idle watchers unless io or timers are pending */ |
1186 | if (!pendingcnt) |
1173 | if (idlecnt && !any_pending (EV_A)) |
1187 | queue_events (EV_A_ (W *)idles, idlecnt, EV_IDLE); |
1174 | queue_events (EV_A_ (W *)idles, idlecnt, EV_IDLE); |
1188 | |
1175 | |
1189 | /* queue check watchers, to be executed first */ |
1176 | /* queue check watchers, to be executed first */ |
1190 | if (checkcnt) |
1177 | if (checkcnt) |
1191 | queue_events (EV_A_ (W *)checks, checkcnt, EV_CHECK); |
1178 | queue_events (EV_A_ (W *)checks, checkcnt, EV_CHECK); |
… | |
… | |
1266 | return; |
1253 | return; |
1267 | |
1254 | |
1268 | assert (("ev_io_start called with negative fd", fd >= 0)); |
1255 | assert (("ev_io_start called with negative fd", fd >= 0)); |
1269 | |
1256 | |
1270 | ev_start (EV_A_ (W)w, 1); |
1257 | ev_start (EV_A_ (W)w, 1); |
1271 | array_needsize (anfds, anfdmax, fd + 1, anfds_init); |
1258 | array_needsize (ANFD, anfds, anfdmax, fd + 1, anfds_init); |
1272 | wlist_add ((WL *)&anfds[fd].head, (WL)w); |
1259 | wlist_add ((WL *)&anfds[fd].head, (WL)w); |
1273 | |
1260 | |
1274 | fd_change (EV_A_ fd); |
1261 | fd_change (EV_A_ fd); |
1275 | } |
1262 | } |
1276 | |
1263 | |
… | |
… | |
1296 | ((WT)w)->at += mn_now; |
1283 | ((WT)w)->at += mn_now; |
1297 | |
1284 | |
1298 | assert (("ev_timer_start called with negative timer repeat value", w->repeat >= 0.)); |
1285 | assert (("ev_timer_start called with negative timer repeat value", w->repeat >= 0.)); |
1299 | |
1286 | |
1300 | ev_start (EV_A_ (W)w, ++timercnt); |
1287 | ev_start (EV_A_ (W)w, ++timercnt); |
1301 | array_needsize (timers, timermax, timercnt, (void)); |
1288 | array_needsize (struct ev_timer *, timers, timermax, timercnt, (void)); |
1302 | timers [timercnt - 1] = w; |
1289 | timers [timercnt - 1] = w; |
1303 | upheap ((WT *)timers, timercnt - 1); |
1290 | upheap ((WT *)timers, timercnt - 1); |
1304 | |
1291 | |
1305 | assert (("internal timer heap corruption", timers [((W)w)->active - 1] == w)); |
1292 | assert (("internal timer heap corruption", timers [((W)w)->active - 1] == w)); |
1306 | } |
1293 | } |
… | |
… | |
1346 | ev_periodic_start (EV_P_ struct ev_periodic *w) |
1333 | ev_periodic_start (EV_P_ struct ev_periodic *w) |
1347 | { |
1334 | { |
1348 | if (ev_is_active (w)) |
1335 | if (ev_is_active (w)) |
1349 | return; |
1336 | return; |
1350 | |
1337 | |
|
|
1338 | if (w->reschedule_cb) |
|
|
1339 | ((WT)w)->at = w->reschedule_cb (w, rt_now); |
|
|
1340 | else if (w->interval) |
|
|
1341 | { |
1351 | assert (("ev_periodic_start called with negative interval value", w->interval >= 0.)); |
1342 | assert (("ev_periodic_start called with negative interval value", w->interval >= 0.)); |
1352 | |
|
|
1353 | /* this formula differs from the one in periodic_reify because we do not always round up */ |
1343 | /* this formula differs from the one in periodic_reify because we do not always round up */ |
1354 | if (w->interval) |
|
|
1355 | ((WT)w)->at += ceil ((rt_now - ((WT)w)->at) / w->interval) * w->interval; |
1344 | ((WT)w)->at += ceil ((rt_now - ((WT)w)->at) / w->interval) * w->interval; |
|
|
1345 | } |
1356 | |
1346 | |
1357 | ev_start (EV_A_ (W)w, ++periodiccnt); |
1347 | ev_start (EV_A_ (W)w, ++periodiccnt); |
1358 | array_needsize (periodics, periodicmax, periodiccnt, (void)); |
1348 | array_needsize (struct ev_periodic *, periodics, periodicmax, periodiccnt, (void)); |
1359 | periodics [periodiccnt - 1] = w; |
1349 | periodics [periodiccnt - 1] = w; |
1360 | upheap ((WT *)periodics, periodiccnt - 1); |
1350 | upheap ((WT *)periodics, periodiccnt - 1); |
1361 | |
1351 | |
1362 | assert (("internal periodic heap corruption", periodics [((W)w)->active - 1] == w)); |
1352 | assert (("internal periodic heap corruption", periodics [((W)w)->active - 1] == w)); |
1363 | } |
1353 | } |
… | |
… | |
1379 | |
1369 | |
1380 | ev_stop (EV_A_ (W)w); |
1370 | ev_stop (EV_A_ (W)w); |
1381 | } |
1371 | } |
1382 | |
1372 | |
1383 | void |
1373 | void |
|
|
1374 | ev_periodic_again (EV_P_ struct ev_periodic *w) |
|
|
1375 | { |
|
|
1376 | ev_periodic_stop (EV_A_ w); |
|
|
1377 | ev_periodic_start (EV_A_ w); |
|
|
1378 | } |
|
|
1379 | |
|
|
1380 | void |
1384 | ev_idle_start (EV_P_ struct ev_idle *w) |
1381 | ev_idle_start (EV_P_ struct ev_idle *w) |
1385 | { |
1382 | { |
1386 | if (ev_is_active (w)) |
1383 | if (ev_is_active (w)) |
1387 | return; |
1384 | return; |
1388 | |
1385 | |
1389 | ev_start (EV_A_ (W)w, ++idlecnt); |
1386 | ev_start (EV_A_ (W)w, ++idlecnt); |
1390 | array_needsize (idles, idlemax, idlecnt, (void)); |
1387 | array_needsize (struct ev_idle *, idles, idlemax, idlecnt, (void)); |
1391 | idles [idlecnt - 1] = w; |
1388 | idles [idlecnt - 1] = w; |
1392 | } |
1389 | } |
1393 | |
1390 | |
1394 | void |
1391 | void |
1395 | ev_idle_stop (EV_P_ struct ev_idle *w) |
1392 | ev_idle_stop (EV_P_ struct ev_idle *w) |
… | |
… | |
1407 | { |
1404 | { |
1408 | if (ev_is_active (w)) |
1405 | if (ev_is_active (w)) |
1409 | return; |
1406 | return; |
1410 | |
1407 | |
1411 | ev_start (EV_A_ (W)w, ++preparecnt); |
1408 | ev_start (EV_A_ (W)w, ++preparecnt); |
1412 | array_needsize (prepares, preparemax, preparecnt, (void)); |
1409 | array_needsize (struct ev_prepare *, prepares, preparemax, preparecnt, (void)); |
1413 | prepares [preparecnt - 1] = w; |
1410 | prepares [preparecnt - 1] = w; |
1414 | } |
1411 | } |
1415 | |
1412 | |
1416 | void |
1413 | void |
1417 | ev_prepare_stop (EV_P_ struct ev_prepare *w) |
1414 | ev_prepare_stop (EV_P_ struct ev_prepare *w) |
… | |
… | |
1429 | { |
1426 | { |
1430 | if (ev_is_active (w)) |
1427 | if (ev_is_active (w)) |
1431 | return; |
1428 | return; |
1432 | |
1429 | |
1433 | ev_start (EV_A_ (W)w, ++checkcnt); |
1430 | ev_start (EV_A_ (W)w, ++checkcnt); |
1434 | array_needsize (checks, checkmax, checkcnt, (void)); |
1431 | array_needsize (struct ev_check *, checks, checkmax, checkcnt, (void)); |
1435 | checks [checkcnt - 1] = w; |
1432 | checks [checkcnt - 1] = w; |
1436 | } |
1433 | } |
1437 | |
1434 | |
1438 | void |
1435 | void |
1439 | ev_check_stop (EV_P_ struct ev_check *w) |
1436 | ev_check_stop (EV_P_ struct ev_check *w) |
… | |
… | |
1460 | return; |
1457 | return; |
1461 | |
1458 | |
1462 | assert (("ev_signal_start called with illegal signal number", w->signum > 0)); |
1459 | assert (("ev_signal_start called with illegal signal number", w->signum > 0)); |
1463 | |
1460 | |
1464 | ev_start (EV_A_ (W)w, 1); |
1461 | ev_start (EV_A_ (W)w, 1); |
1465 | array_needsize (signals, signalmax, w->signum, signals_init); |
1462 | array_needsize (ANSIG, signals, signalmax, w->signum, signals_init); |
1466 | wlist_add ((WL *)&signals [w->signum - 1].head, (WL)w); |
1463 | wlist_add ((WL *)&signals [w->signum - 1].head, (WL)w); |
1467 | |
1464 | |
1468 | if (!((WL)w)->next) |
1465 | if (!((WL)w)->next) |
1469 | { |
1466 | { |
1470 | #if WIN32 |
1467 | #if WIN32 |
… | |
… | |
1553 | } |
1550 | } |
1554 | |
1551 | |
1555 | void |
1552 | void |
1556 | ev_once (EV_P_ int fd, int events, ev_tstamp timeout, void (*cb)(int revents, void *arg), void *arg) |
1553 | ev_once (EV_P_ int fd, int events, ev_tstamp timeout, void (*cb)(int revents, void *arg), void *arg) |
1557 | { |
1554 | { |
1558 | struct ev_once *once = ev_malloc (sizeof (struct ev_once)); |
1555 | struct ev_once *once = (struct ev_once *)ev_malloc (sizeof (struct ev_once)); |
1559 | |
1556 | |
1560 | if (!once) |
1557 | if (!once) |
1561 | cb (EV_ERROR | EV_READ | EV_WRITE | EV_TIMEOUT, arg); |
1558 | cb (EV_ERROR | EV_READ | EV_WRITE | EV_TIMEOUT, arg); |
1562 | else |
1559 | else |
1563 | { |
1560 | { |