/cvs/libev/ev.c
Revision: 1.58
Committed: Sun Nov 4 16:52:52 2007 UTC by root
Content type: text/plain
Branch: MAIN
Changes since 1.57: +4 -2 lines
Log Message:
*** empty log message ***

File Contents

1 /*
2 * libev event processing core, watcher management
3 *
4 * Copyright (c) 2007 Marc Alexander Lehmann <libev@schmorp.de>
5 * All rights reserved.
6 *
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions are
9 * met:
10 *
11 * * Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 *
14 * * Redistributions in binary form must reproduce the above
15 * copyright notice, this list of conditions and the following
16 * disclaimer in the documentation and/or other materials provided
17 * with the distribution.
18 *
19 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
20 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
21 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
22 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
23 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
24 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
25 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
26 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
27 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
28 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
29 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
30 */
31 #ifndef EV_EMBED
32 # include "config.h"
33 #endif
34
35 #include <math.h>
36 #include <stdlib.h>
37 #include <unistd.h>
38 #include <fcntl.h>
39 #include <signal.h>
40 #include <stddef.h>
41
42 #include <stdio.h>
43
44 #include <assert.h>
45 #include <errno.h>
46 #include <sys/types.h>
47 #ifndef WIN32
48 # include <sys/wait.h>
49 #endif
50 #include <sys/time.h>
51 #include <time.h>
52
53 /**/
54
55 #ifndef EV_USE_MONOTONIC
56 # define EV_USE_MONOTONIC 1
57 #endif
58
59 #ifndef EV_USE_SELECT
60 # define EV_USE_SELECT 1
61 #endif
62
63 #ifndef EV_USE_POLL
64 # define EV_USE_POLL 0 /* poll is usually slower than select, and not as well tested */
65 #endif
66
67 #ifndef EV_USE_EPOLL
68 # define EV_USE_EPOLL 0
69 #endif
70
71 #ifndef EV_USE_KQUEUE
72 # define EV_USE_KQUEUE 0
73 #endif
74
75 #ifndef EV_USE_REALTIME
76 # define EV_USE_REALTIME 1
77 #endif
78
79 /**/
80
81 #ifndef CLOCK_MONOTONIC
82 # undef EV_USE_MONOTONIC
83 # define EV_USE_MONOTONIC 0
84 #endif
85
86 #ifndef CLOCK_REALTIME
87 # undef EV_USE_REALTIME
88 # define EV_USE_REALTIME 0
89 #endif
90
91 /**/
92
93 #define MIN_TIMEJUMP 1. /* minimum timejump that gets detected (if monotonic clock available) */
94 #define MAX_BLOCKTIME 59.731 /* never wait longer than this time (to detect time jumps) */
95 #define PID_HASHSIZE 16 /* size of pid hash table, must be power of two */
96 /*#define CLEANUP_INTERVAL 300.   how often to try to free memory and re-check fds */
97
98 #ifndef EV_EMBED
99 # include "ev.h"
100 #endif
101
102 #if __GNUC__ >= 3
103 # define expect(expr,value) __builtin_expect ((expr),(value))
104 # define inline inline
105 #else
106 # define expect(expr,value) (expr)
107 # define inline static
108 #endif
109
110 #define expect_false(expr) expect ((expr) != 0, 0)
111 #define expect_true(expr) expect ((expr) != 0, 1)
112
113 #define NUMPRI (EV_MAXPRI - EV_MINPRI + 1)
114 #define ABSPRI(w) ((w)->priority - EV_MINPRI)
115
116 typedef struct ev_watcher *W;
117 typedef struct ev_watcher_list *WL;
118 typedef struct ev_watcher_time *WT;
119
120 static int have_monotonic; /* did clock_gettime (CLOCK_MONOTONIC) work? */
121
122 /*****************************************************************************/
123
124 typedef struct
125 {
126 struct ev_watcher_list *head;
127 unsigned char events;
128 unsigned char reify;
129 } ANFD;
130
131 typedef struct
132 {
133 W w;
134 int events;
135 } ANPENDING;
136
137 #if EV_MULTIPLICITY
138
139 struct ev_loop
140 {
141 # define VAR(name,decl) decl;
142 # include "ev_vars.h"
143 };
144 # undef VAR
145 # include "ev_wrap.h"
146
147 #else
148
149 # define VAR(name,decl) static decl;
150 # include "ev_vars.h"
151 # undef VAR
152
153 #endif
154
155 /*****************************************************************************/
156
157 inline ev_tstamp
158 ev_time (void)
159 {
160 #if EV_USE_REALTIME
161 struct timespec ts;
162 clock_gettime (CLOCK_REALTIME, &ts);
163 return ts.tv_sec + ts.tv_nsec * 1e-9;
164 #else
165 struct timeval tv;
166 gettimeofday (&tv, 0);
167 return tv.tv_sec + tv.tv_usec * 1e-6;
168 #endif
169 }
170
171 inline ev_tstamp
172 get_clock (void)
173 {
174 #if EV_USE_MONOTONIC
175 if (expect_true (have_monotonic))
176 {
177 struct timespec ts;
178 clock_gettime (CLOCK_MONOTONIC, &ts);
179 return ts.tv_sec + ts.tv_nsec * 1e-9;
180 }
181 #endif
182
183 return ev_time ();
184 }
185
186 ev_tstamp
187 ev_now (EV_P)
188 {
189 return rt_now;
190 }
191
192 #define array_roundsize(base,n) (((n) | 4) & ~3)
193
194 #define array_needsize(base,cur,cnt,init) \
195 if (expect_false ((cnt) > cur)) \
196 { \
197 int newcnt = cur; \
198 do \
199 { \
200 newcnt = array_roundsize (base, newcnt << 1); \
201 } \
202 while ((cnt) > newcnt); \
203 \
204 base = realloc (base, sizeof (*base) * (newcnt)); \
205 init (base + cur, newcnt - cur); \
206 cur = newcnt; \
207 }
208
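/* note: array_needsize keeps doubling newcnt (rounded via array_roundsize, starting
   from the current size) until it covers the requested count, grows the array with
   realloc and runs the init argument over the newly added elements; call sites that
   pass an empty init argument simply leave the new elements uninitialised */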
209 /*****************************************************************************/
210
211 static void
212 anfds_init (ANFD *base, int count)
213 {
214 while (count--)
215 {
216 base->head = 0;
217 base->events = EV_NONE;
218 base->reify = 0;
219
220 ++base;
221 }
222 }
223
224 static void
225 event (EV_P_ W w, int events)
226 {
227 if (w->pending)
228 {
229 pendings [ABSPRI (w)][w->pending - 1].events |= events;
230 return;
231 }
232
233 w->pending = ++pendingcnt [ABSPRI (w)];
234 array_needsize (pendings [ABSPRI (w)], pendingmax [ABSPRI (w)], pendingcnt [ABSPRI (w)], );
235 pendings [ABSPRI (w)][w->pending - 1].w = w;
236 pendings [ABSPRI (w)][w->pending - 1].events = events;
237 }
238
239 static void
240 queue_events (EV_P_ W *events, int eventcnt, int type)
241 {
242 int i;
243
244 for (i = 0; i < eventcnt; ++i)
245 event (EV_A_ events [i], type);
246 }
247
248 static void
249 fd_event (EV_P_ int fd, int events)
250 {
251 ANFD *anfd = anfds + fd;
252 struct ev_io *w;
253
254 for (w = (struct ev_io *)anfd->head; w; w = (struct ev_io *)((WL)w)->next)
255 {
256 int ev = w->events & events;
257
258 if (ev)
259 event (EV_A_ (W)w, ev);
260 }
261 }
262
263 /*****************************************************************************/
264
265 static void
266 fd_reify (EV_P)
267 {
268 int i;
269
270 for (i = 0; i < fdchangecnt; ++i)
271 {
272 int fd = fdchanges [i];
273 ANFD *anfd = anfds + fd;
274 struct ev_io *w;
275
276 int events = 0;
277
278 for (w = (struct ev_io *)anfd->head; w; w = (struct ev_io *)((WL)w)->next)
279 events |= w->events;
280
281 anfd->reify = 0;
282
283 if (anfd->events != events)
284 {
285 method_modify (EV_A_ fd, anfd->events, events);
286 anfd->events = events;
287 }
288 }
289
290 fdchangecnt = 0;
291 }
292
293 static void
294 fd_change (EV_P_ int fd)
295 {
296 if (anfds [fd].reify || fdchangecnt < 0)
297 return;
298
299 anfds [fd].reify = 1;
300
301 ++fdchangecnt;
302 array_needsize (fdchanges, fdchangemax, fdchangecnt, );
303 fdchanges [fdchangecnt - 1] = fd;
304 }
305
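/* note: watchers never talk to the polling backend directly. ev_io_start/ev_io_stop
   call fd_change, which only flags the fd and records it in fdchanges; fd_reify,
   run at the top of each loop iteration, then recomputes the wanted event mask from
   all io watchers on that fd and hands real changes to method_modify */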
306 static void
307 fd_kill (EV_P_ int fd)
308 {
309 struct ev_io *w;
310
311 while ((w = (struct ev_io *)anfds [fd].head))
312 {
313 ev_io_stop (EV_A_ w);
314 event (EV_A_ (W)w, EV_ERROR | EV_READ | EV_WRITE);
315 }
316 }
317
318 /* called on EBADF to verify fds */
319 static void
320 fd_ebadf (EV_P)
321 {
322 int fd;
323
324 for (fd = 0; fd < anfdmax; ++fd)
325 if (anfds [fd].events)
326 if (fcntl (fd, F_GETFD) == -1 && errno == EBADF)
327 fd_kill (EV_A_ fd);
328 }
329
330 /* called on ENOMEM in select/poll to kill some fds and retry */
331 static void
332 fd_enomem (EV_P)
333 {
334 int fd = anfdmax;
335
336 while (fd--)
337 if (anfds [fd].events)
338 {
339 close (fd);
340 fd_kill (EV_A_ fd);
341 return;
342 }
343 }
344
345 /* usually called after fork if the method needs to re-arm all fds from scratch */
346 static void
347 fd_rearm_all (EV_P)
348 {
349 int fd;
350
351 /* this should be highly optimised to not do anything but set a flag */
352 for (fd = 0; fd < anfdmax; ++fd)
353 if (anfds [fd].events)
354 {
355 anfds [fd].events = 0;
356 fd_change (EV_A_ fd);
357 }
358 }
359
360 /*****************************************************************************/
361
362 static void
363 upheap (WT *heap, int k)
364 {
365 WT w = heap [k];
366
367 while (k && heap [k >> 1]->at > w->at)
368 {
369 heap [k] = heap [k >> 1];
370 heap [k]->active = k + 1;
371 k >>= 1;
372 }
373
374 heap [k] = w;
375 heap [k]->active = k + 1;
376
377 }
378
379 static void
380 downheap (WT *heap, int N, int k)
381 {
382 WT w = heap [k];
383
384 while (k < (N >> 1))
385 {
386 int j = k << 1;
387
388 if (j + 1 < N && heap [j]->at > heap [j + 1]->at)
389 ++j;
390
391 if (w->at <= heap [j]->at)
392 break;
393
394 heap [k] = heap [j];
395 heap [k]->active = k + 1;
396 k = j;
397 }
398
399 heap [k] = w;
400 heap [k]->active = k + 1;
401 }
402
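/* note: the timer and periodic watchers live in binary min-heaps ordered by ->at;
   a watcher's ->active field stores its heap index plus one (so that 0 can still
   mean "not active"), which is why upheap/downheap assign k + 1 here and the
   corresponding stop functions index the heap with w->active - 1 */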
403 /*****************************************************************************/
404
405 typedef struct
406 {
407 struct ev_watcher_list *head;
408 sig_atomic_t volatile gotsig;
409 } ANSIG;
410
411 static ANSIG *signals;
412 static int signalmax;
413
414 static int sigpipe [2];
415 static sig_atomic_t volatile gotsig;
416
417 static void
418 signals_init (ANSIG *base, int count)
419 {
420 while (count--)
421 {
422 base->head = 0;
423 base->gotsig = 0;
424
425 ++base;
426 }
427 }
428
429 static void
430 sighandler (int signum)
431 {
432 signals [signum - 1].gotsig = 1;
433
434 if (!gotsig)
435 {
436 int old_errno = errno;
437 gotsig = 1;
438 write (sigpipe [1], &signum, 1);
439 errno = old_errno;
440 }
441 }
442
443 static void
444 sigcb (EV_P_ struct ev_io *iow, int revents)
445 {
446 struct ev_watcher_list *w;
447 int signum;
448
449 read (sigpipe [0], &revents, 1);
450 gotsig = 0;
451
452 for (signum = signalmax; signum--; )
453 if (signals [signum].gotsig)
454 {
455 signals [signum].gotsig = 0;
456
457 for (w = signals [signum].head; w; w = w->next)
458 event (EV_A_ (W)w, EV_SIGNAL);
459 }
460 }
461
462 static void
463 siginit (EV_P)
464 {
465 #ifndef WIN32
466 fcntl (sigpipe [0], F_SETFD, FD_CLOEXEC);
467 fcntl (sigpipe [1], F_SETFD, FD_CLOEXEC);
468
469 /* rather than sort out whether we really need non-blocking mode, just set it */
470 fcntl (sigpipe [0], F_SETFL, O_NONBLOCK);
471 fcntl (sigpipe [1], F_SETFL, O_NONBLOCK);
472 #endif
473
474 ev_io_set (&sigev, sigpipe [0], EV_READ);
475 ev_io_start (EV_A_ &sigev);
476 ev_unref (EV_A); /* child watcher should not keep loop alive */
477 }
478
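/* note: this is the classic self-pipe pattern. The async-signal-safe part,
   sighandler, only sets per-signal and global flags and writes a single byte to
   sigpipe [1]; that wakes the loop through the sigev io watcher on sigpipe [0]
   registered in siginit, and sigcb then drains the pipe and queues EV_SIGNAL
   events for every watcher of each signal that was flagged */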
479 /*****************************************************************************/
480
481 #ifndef WIN32
482
483 #ifndef WCONTINUED
484 # define WCONTINUED 0
485 #endif
486
487 static void
488 child_reap (EV_P_ struct ev_signal *sw, int chain, int pid, int status)
489 {
490 struct ev_child *w;
491
492 for (w = (struct ev_child *)childs [chain & (PID_HASHSIZE - 1)]; w; w = (struct ev_child *)((WL)w)->next)
493 if (w->pid == pid || !w->pid)
494 {
495 w->priority = sw->priority; /* need to do it *now* */
496 w->rpid = pid;
497 w->rstatus = status;
498 event (EV_A_ (W)w, EV_CHILD);
499 }
500 }
501
502 static void
503 childcb (EV_P_ struct ev_signal *sw, int revents)
504 {
505 int pid, status;
506
507 if (0 < (pid = waitpid (-1, &status, WNOHANG | WUNTRACED | WCONTINUED)))
508 {
509 /* make sure we are called again until all children have been reaped */
510 event (EV_A_ (W)sw, EV_SIGNAL);
511
512 child_reap (EV_A_ sw, pid, pid, status);
513 child_reap (EV_A_ sw, 0, pid, status); /* this might trigger a watcher twice, but event catches that */
514 }
515 }
516
517 #endif
518
519 /*****************************************************************************/
520
521 #if EV_USE_KQUEUE
522 # include "ev_kqueue.c"
523 #endif
524 #if EV_USE_EPOLL
525 # include "ev_epoll.c"
526 #endif
527 #if EV_USE_POLL
528 # include "ev_poll.c"
529 #endif
530 #if EV_USE_SELECT
531 # include "ev_select.c"
532 #endif
533
534 int
535 ev_version_major (void)
536 {
537 return EV_VERSION_MAJOR;
538 }
539
540 int
541 ev_version_minor (void)
542 {
543 return EV_VERSION_MINOR;
544 }
545
546 /* return true if we are running with elevated privileges and should ignore env variables */
547 static int
548 enable_secure (void)
549 {
550 #ifdef WIN32
551 return 0;
552 #else
553 return getuid () != geteuid ()
554 || getgid () != getegid ();
555 #endif
556 }
557
558 int
559 ev_method (EV_P)
560 {
561 return method;
562 }
563
564 static void
565 loop_init (EV_P_ int methods)
566 {
567 if (!method)
568 {
569 #if EV_USE_MONOTONIC
570 {
571 struct timespec ts;
572 if (!clock_gettime (CLOCK_MONOTONIC, &ts))
573 have_monotonic = 1;
574 }
575 #endif
576
577 rt_now = ev_time ();
578 mn_now = get_clock ();
579 now_floor = mn_now;
580 rtmn_diff = rt_now - mn_now;
581
582 if (methods == EVMETHOD_AUTO)
583 if (!enable_secure () && getenv ("LIBEV_METHODS"))
584 methods = atoi (getenv ("LIBEV_METHODS"));
585 else
586 methods = EVMETHOD_ANY;
587
588 method = 0;
589 #if EV_USE_KQUEUE
590 if (!method && (methods & EVMETHOD_KQUEUE)) method = kqueue_init (EV_A_ methods);
591 #endif
592 #if EV_USE_EPOLL
593 if (!method && (methods & EVMETHOD_EPOLL )) method = epoll_init (EV_A_ methods);
594 #endif
595 #if EV_USE_POLL
596 if (!method && (methods & EVMETHOD_POLL )) method = poll_init (EV_A_ methods);
597 #endif
598 #if EV_USE_SELECT
599 if (!method && (methods & EVMETHOD_SELECT)) method = select_init (EV_A_ methods);
600 #endif
601 }
602 }
603
604 void
605 loop_destroy (EV_P)
606 {
607 #if EV_USE_KQUEUE
608 if (method == EVMETHOD_KQUEUE) kqueue_destroy (EV_A);
609 #endif
610 #if EV_USE_EPOLL
611 if (method == EVMETHOD_EPOLL ) epoll_destroy (EV_A);
612 #endif
613 #if EV_USE_POLL
614 if (method == EVMETHOD_POLL ) poll_destroy (EV_A);
615 #endif
616 #if EV_USE_SELECT
617 if (method == EVMETHOD_SELECT) select_destroy (EV_A);
618 #endif
619
620 method = 0;
621 /*TODO*/
622 }
623
624 void
625 loop_fork (EV_P)
626 {
627 /*TODO*/
628 #if EV_USE_EPOLL
629 if (method == EVMETHOD_EPOLL ) epoll_fork (EV_A);
630 #endif
631 #if EV_USE_KQUEUE
632 if (method == EVMETHOD_KQUEUE) kqueue_fork (EV_A);
633 #endif
634 }
635
636 #if EV_MULTIPLICITY
637 struct ev_loop *
638 ev_loop_new (int methods)
639 {
640 struct ev_loop *loop = (struct ev_loop *)calloc (1, sizeof (struct ev_loop));
641
642 loop_init (EV_A_ methods);
643
644 if (ev_method (EV_A))
645 return loop;
646
647 return 0;
648 }
649
650 void
651 ev_loop_destroy (EV_P)
652 {
653 loop_destroy (EV_A);
654 free (loop);
655 }
656
657 void
658 ev_loop_fork (EV_P)
659 {
660 loop_fork (EV_A);
661 }
662
663 #endif
664
665 #if EV_MULTIPLICITY
666 struct ev_loop default_loop_struct;
667 static struct ev_loop *default_loop;
668
669 struct ev_loop *
670 #else
671 static int default_loop;
672
673 int
674 #endif
675 ev_default_loop (int methods)
676 {
677 if (sigpipe [0] == sigpipe [1])
678 if (pipe (sigpipe))
679 return 0;
680
681 if (!default_loop)
682 {
683 #if EV_MULTIPLICITY
684 struct ev_loop *loop = default_loop = &default_loop_struct;
685 #else
686 default_loop = 1;
687 #endif
688
689 loop_init (EV_A_ methods);
690
691 if (ev_method (EV_A))
692 {
693 ev_watcher_init (&sigev, sigcb);
694 ev_set_priority (&sigev, EV_MAXPRI);
695 siginit (EV_A);
696
697 #ifndef WIN32
698 ev_signal_init (&childev, childcb, SIGCHLD);
699 ev_set_priority (&childev, EV_MAXPRI);
700 ev_signal_start (EV_A_ &childev);
701 ev_unref (EV_A); /* child watcher should not keep loop alive */
702 #endif
703 }
704 else
705 default_loop = 0;
706 }
707
708 return default_loop;
709 }
710
711 void
712 ev_default_destroy (void)
713 {
714 #if EV_MULTIPLICITY
715 struct ev_loop *loop = default_loop;
716 #endif
717
718 ev_ref (EV_A); /* child watcher */
719 ev_signal_stop (EV_A_ &childev);
720
721 ev_ref (EV_A); /* signal watcher */
722 ev_io_stop (EV_A_ &sigev);
723
724 close (sigpipe [0]); sigpipe [0] = 0;
725 close (sigpipe [1]); sigpipe [1] = 0;
726
727 loop_destroy (EV_A);
728 }
729
730 void
731 ev_default_fork (EV_P)
732 {
733 loop_fork (EV_A);
734
735 ev_io_stop (EV_A_ &sigev);
736 close (sigpipe [0]);
737 close (sigpipe [1]);
738 pipe (sigpipe);
739
740 ev_ref (EV_A); /* signal watcher */
741 siginit (EV_A);
742 }
743
744 /*****************************************************************************/
745
746 static void
747 call_pending (EV_P)
748 {
749 int pri;
750
751 for (pri = NUMPRI; pri--; )
752 while (pendingcnt [pri])
753 {
754 ANPENDING *p = pendings [pri] + --pendingcnt [pri];
755
756 if (p->w)
757 {
758 p->w->pending = 0;
759 p->w->cb (EV_A_ p->w, p->events);
760 }
761 }
762 }
763
764 static void
765 timers_reify (EV_P)
766 {
767 while (timercnt && timers [0]->at <= mn_now)
768 {
769 struct ev_timer *w = timers [0];
770
771 /* first reschedule or stop timer */
772 if (w->repeat)
773 {
774 assert (("negative ev_timer repeat value found while processing timers", w->repeat > 0.));
775 w->at = mn_now + w->repeat;
776 downheap ((WT *)timers, timercnt, 0);
777 }
778 else
779 ev_timer_stop (EV_A_ w); /* nonrepeating: stop timer */
780
781 event (EV_A_ (W)w, EV_TIMEOUT);
782 }
783 }
784
785 static void
786 periodics_reify (EV_P)
787 {
788 while (periodiccnt && periodics [0]->at <= rt_now)
789 {
790 struct ev_periodic *w = periodics [0];
791
792 /* first reschedule or stop timer */
793 if (w->interval)
794 {
795 w->at += floor ((rt_now - w->at) / w->interval + 1.) * w->interval;
796 assert (("ev_periodic timeout in the past detected while processing timers, negative interval?", w->at > rt_now));
797 downheap ((WT *)periodics, periodiccnt, 0);
798 }
799 else
800 ev_periodic_stop (EV_A_ w); /* nonrepeating: stop timer */
801
802 event (EV_A_ (W)w, EV_PERIODIC);
803 }
804 }
805
806 static void
807 periodics_reschedule (EV_P)
808 {
809 int i;
810
811 /* adjust periodics after time jump */
812 for (i = 0; i < periodiccnt; ++i)
813 {
814 struct ev_periodic *w = periodics [i];
815
816 if (w->interval)
817 {
818 ev_tstamp diff = ceil ((rt_now - w->at) / w->interval) * w->interval;
819
820 if (fabs (diff) >= 1e-4)
821 {
822 ev_periodic_stop (EV_A_ w);
823 ev_periodic_start (EV_A_ w);
824
825 i = 0; /* restart loop, inefficient, but time jumps should be rare */
826 }
827 }
828 }
829 }
830
831 inline int
832 time_update_monotonic (EV_P)
833 {
834 mn_now = get_clock ();
835
836 if (expect_true (mn_now - now_floor < MIN_TIMEJUMP * .5))
837 {
838 rt_now = rtmn_diff + mn_now;
839 return 0;
840 }
841 else
842 {
843 now_floor = mn_now;
844 rt_now = ev_time ();
845 return 1;
846 }
847 }
848
849 static void
850 time_update (EV_P)
851 {
852 int i;
853
854 #if EV_USE_MONOTONIC
855 if (expect_true (have_monotonic))
856 {
857 if (time_update_monotonic (EV_A))
858 {
859 ev_tstamp odiff = rtmn_diff;
860
861 for (i = 4; --i; ) /* loop a few times, before making important decisions */
862 {
863 rtmn_diff = rt_now - mn_now;
864
865 if (fabs (odiff - rtmn_diff) < MIN_TIMEJUMP)
866 return; /* all is well */
867
868 rt_now = ev_time ();
869 mn_now = get_clock ();
870 now_floor = mn_now;
871 }
872
873 periodics_reschedule (EV_A);
874 /* no timer adjustment, as the monotonic clock doesn't jump */
875 /* timers_reschedule (EV_A_ rtmn_diff - odiff) */
876 }
877 }
878 else
879 #endif
880 {
881 rt_now = ev_time ();
882
883 if (expect_false (mn_now > rt_now || mn_now < rt_now - MAX_BLOCKTIME - MIN_TIMEJUMP))
884 {
885 periodics_reschedule (EV_A);
886
887 /* adjust timers. this is easy, as the offset is the same for all */
888 for (i = 0; i < timercnt; ++i)
889 timers [i]->at += rt_now - mn_now;
890 }
891
892 mn_now = rt_now;
893 }
894 }
895
896 void
897 ev_ref (EV_P)
898 {
899 ++activecnt;
900 }
901
902 void
903 ev_unref (EV_P)
904 {
905 --activecnt;
906 }
907
908 static int loop_done;
909
910 void
911 ev_loop (EV_P_ int flags)
912 {
913 double block;
914 loop_done = flags & (EVLOOP_ONESHOT | EVLOOP_NONBLOCK) ? 1 : 0;
915
916 do
917 {
918 /* queue check watchers (and execute them) */
919 if (expect_false (preparecnt))
920 {
921 queue_events (EV_A_ (W *)prepares, preparecnt, EV_PREPARE);
922 call_pending (EV_A);
923 }
924
925 /* update fd-related kernel structures */
926 fd_reify (EV_A);
927
928 /* calculate blocking time */
929
930 /* we only need this for a !monotonic clock or timers, but as we basically
931 always have timers, we just calculate it always */
932 #if EV_USE_MONOTONIC
933 if (expect_true (have_monotonic))
934 time_update_monotonic (EV_A);
935 else
936 #endif
937 {
938 rt_now = ev_time ();
939 mn_now = rt_now;
940 }
941
942 if (flags & EVLOOP_NONBLOCK || idlecnt)
943 block = 0.;
944 else
945 {
946 block = MAX_BLOCKTIME;
947
948 if (timercnt)
949 {
950 ev_tstamp to = timers [0]->at - mn_now + method_fudge;
951 if (block > to) block = to;
952 }
953
954 if (periodiccnt)
955 {
956 ev_tstamp to = periodics [0]->at - rt_now + method_fudge;
957 if (block > to) block = to;
958 }
959
960 if (block < 0.) block = 0.;
961 }
962
963 method_poll (EV_A_ block);
964
965 /* update rt_now, do magic */
966 time_update (EV_A);
967
968 /* queue pending timers and reschedule them */
969 timers_reify (EV_A); /* relative timers called last */
970 periodics_reify (EV_A); /* absolute timers called first */
971
972 /* queue idle watchers unless io or timers are pending */
973 if (!pendingcnt)
974 queue_events (EV_A_ (W *)idles, idlecnt, EV_IDLE);
975
976 /* queue check watchers, to be executed first */
977 if (checkcnt)
978 queue_events (EV_A_ (W *)checks, checkcnt, EV_CHECK);
979
980 call_pending (EV_A);
981 }
982 while (activecnt && !loop_done);
983
984 if (loop_done != 2)
985 loop_done = 0;
986 }
987
988 void
989 ev_unloop (EV_P_ int how)
990 {
991 loop_done = how;
992 }
993
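#if 0
/* illustrative usage sketch, not part of ev.c: how the public loop functions
   above fit together. It assumes a build without EV_MULTIPLICITY (so EV_P_ and
   EV_A_ expand to nothing and no loop argument is passed); the callback name
   and the 5.5 second delay are invented for the example. */
#include "ev.h"

static void
timeout_cb (struct ev_timer *w, int revents)
{
  /* any nonzero argument makes the innermost ev_loop call return; values
     other than 2 are reset to 0 on exit (see loop_done above) */
  ev_unloop (1);
}

int
main (void)
{
  struct ev_timer timeout_watcher;

  if (!ev_default_loop (EVMETHOD_AUTO)) /* 0/NULL means no backend could be set up */
    return 1;

  ev_watcher_init (&timeout_watcher, timeout_cb);
  ev_timer_set (&timeout_watcher, 5.5, 0.); /* fire once after 5.5 seconds */
  ev_timer_start (&timeout_watcher);

  ev_loop (0); /* run until ev_unloop is called or no watchers keep the loop alive */
  return 0;
}
#endif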
994 /*****************************************************************************/
995
996 inline void
997 wlist_add (WL *head, WL elem)
998 {
999 elem->next = *head;
1000 *head = elem;
1001 }
1002
1003 inline void
1004 wlist_del (WL *head, WL elem)
1005 {
1006 while (*head)
1007 {
1008 if (*head == elem)
1009 {
1010 *head = elem->next;
1011 return;
1012 }
1013
1014 head = &(*head)->next;
1015 }
1016 }
1017
1018 inline void
1019 ev_clear_pending (EV_P_ W w)
1020 {
1021 if (w->pending)
1022 {
1023 pendings [ABSPRI (w)][w->pending - 1].w = 0;
1024 w->pending = 0;
1025 }
1026 }
1027
1028 inline void
1029 ev_start (EV_P_ W w, int active)
1030 {
1031 if (w->priority < EV_MINPRI) w->priority = EV_MINPRI;
1032 if (w->priority > EV_MAXPRI) w->priority = EV_MAXPRI;
1033
1034 w->active = active;
1035 ev_ref (EV_A);
1036 }
1037
1038 inline void
1039 ev_stop (EV_P_ W w)
1040 {
1041 ev_unref (EV_A);
1042 w->active = 0;
1043 }
1044
1045 /*****************************************************************************/
1046
1047 void
1048 ev_io_start (EV_P_ struct ev_io *w)
1049 {
1050 int fd = w->fd;
1051
1052 if (ev_is_active (w))
1053 return;
1054
1055 assert (("ev_io_start called with negative fd", fd >= 0));
1056
1057 ev_start (EV_A_ (W)w, 1);
1058 array_needsize (anfds, anfdmax, fd + 1, anfds_init);
1059 wlist_add ((WL *)&anfds[fd].head, (WL)w);
1060
1061 fd_change (EV_A_ fd);
1062 }
1063
1064 void
1065 ev_io_stop (EV_P_ struct ev_io *w)
1066 {
1067 ev_clear_pending (EV_A_ (W)w);
1068 if (!ev_is_active (w))
1069 return;
1070
1071 wlist_del ((WL *)&anfds[w->fd].head, (WL)w);
1072 ev_stop (EV_A_ (W)w);
1073
1074 fd_change (EV_A_ w->fd);
1075 }
1076
1077 void
1078 ev_timer_start (EV_P_ struct ev_timer *w)
1079 {
1080 if (ev_is_active (w))
1081 return;
1082
1083 w->at += mn_now;
1084
1085 assert (("ev_timer_start called with negative timer repeat value", w->repeat >= 0.));
1086
1087 ev_start (EV_A_ (W)w, ++timercnt);
1088 array_needsize (timers, timermax, timercnt, );
1089 timers [timercnt - 1] = w;
1090 upheap ((WT *)timers, timercnt - 1);
1091 }
1092
1093 void
1094 ev_timer_stop (EV_P_ struct ev_timer *w)
1095 {
1096 ev_clear_pending (EV_A_ (W)w);
1097 if (!ev_is_active (w))
1098 return;
1099
1100 if (w->active < timercnt--)
1101 {
1102 timers [w->active - 1] = timers [timercnt];
1103 downheap ((WT *)timers, timercnt, w->active - 1);
1104 }
1105
1106 w->at = w->repeat;
1107
1108 ev_stop (EV_A_ (W)w);
1109 }
1110
1111 void
1112 ev_timer_again (EV_P_ struct ev_timer *w)
1113 {
1114 if (ev_is_active (w))
1115 {
1116 if (w->repeat)
1117 {
1118 w->at = mn_now + w->repeat;
1119 downheap ((WT *)timers, timercnt, w->active - 1);
1120 }
1121 else
1122 ev_timer_stop (EV_A_ w);
1123 }
1124 else if (w->repeat)
1125 ev_timer_start (EV_A_ w);
1126 }
1127
1128 void
1129 ev_periodic_start (EV_P_ struct ev_periodic *w)
1130 {
1131 if (ev_is_active (w))
1132 return;
1133
1134 assert (("ev_periodic_start called with negative interval value", w->interval >= 0.));
1135
1136 /* this formula differs from the one in periodics_reify because we do not always round up */
1137 if (w->interval)
1138 w->at += ceil ((rt_now - w->at) / w->interval) * w->interval;
1139
1140 ev_start (EV_A_ (W)w, ++periodiccnt);
1141 array_needsize (periodics, periodicmax, periodiccnt, );
1142 periodics [periodiccnt - 1] = w;
1143 upheap ((WT *)periodics, periodiccnt - 1);
1144 }
1145
1146 void
1147 ev_periodic_stop (EV_P_ struct ev_periodic *w)
1148 {
1149 ev_clear_pending (EV_A_ (W)w);
1150 if (!ev_is_active (w))
1151 return;
1152
1153 if (w->active < periodiccnt--)
1154 {
1155 periodics [w->active - 1] = periodics [periodiccnt];
1156 downheap ((WT *)periodics, periodiccnt, w->active - 1);
1157 }
1158
1159 ev_stop (EV_A_ (W)w);
1160 }
1161
1162 void
1163 ev_idle_start (EV_P_ struct ev_idle *w)
1164 {
1165 if (ev_is_active (w))
1166 return;
1167
1168 ev_start (EV_A_ (W)w, ++idlecnt);
1169 array_needsize (idles, idlemax, idlecnt, );
1170 idles [idlecnt - 1] = w;
1171 }
1172
1173 void
1174 ev_idle_stop (EV_P_ struct ev_idle *w)
1175 {
1176 ev_clear_pending (EV_A_ (W)w);
1177 if (!ev_is_active (w))
1178 return;
1179
1180 idles [w->active - 1] = idles [--idlecnt];
1181 ev_stop (EV_A_ (W)w);
1182 }
1183
1184 void
1185 ev_prepare_start (EV_P_ struct ev_prepare *w)
1186 {
1187 if (ev_is_active (w))
1188 return;
1189
1190 ev_start (EV_A_ (W)w, ++preparecnt);
1191 array_needsize (prepares, preparemax, preparecnt, );
1192 prepares [preparecnt - 1] = w;
1193 }
1194
1195 void
1196 ev_prepare_stop (EV_P_ struct ev_prepare *w)
1197 {
1198 ev_clear_pending (EV_A_ (W)w);
1199 if (!ev_is_active (w))
1200 return;
1201
1202 prepares [w->active - 1] = prepares [--preparecnt];
1203 ev_stop (EV_A_ (W)w);
1204 }
1205
1206 void
1207 ev_check_start (EV_P_ struct ev_check *w)
1208 {
1209 if (ev_is_active (w))
1210 return;
1211
1212 ev_start (EV_A_ (W)w, ++checkcnt);
1213 array_needsize (checks, checkmax, checkcnt, );
1214 checks [checkcnt - 1] = w;
1215 }
1216
1217 void
1218 ev_check_stop (EV_P_ struct ev_check *w)
1219 {
1220 ev_clear_pending (EV_A_ (W)w);
1221 if (!ev_is_active (w))
1222 return;
1223
1224 checks [w->active - 1] = checks [--checkcnt];
1225 ev_stop (EV_A_ (W)w);
1226 }
1227
1228 #ifndef SA_RESTART
1229 # define SA_RESTART 0
1230 #endif
1231
1232 void
1233 ev_signal_start (EV_P_ struct ev_signal *w)
1234 {
1235 #if EV_MULTIPLICITY
1236 assert (("signal watchers are only supported in the default loop", loop == default_loop));
1237 #endif
1238 if (ev_is_active (w))
1239 return;
1240
1241 assert (("ev_signal_start called with illegal signal number", w->signum > 0));
1242
1243 ev_start (EV_A_ (W)w, 1);
1244 array_needsize (signals, signalmax, w->signum, signals_init);
1245 wlist_add ((WL *)&signals [w->signum - 1].head, (WL)w);
1246
1247 if (!w->next)
1248 {
1249 struct sigaction sa;
1250 sa.sa_handler = sighandler;
1251 sigfillset (&sa.sa_mask);
1252 sa.sa_flags = SA_RESTART; /* if restarting works we save one iteration */
1253 sigaction (w->signum, &sa, 0);
1254 }
1255 }
1256
1257 void
1258 ev_signal_stop (EV_P_ struct ev_signal *w)
1259 {
1260 ev_clear_pending (EV_A_ (W)w);
1261 if (!ev_is_active (w))
1262 return;
1263
1264 wlist_del ((WL *)&signals [w->signum - 1].head, (WL)w);
1265 ev_stop (EV_A_ (W)w);
1266
1267 if (!signals [w->signum - 1].head)
1268 signal (w->signum, SIG_DFL);
1269 }
1270
1271 void
1272 ev_child_start (EV_P_ struct ev_child *w)
1273 {
1274 #if EV_MULTIPLICITY
1275 assert (("child watchers are only supported in the default loop", loop == default_loop));
1276 #endif
1277 if (ev_is_active (w))
1278 return;
1279
1280 ev_start (EV_A_ (W)w, 1);
1281 wlist_add ((WL *)&childs [w->pid & (PID_HASHSIZE - 1)], (WL)w);
1282 }
1283
1284 void
1285 ev_child_stop (EV_P_ struct ev_child *w)
1286 {
1287 ev_clear_pending (EV_A_ (W)w);
1288 if (!ev_is_active (w))
1289 return;
1290
1291 wlist_del ((WL *)&childs [w->pid & (PID_HASHSIZE - 1)], (WL)w);
1292 ev_stop (EV_A_ (W)w);
1293 }
1294
1295 /*****************************************************************************/
1296
1297 struct ev_once
1298 {
1299 struct ev_io io;
1300 struct ev_timer to;
1301 void (*cb)(int revents, void *arg);
1302 void *arg;
1303 };
1304
1305 static void
1306 once_cb (EV_P_ struct ev_once *once, int revents)
1307 {
1308 void (*cb)(int revents, void *arg) = once->cb;
1309 void *arg = once->arg;
1310
1311 ev_io_stop (EV_A_ &once->io);
1312 ev_timer_stop (EV_A_ &once->to);
1313 free (once);
1314
1315 cb (revents, arg);
1316 }
1317
1318 static void
1319 once_cb_io (EV_P_ struct ev_io *w, int revents)
1320 {
1321 once_cb (EV_A_ (struct ev_once *)(((char *)w) - offsetof (struct ev_once, io)), revents);
1322 }
1323
1324 static void
1325 once_cb_to (EV_P_ struct ev_timer *w, int revents)
1326 {
1327 once_cb (EV_A_ (struct ev_once *)(((char *)w) - offsetof (struct ev_once, to)), revents);
1328 }
1329
1330 void
1331 ev_once (EV_P_ int fd, int events, ev_tstamp timeout, void (*cb)(int revents, void *arg), void *arg)
1332 {
1333 struct ev_once *once = malloc (sizeof (struct ev_once));
1334
1335 if (!once)
1336 cb (EV_ERROR | EV_READ | EV_WRITE | EV_TIMEOUT, arg);
1337 else
1338 {
1339 once->cb = cb;
1340 once->arg = arg;
1341
1342 ev_watcher_init (&once->io, once_cb_io);
1343 if (fd >= 0)
1344 {
1345 ev_io_set (&once->io, fd, events);
1346 ev_io_start (EV_A_ &once->io);
1347 }
1348
1349 ev_watcher_init (&once->to, once_cb_to);
1350 if (timeout >= 0.)
1351 {
1352 ev_timer_set (&once->to, timeout, 0.);
1353 ev_timer_start (EV_A_ &once->to);
1354 }
1355 }
1356 }
1357
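A minimal usage sketch for the ev_once helper defined at the end of the file, assuming a build without EV_MULTIPLICITY (no loop argument is passed); the callback name, the use of fd 0 and the 10 second timeout are invented for the example:

#include <stdio.h>
#include "ev.h"

static void
stdin_or_timeout_cb (int revents, void *arg)
{
  if (revents & EV_TIMEOUT)
    printf ("timed out\n");            /* the internal timer fired first */
  else if (revents & EV_READ)
    printf ("stdin became readable\n");

  ev_unloop (1); /* this single event was all we waited for */
}

int
main (void)
{
  if (!ev_default_loop (EVMETHOD_AUTO))
    return 1;

  /* wait once for readability on fd 0 or a 10 second timeout, whichever comes
     first; ev_once allocates, starts and stops the io/timer watchers itself */
  ev_once (0, EV_READ, 10., stdin_or_timeout_cb, 0);

  ev_loop (0);
  return 0;
}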