ViewVC Help
View File | Revision Log | Show Annotations | Download File
/cvs/libev/ev.c
Revision: 1.102
Committed: Sun Nov 11 17:56:11 2007 UTC (16 years, 6 months ago) by root
Content type: text/plain
Branch: MAIN
Changes since 1.101: +1 -1 lines
Log Message:
*** empty log message ***

File Contents

# Content
1 /*
2 * libev event processing core, watcher management
3 *
4 * Copyright (c) 2007 Marc Alexander Lehmann <libev@schmorp.de>
5 * All rights reserved.
6 *
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions are
9 * met:
10 *
11 * * Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 *
14 * * Redistributions in binary form must reproduce the above
15 * copyright notice, this list of conditions and the following
16 * disclaimer in the documentation and/or other materials provided
17 * with the distribution.
18 *
19 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
20 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
21 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
22 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
23 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
24 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
25 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
26 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
27 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
28 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
29 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
30 */
31
32 #ifdef __cplusplus
33 extern "C" {
34 #endif
35
36 #ifndef EV_STANDALONE
37 # include "config.h"
38
39 # if HAVE_CLOCK_GETTIME
40 # ifndef EV_USE_MONOTONIC
41 # define EV_USE_MONOTONIC 1
42 # endif
43 # ifndef EV_USE_REALTIME
44 # define EV_USE_REALTIME 1
45 # endif
46 # endif
47
48 # if HAVE_SELECT && HAVE_SYS_SELECT_H && !defined (EV_USE_SELECT)
49 # define EV_USE_SELECT 1
50 # endif
51
52 # if HAVE_POLL && HAVE_POLL_H && !defined (EV_USE_POLL)
53 # define EV_USE_POLL 1
54 # endif
55
56 # if HAVE_EPOLL_CTL && HAVE_SYS_EPOLL_H && !defined (EV_USE_EPOLL)
57 # define EV_USE_EPOLL 1
58 # endif
59
60 # if HAVE_KQUEUE && HAVE_SYS_EVENT_H && HAVE_SYS_QUEUE_H && !defined (EV_USE_KQUEUE)
61 # define EV_USE_KQUEUE 1
62 # endif
63
64 #endif
65
66 #include <math.h>
67 #include <stdlib.h>
68 #include <fcntl.h>
69 #include <stddef.h>
70
71 #include <stdio.h>
72
73 #include <assert.h>
74 #include <errno.h>
75 #include <sys/types.h>
76 #include <time.h>
77
78 #include <signal.h>
79
80 #ifndef WIN32
81 # include <unistd.h>
82 # include <sys/time.h>
83 # include <sys/wait.h>
84 #endif
85 /**/
86
87 #ifndef EV_USE_MONOTONIC
88 # define EV_USE_MONOTONIC 1
89 #endif
90
91 #ifndef EV_USE_SELECT
92 # define EV_USE_SELECT 1
93 #endif
94
95 #ifndef EV_USE_POLL
96 # define EV_USE_POLL 0 /* poll is usually slower than select, and not as well tested */
97 #endif
98
99 #ifndef EV_USE_EPOLL
100 # define EV_USE_EPOLL 0
101 #endif
102
103 #ifndef EV_USE_KQUEUE
104 # define EV_USE_KQUEUE 0
105 #endif
106
107 #ifndef EV_USE_WIN32
108 # ifdef WIN32
109 # define EV_USE_WIN32 0 /* it does not exist, use select */
110 # undef EV_USE_SELECT
111 # define EV_USE_SELECT 1
112 # else
113 # define EV_USE_WIN32 0
114 # endif
115 #endif
116
117 #ifndef EV_USE_REALTIME
118 # define EV_USE_REALTIME 1
119 #endif
120
121 /**/
122
123 #ifndef CLOCK_MONOTONIC
124 # undef EV_USE_MONOTONIC
125 # define EV_USE_MONOTONIC 0
126 #endif
127
128 #ifndef CLOCK_REALTIME
129 # undef EV_USE_REALTIME
130 # define EV_USE_REALTIME 0
131 #endif
132
133 /**/
134
135 #define MIN_TIMEJUMP 1. /* minimum timejump that gets detected (if monotonic clock available) */
136 #define MAX_BLOCKTIME 59.731 /* never wait longer than this time (to detect time jumps) */
137 #define PID_HASHSIZE 16 /* size of pid hash table, must be power of two */
/*#define CLEANUP_INTERVAL 300.   how often to try to free memory and re-check fds */
139
140 #ifdef EV_H
141 # include EV_H
142 #else
143 # include "ev.h"
144 #endif
145
146 #if __GNUC__ >= 3
147 # define expect(expr,value) __builtin_expect ((expr),(value))
148 # define inline inline
149 #else
150 # define expect(expr,value) (expr)
151 # define inline static
152 #endif
153
154 #define expect_false(expr) expect ((expr) != 0, 0)
155 #define expect_true(expr) expect ((expr) != 0, 1)
156
157 #define NUMPRI (EV_MAXPRI - EV_MINPRI + 1)
158 #define ABSPRI(w) ((w)->priority - EV_MINPRI)
159
160 typedef struct ev_watcher *W;
161 typedef struct ev_watcher_list *WL;
162 typedef struct ev_watcher_time *WT;
163
164 static int have_monotonic; /* did clock_gettime (CLOCK_MONOTONIC) work? */
165
166 #ifdef WIN32
167 # include "ev_win32.c"
168 #endif
169
170 /*****************************************************************************/
171
/* user-overridable handler for fatal system errors (default: perror + abort, see syserr ()) */
static void (*syserr_cb)(const char *msg);

/* install a callback invoked instead of aborting on fatal system errors */
void ev_set_syserr_cb (void (*cb)(const char *msg))
{
  syserr_cb = cb;
}
178
/* report a fatal system error: delegate to the user callback if one is
 * installed, otherwise print the message via perror and abort */
static void
syserr (const char *msg)
{
  if (!msg)
    msg = "(libev) system error";

  if (syserr_cb)
    syserr_cb (msg);
  else
    {
      perror (msg);
      abort ();
    }
}
193
/* user-overridable allocator with realloc semantics (size 0 frees) */
static void *(*alloc)(void *ptr, long size);

/* install a realloc-like allocator used for all libev allocations */
void ev_set_allocator (void *(*cb)(void *ptr, long size))
{
  alloc = cb;
}
200
/* central allocation routine: realloc semantics (ptr 0 allocates, size 0
 * frees); aborts the process on allocation failure, so callers never see 0
 * for a nonzero size */
static void *
ev_realloc (void *ptr, long size)
{
  ptr = alloc ? alloc (ptr, size) : realloc (ptr, size);

  if (!ptr && size)
    {
      fprintf (stderr, "libev: cannot allocate %ld bytes, aborting.", size);
      abort ();
    }

  return ptr;
}
214
215 #define ev_malloc(size) ev_realloc (0, (size))
216 #define ev_free(ptr) ev_realloc ((ptr), 0)
217
218 /*****************************************************************************/
219
/* per-fd bookkeeping: one ANFD per file descriptor */
typedef struct
{
  WL head;              /* linked list of ev_io watchers for this fd */
  unsigned char events; /* event mask the kernel backend currently knows about */
  unsigned char reify;  /* set while the fd sits on the fdchanges list awaiting fd_reify */
} ANFD;

/* an event queued for delivery: the watcher and the event bits to deliver */
typedef struct
{
  W w;
  int events;
} ANPENDING;
232
233 #if EV_MULTIPLICITY
234
235 struct ev_loop
236 {
237 ev_tstamp ev_rt_now;
238 #define ev_rt_now ((loop)->ev_rt_now)
239 #define VAR(name,decl) decl;
240 #include "ev_vars.h"
241 #undef VAR
242 };
243 #include "ev_wrap.h"
244
245 struct ev_loop default_loop_struct;
246 static struct ev_loop *default_loop;
247
248 #else
249
250 ev_tstamp ev_rt_now;
251 #define VAR(name,decl) static decl;
252 #include "ev_vars.h"
253 #undef VAR
254
255 static int default_loop;
256
257 #endif
258
259 /*****************************************************************************/
260
/* current wall-clock time as an ev_tstamp (seconds since the epoch) */
ev_tstamp
ev_time (void)
{
#if EV_USE_REALTIME
  struct timespec ts;
  clock_gettime (CLOCK_REALTIME, &ts);
  return ts.tv_sec + ts.tv_nsec * 1e-9;
#else
  struct timeval tv;
  gettimeofday (&tv, 0);
  return tv.tv_sec + tv.tv_usec * 1e-6;
#endif
}
274
/* monotonic clock if available and working, wall clock otherwise */
inline ev_tstamp
get_clock (void)
{
#if EV_USE_MONOTONIC
  if (expect_true (have_monotonic))
    {
      struct timespec ts;
      clock_gettime (CLOCK_MONOTONIC, &ts);
      return ts.tv_sec + ts.tv_nsec * 1e-9;
    }
#endif

  return ev_time ();
}
289
#if EV_MULTIPLICITY
/* return the loop's cached notion of the current wall-clock time */
ev_tstamp
ev_now (EV_P)
{
  return ev_rt_now;
}
#endif
297
/* round an array size up: result is at least 4 and a multiple of 4.
 * NOTE: the original read `(n) | 4 & ~3`, but `&` binds tighter than `|`,
 * so that was just `(n) | 4` — neither a minimum of 4 nor a multiple of 4.
 * Parenthesised to match the intended (and later upstream) semantics. */
#define array_roundsize(type,n) (((n) | 4) & ~3)

/* grow `base' (a type* holding `cur' elements) so it can hold at least
 * `cnt' elements, doubling and rounding; `init' is called on the new tail */
#define array_needsize(type,base,cur,cnt,init)                  \
  if (expect_false ((cnt) > cur))                               \
    {                                                           \
      int newcnt = cur;                                         \
      do                                                        \
        {                                                       \
          newcnt = array_roundsize (type, newcnt << 1);         \
        }                                                       \
      while ((cnt) > newcnt);                                   \
                                                                \
      base = (type *)ev_realloc (base, sizeof (type) * (newcnt));\
      init (base + cur, newcnt - cur);                          \
      cur = newcnt;                                             \
    }

/* shrink an array whose use count dropped well below its capacity.
 * NOTE: the original passed a single argument to the two-argument
 * array_roundsize macro, which could never have compiled if expanded. */
#define array_slim(type,stem)                                   \
  if (stem ## max < array_roundsize (type, stem ## cnt >> 2))   \
    {                                                           \
      stem ## max = array_roundsize (type, stem ## cnt >> 1);   \
      base = (type *)ev_realloc (base, sizeof (type) * (stem ## max));\
      fprintf (stderr, "slimmed down " # stem " to %d\n", stem ## max);/*D*/\
    }

/* microsoft's pseudo-c is quite far from C as the rest of the world and the standard knows it */
/* bringing us everlasting joy in form of stupid extra macros that are not required in C */
#define array_free_microshit(stem) \
  ev_free (stem ## s); stem ## cnt = stem ## max = 0;

#define array_free(stem, idx) \
  ev_free (stem ## s idx); stem ## cnt idx = stem ## max idx = 0;
331 /*****************************************************************************/
332
333 static void
334 anfds_init (ANFD *base, int count)
335 {
336 while (count--)
337 {
338 base->head = 0;
339 base->events = EV_NONE;
340 base->reify = 0;
341
342 ++base;
343 }
344 }
345
/* queue an event on watcher w for later delivery by call_pending;
 * if the watcher is already pending, just OR in the new event bits */
void
ev_feed_event (EV_P_ void *w, int revents)
{
  W w_ = (W)w;

  if (w_->pending)
    {
      /* already queued: merge the event bits into the existing entry */
      pendings [ABSPRI (w_)][w_->pending - 1].events |= revents;
      return;
    }

  /* append to the pending array of this watcher's priority;
     ->pending stores the 1-based index into that array */
  w_->pending = ++pendingcnt [ABSPRI (w_)];
  array_needsize (ANPENDING, pendings [ABSPRI (w_)], pendingmax [ABSPRI (w_)], pendingcnt [ABSPRI (w_)], (void));
  pendings [ABSPRI (w_)][w_->pending - 1].w = w_;
  pendings [ABSPRI (w_)][w_->pending - 1].events = revents;
}
362
363 static void
364 queue_events (EV_P_ W *events, int eventcnt, int type)
365 {
366 int i;
367
368 for (i = 0; i < eventcnt; ++i)
369 ev_feed_event (EV_A_ events [i], type);
370 }
371
/* deliver revents to every io watcher on fd, masked by each
 * watcher's own interest set */
inline void
fd_event (EV_P_ int fd, int revents)
{
  ANFD *anfd = anfds + fd;
  struct ev_io *w;

  for (w = (struct ev_io *)anfd->head; w; w = (struct ev_io *)((WL)w)->next)
    {
      int ev = w->events & revents;

      if (ev)
        ev_feed_event (EV_A_ (W)w, ev);
    }
}
386
/* public wrapper: inject an fd event into the loop from user code */
void
ev_feed_fd_event (EV_P_ int fd, int revents)
{
  fd_event (EV_A_ fd, revents);
}
392
393 /*****************************************************************************/
394
/* push the collected fd interest changes (fdchanges) into the kernel
 * backend via method_modify, then reset the change list */
static void
fd_reify (EV_P)
{
  int i;

  for (i = 0; i < fdchangecnt; ++i)
    {
      int fd = fdchanges [i];
      ANFD *anfd = anfds + fd;
      struct ev_io *w;

      int events = 0;

      /* the new interest set is the union of all watchers on this fd */
      for (w = (struct ev_io *)anfd->head; w; w = (struct ev_io *)((WL)w)->next)
        events |= w->events;

      anfd->reify = 0;

      method_modify (EV_A_ fd, anfd->events, events);
      anfd->events = events;
    }

  fdchangecnt = 0;
}
419
/* mark fd as changed so the next fd_reify updates the kernel;
 * idempotent thanks to the reify flag */
static void
fd_change (EV_P_ int fd)
{
  if (anfds [fd].reify)
    return;

  anfds [fd].reify = 1;

  ++fdchangecnt;
  array_needsize (int, fdchanges, fdchangemax, fdchangecnt, (void));
  fdchanges [fdchangecnt - 1] = fd;
}
432
/* forcibly stop every watcher on fd and feed it an EV_ERROR event
 * (used when the fd turned out to be bad or resources ran out) */
static void
fd_kill (EV_P_ int fd)
{
  struct ev_io *w;

  while ((w = (struct ev_io *)anfds [fd].head))
    {
      ev_io_stop (EV_A_ w);
      ev_feed_event (EV_A_ (W)w, EV_ERROR | EV_READ | EV_WRITE);
    }
}
444
/* nonzero iff fd refers to an open file descriptor */
static int
fd_valid (int fd)
{
#ifdef WIN32
  return win32_get_osfhandle (fd) ? 1 : 0;
#else
  return -1 != fcntl (fd, F_GETFD);
#endif
}
454
455 /* called on EBADF to verify fds */
456 static void
457 fd_ebadf (EV_P)
458 {
459 int fd;
460
461 for (fd = 0; fd < anfdmax; ++fd)
462 if (anfds [fd].events)
463 if (!fd_valid (fd) == -1 && errno == EBADF)
464 fd_kill (EV_A_ fd);
465 }
466
/* called on ENOMEM in select/poll to kill some fds and retry */
static void
fd_enomem (EV_P)
{
  int fd;

  /* kill just the highest-numbered watched fd and let the caller retry */
  for (fd = anfdmax; fd--; )
    if (anfds [fd].events)
      {
        fd_kill (EV_A_ fd);
        return;
      }
}
480
/* usually called after fork if method needs to re-arm all fds from scratch */
static void
fd_rearm_all (EV_P)
{
  int fd;

  /* this should be highly optimised to not do anything but set a flag */
  for (fd = 0; fd < anfdmax; ++fd)
    if (anfds [fd].events)
      {
        /* pretend the kernel knows nothing, forcing a full method_modify */
        anfds [fd].events = 0;
        fd_change (EV_A_ fd);
      }
}
495
496 /*****************************************************************************/
497
/* sift the element at index k of a binary min-heap (ordered on ->at)
 * towards the root until the heap invariant is restored.
 * NOTE(review): parent is computed as k >> 1 although the heap is stored
 * 0-based (where the parent would be (k-1) >> 1); the companion downheap
 * uses k << 1 for children, so the two are consistent with each other —
 * later libev versions reworked this indexing; verify before reuse. */
static void
upheap (WT *heap, int k)
{
  WT w = heap [k];

  while (k && heap [k >> 1]->at > w->at)
    {
      heap [k] = heap [k >> 1];
      ((W)heap [k])->active = k + 1;   /* ->active caches the 1-based heap index */
      k >>= 1;
    }

  heap [k] = w;
  ((W)heap [k])->active = k + 1;

}
514
/* sift the element at index k of the N-element min-heap (ordered on ->at)
 * towards the leaves until the heap invariant is restored.
 * NOTE(review): children are computed as k << 1 / (k << 1) + 1, matching
 * the k >> 1 parent formula in upheap — see the note there. */
static void
downheap (WT *heap, int N, int k)
{
  WT w = heap [k];

  while (k < (N >> 1))
    {
      int j = k << 1;

      /* pick the smaller of the two children */
      if (j + 1 < N && heap [j]->at > heap [j + 1]->at)
        ++j;

      if (w->at <= heap [j]->at)
        break;

      heap [k] = heap [j];
      ((W)heap [k])->active = k + 1;   /* ->active caches the 1-based heap index */
      k = j;
    }

  heap [k] = w;
  ((W)heap [k])->active = k + 1;
}
538
/* restore the heap invariant after heap [k]'s ->at changed in an unknown
 * direction: the element can only need to move up or down, so try both */
inline void
adjustheap (WT *heap, int N, int k)
{
  upheap (heap, k);
  downheap (heap, N, k);
}
545
546 /*****************************************************************************/
547
/* per-signal info: watcher list plus a flag set from signal context */
typedef struct
{
  WL head;
  sig_atomic_t volatile gotsig; /* set by sighandler when this signal fired */
} ANSIG;

static ANSIG *signals;                /* indexed by signal number - 1 */
static int signalmax;

static int sigpipe [2];               /* self-pipe used to wake the loop from signal context */
static sig_atomic_t volatile gotsig;  /* any signal pending? avoids redundant pipe writes */
static struct ev_io sigev;            /* internal io watcher on sigpipe [0] */
560
561 static void
562 signals_init (ANSIG *base, int count)
563 {
564 while (count--)
565 {
566 base->head = 0;
567 base->gotsig = 0;
568
569 ++base;
570 }
571 }
572
/* generic signal handler: record which signal fired and wake the loop
 * through the self-pipe; only flag-sets and a single write, so it stays
 * async-signal-safe */
static void
sighandler (int signum)
{
#if WIN32
  signal (signum, sighandler);   /* windows resets the handler on delivery */
#endif

  signals [signum - 1].gotsig = 1;

  if (!gotsig)   /* write only once per wakeup, sigcb resets the flag */
    {
      int old_errno = errno;   /* write/send may clobber errno inside a handler */
      gotsig = 1;
#ifdef WIN32
      send (sigpipe [1], &signum, 1, MSG_DONTWAIT);
#else
      write (sigpipe [1], &signum, 1);
#endif
      errno = old_errno;
    }
}
594
/* feed an EV_SIGNAL event to every watcher registered for signum (1-based) */
void
ev_feed_signal_event (EV_P_ int signum)
{
  WL w;

#if EV_MULTIPLICITY
  assert (("feeding signal events is only supported in the default loop", loop == default_loop));
#endif

  --signum;   /* the signals [] array is 0-based */

  if (signum < 0 || signum >= signalmax)
    return;

  signals [signum].gotsig = 0;

  for (w = signals [signum].head; w; w = w->next)
    ev_feed_event (EV_A_ (W)w, EV_SIGNAL);
}
614
/* io callback on the signal pipe: drain the wakeup byte and dispatch
 * every signal flagged by sighandler */
static void
sigcb (EV_P_ struct ev_io *iow, int revents)
{
  int signum;

#ifdef WIN32
  recv (sigpipe [0], &revents, 1, MSG_DONTWAIT);
#else
  read (sigpipe [0], &revents, 1);   /* contents are irrelevant, only the wakeup matters */
#endif
  gotsig = 0;

  for (signum = signalmax; signum--; )
    if (signals [signum].gotsig)
      ev_feed_signal_event (EV_A_ signum + 1);
}
631
/* configure the signal pipe (close-on-exec, nonblocking) and start the
 * internal io watcher on its read end */
static void
siginit (EV_P)
{
#ifndef WIN32
  fcntl (sigpipe [0], F_SETFD, FD_CLOEXEC);
  fcntl (sigpipe [1], F_SETFD, FD_CLOEXEC);

  /* rather than sort out whether we really need nb, set it */
  fcntl (sigpipe [0], F_SETFL, O_NONBLOCK);
  fcntl (sigpipe [1], F_SETFL, O_NONBLOCK);
#endif

  ev_io_set (&sigev, sigpipe [0], EV_READ);
  ev_io_start (EV_A_ &sigev);
  ev_unref (EV_A); /* child watcher should not keep loop alive */
}
648
649 /*****************************************************************************/
650
651 static struct ev_child *childs [PID_HASHSIZE];
652
653 #ifndef WIN32
654
655 static struct ev_signal childev;
656
657 #ifndef WCONTINUED
658 # define WCONTINUED 0
659 #endif
660
/* feed EV_CHILD events to the child watchers in hash chain `chain'
 * that match pid; watchers with pid 0 match any child */
static void
child_reap (EV_P_ struct ev_signal *sw, int chain, int pid, int status)
{
  struct ev_child *w;

  for (w = (struct ev_child *)childs [chain & (PID_HASHSIZE - 1)]; w; w = (struct ev_child *)((WL)w)->next)
    if (w->pid == pid || !w->pid)
      {
        ev_priority (w) = ev_priority (sw); /* need to do it *now* */
        w->rpid = pid;                      /* which child actually triggered */
        w->rstatus = status;
        ev_feed_event (EV_A_ (W)w, EV_CHILD);
      }
}
675
/* SIGCHLD callback: reap one exited/stopped child and notify both the
 * pid-specific and the catch-all (pid 0) watcher chains */
static void
childcb (EV_P_ struct ev_signal *sw, int revents)
{
  int pid, status;

  if (0 < (pid = waitpid (-1, &status, WNOHANG | WUNTRACED | WCONTINUED)))
    {
      /* make sure we are called again until all childs have been reaped */
      ev_feed_event (EV_A_ (W)sw, EV_SIGNAL);

      child_reap (EV_A_ sw, pid, pid, status);
      child_reap (EV_A_ sw, 0, pid, status); /* this might trigger a watcher twice, but event catches that */
    }
}
690
691 #endif
692
693 /*****************************************************************************/
694
695 #if EV_USE_KQUEUE
696 # include "ev_kqueue.c"
697 #endif
698 #if EV_USE_EPOLL
699 # include "ev_epoll.c"
700 #endif
701 #if EV_USE_POLL
702 # include "ev_poll.c"
703 #endif
704 #if EV_USE_SELECT
705 # include "ev_select.c"
706 #endif
707
/* major version of the libev API this library was compiled against */
int
ev_version_major (void)
{
  return EV_VERSION_MAJOR;
}
713
/* minor version of the libev API this library was compiled against */
int
ev_version_minor (void)
{
  return EV_VERSION_MINOR;
}
719
/* return true if we are running with elevated privileges (setuid/setgid)
 * and should therefore ignore environment variables */
static int
enable_secure (void)
{
#ifdef WIN32
  return 0;
#else
  if (getuid () != geteuid ())
    return 1;

  return getgid () != getegid ();
#endif
}
731
/* return the EVMETHOD_* backend this loop ended up using (0 if none) */
int
ev_method (EV_P)
{
  return method;
}
737
/* one-time loop initialisation: probe the monotonic clock, seed the
 * time bookkeeping, select a backend and prepare the signal watcher */
static void
loop_init (EV_P_ int methods)
{
  if (!method)   /* only if this loop is not yet initialised */
    {
#if EV_USE_MONOTONIC
      {
        struct timespec ts;
        if (!clock_gettime (CLOCK_MONOTONIC, &ts))
          have_monotonic = 1;
      }
#endif

      ev_rt_now = ev_time ();
      mn_now    = get_clock ();
      now_floor = mn_now;
      rtmn_diff = ev_rt_now - mn_now;   /* wall clock minus monotonic clock offset */

      /* note: the else intentionally binds to the INNER if — EVMETHOD_ANY
         is chosen when no (allowed) LIBEV_METHODS override is present */
      if (methods == EVMETHOD_AUTO)
        if (!enable_secure () && getenv ("LIBEV_METHODS"))
          methods = atoi (getenv ("LIBEV_METHODS"));
        else
          methods = EVMETHOD_ANY;

      /* try the compiled-in backends in order of preference */
      method = 0;
#if EV_USE_WIN32
      if (!method && (methods & EVMETHOD_WIN32 )) method = win32_init  (EV_A_ methods);
#endif
#if EV_USE_KQUEUE
      if (!method && (methods & EVMETHOD_KQUEUE)) method = kqueue_init (EV_A_ methods);
#endif
#if EV_USE_EPOLL
      if (!method && (methods & EVMETHOD_EPOLL )) method = epoll_init  (EV_A_ methods);
#endif
#if EV_USE_POLL
      if (!method && (methods & EVMETHOD_POLL  )) method = poll_init   (EV_A_ methods);
#endif
#if EV_USE_SELECT
      if (!method && (methods & EVMETHOD_SELECT)) method = select_init (EV_A_ methods);
#endif

      /* prepared here, but only started by siginit () */
      ev_init (&sigev, sigcb);
      ev_set_priority (&sigev, EV_MAXPRI);
    }
}
783
/* tear down a loop: shut down the backend and free all internal arrays;
 * resets `method' so the loop counts as uninitialised afterwards */
void
loop_destroy (EV_P)
{
  int i;

#if EV_USE_WIN32
  if (method == EVMETHOD_WIN32 ) win32_destroy  (EV_A);
#endif
#if EV_USE_KQUEUE
  if (method == EVMETHOD_KQUEUE) kqueue_destroy (EV_A);
#endif
#if EV_USE_EPOLL
  if (method == EVMETHOD_EPOLL ) epoll_destroy  (EV_A);
#endif
#if EV_USE_POLL
  if (method == EVMETHOD_POLL  ) poll_destroy   (EV_A);
#endif
#if EV_USE_SELECT
  if (method == EVMETHOD_SELECT) select_destroy (EV_A);
#endif

  for (i = NUMPRI; i--; )
    array_free (pending, [i]);

  /* have to use the microsoft-never-gets-it-right macro */
  array_free_microshit (fdchange);
  array_free_microshit (timer);
#if EV_PERIODICS
  array_free_microshit (periodic);
#endif
  array_free_microshit (idle);
  array_free_microshit (prepare);
  array_free_microshit (check);

  method = 0;
}
820
/* re-establish kernel state in the child after a fork: notify backends
 * whose handles do not survive fork and recreate the signal pipe */
static void
loop_fork (EV_P)
{
#if EV_USE_EPOLL
  if (method == EVMETHOD_EPOLL ) epoll_fork  (EV_A);
#endif
#if EV_USE_KQUEUE
  if (method == EVMETHOD_KQUEUE) kqueue_fork (EV_A);
#endif

  if (ev_is_active (&sigev))
    {
      /* default loop */

      /* the signal pipe must not be shared with the parent, so rebuild it */
      ev_ref (EV_A);
      ev_io_stop (EV_A_ &sigev);
      close (sigpipe [0]);
      close (sigpipe [1]);

      while (pipe (sigpipe))
        syserr ("(libev) error creating pipe");

      siginit (EV_A);
    }

  postfork = 0;
}
848
849 #if EV_MULTIPLICITY
850 struct ev_loop *
851 ev_loop_new (int methods)
852 {
853 struct ev_loop *loop = (struct ev_loop *)ev_malloc (sizeof (struct ev_loop));
854
855 memset (loop, 0, sizeof (struct ev_loop));
856
857 loop_init (EV_A_ methods);
858
859 if (ev_method (EV_A))
860 return loop;
861
862 return 0;
863 }
864
/* destroy a loop created by ev_loop_new and free its memory */
void
ev_loop_destroy (EV_P)
{
  loop_destroy (EV_A);
  ev_free (loop);
}
871
/* flag the loop so its next iteration re-arms kernel state after fork */
void
ev_loop_fork (EV_P)
{
  postfork = 1;
}
877
878 #endif
879
#if EV_MULTIPLICITY
struct ev_loop *
#else
int
#endif
/* initialise (once) and return the default loop; also installs the
 * SIGCHLD child watcher on platforms that have it */
ev_default_loop (int methods)
{
  /* sigpipe is all-zero before the first call; create the pipe exactly once */
  if (sigpipe [0] == sigpipe [1])
    if (pipe (sigpipe))
      return 0;

  if (!default_loop)
    {
#if EV_MULTIPLICITY
      struct ev_loop *loop = default_loop = &default_loop_struct;
#else
      default_loop = 1;
#endif

      loop_init (EV_A_ methods);

      if (ev_method (EV_A))
        {
          siginit (EV_A);

#ifndef WIN32
          /* the default loop reaps children via SIGCHLD */
          ev_signal_init (&childev, childcb, SIGCHLD);
          ev_set_priority (&childev, EV_MAXPRI);
          ev_signal_start (EV_A_ &childev);
          ev_unref (EV_A); /* child watcher should not keep loop alive */
#endif
        }
      else
        default_loop = 0;   /* no backend worked */
    }

  return default_loop;
}
918
/* tear down the default loop, its signal pipe and its child watcher */
void
ev_default_destroy (void)
{
#if EV_MULTIPLICITY
  struct ev_loop *loop = default_loop;
#endif

#ifndef WIN32
  ev_ref (EV_A); /* child watcher */
  ev_signal_stop (EV_A_ &childev);
#endif

  ev_ref (EV_A); /* signal watcher */
  ev_io_stop (EV_A_ &sigev);

  /* reset to 0 so a later ev_default_loop recreates the pipe */
  close (sigpipe [0]); sigpipe [0] = 0;
  close (sigpipe [1]); sigpipe [1] = 0;

  loop_destroy (EV_A);
}
939
/* to be called in the child after fork when using the default loop;
 * the actual work is deferred to the next loop iteration */
void
ev_default_fork (void)
{
#if EV_MULTIPLICITY
  struct ev_loop *loop = default_loop;
#endif

  if (method)   /* only if the loop was ever initialised */
    postfork = 1;
}
950
951 /*****************************************************************************/
952
953 static int
954 any_pending (EV_P)
955 {
956 int pri;
957
958 for (pri = NUMPRI; pri--; )
959 if (pendingcnt [pri])
960 return 1;
961
962 return 0;
963 }
964
/* invoke all queued watcher callbacks, highest priority first */
static void
call_pending (EV_P)
{
  int pri;

  for (pri = NUMPRI; pri--; )
    while (pendingcnt [pri])
      {
        ANPENDING *p = pendings [pri] + --pendingcnt [pri];

        if (p->w)   /* 0 if the event was cancelled via ev_clear_pending */
          {
            p->w->pending = 0;
            EV_CB_INVOKE (p->w, p->events);
          }
      }
}
982
/* make all expired relative timers (->at <= mn_now) pending:
 * reschedule repeating ones, stop one-shot ones */
static void
timers_reify (EV_P)
{
  while (timercnt && ((WT)timers [0])->at <= mn_now)
    {
      struct ev_timer *w = timers [0];

      assert (("inactive timer on timer heap detected", ev_is_active (w)));

      /* first reschedule or stop timer */
      if (w->repeat)
        {
          assert (("negative ev_timer repeat value found while processing timers", w->repeat > 0.));

          ((WT)w)->at += w->repeat;
          if (((WT)w)->at < mn_now)   /* never schedule into the past */
            ((WT)w)->at = mn_now;

          downheap ((WT *)timers, timercnt, 0);
        }
      else
        ev_timer_stop (EV_A_ w); /* nonrepeating: stop timer */

      ev_feed_event (EV_A_ (W)w, EV_TIMEOUT);
    }
}
1009
#if EV_PERIODICS
/* make all due absolute (periodic) timers pending: reschedule via the
 * user callback or by interval arithmetic, or stop one-shot watchers */
static void
periodics_reify (EV_P)
{
  while (periodiccnt && ((WT)periodics [0])->at <= ev_rt_now)
    {
      struct ev_periodic *w = periodics [0];

      assert (("inactive timer on periodic heap detected", ev_is_active (w)));

      /* first reschedule or stop timer */
      if (w->reschedule_cb)
        {
          /* ask the callback for the next time, nudged slightly into the future */
          ev_tstamp at = ((WT)w)->at = w->reschedule_cb (w, ev_rt_now + 0.0001);

          assert (("ev_periodic reschedule callback returned time in the past", ((WT)w)->at > ev_rt_now));
          downheap ((WT *)periodics, periodiccnt, 0);
        }
      else if (w->interval)
        {
          /* advance by whole intervals so the absolute phase is preserved */
          ((WT)w)->at += floor ((ev_rt_now - ((WT)w)->at) / w->interval + 1.) * w->interval;
          assert (("ev_periodic timeout in the past detected while processing timers, negative interval?", ((WT)w)->at > ev_rt_now));
          downheap ((WT *)periodics, periodiccnt, 0);
        }
      else
        ev_periodic_stop (EV_A_ w); /* nonrepeating: stop timer */

      ev_feed_event (EV_A_ (W)w, EV_PERIODIC);
    }
}
1040
/* recalculate all periodic trigger times after a detected wall-clock
 * jump, then rebuild the heap bottom-up */
static void
periodics_reschedule (EV_P)
{
  int i;

  /* adjust periodics after time jump */
  for (i = 0; i < periodiccnt; ++i)
    {
      struct ev_periodic *w = periodics [i];

      if (w->reschedule_cb)
        ((WT)w)->at = w->reschedule_cb (w, ev_rt_now);
      else if (w->interval)
        ((WT)w)->at += ceil ((ev_rt_now - ((WT)w)->at) / w->interval) * w->interval;
    }

  /* now rebuild the heap */
  for (i = periodiccnt >> 1; i--; )
    downheap ((WT *)periodics, periodiccnt, i);
}
#endif
1062
/* refresh mn_now from the monotonic clock and derive ev_rt_now from it;
 * returns 1 when interpolation could not be trusted and the wall clock
 * had to be re-read (caller may then want to check for time jumps) */
inline int
time_update_monotonic (EV_P)
{
  mn_now = get_clock ();

  /* interpolate the wall clock while we stay within half the
     jump-detection threshold of the last synchronisation point */
  if (expect_true (mn_now - now_floor < MIN_TIMEJUMP * .5))
    {
      ev_rt_now = rtmn_diff + mn_now;
      return 0;
    }
  else
    {
      now_floor = mn_now;
      ev_rt_now = ev_time ();
      return 1;
    }
}
1080
/* bring ev_rt_now/mn_now up to date and detect/compensate clock jumps */
static void
time_update (EV_P)
{
  int i;

#if EV_USE_MONOTONIC
  if (expect_true (have_monotonic))
    {
      if (time_update_monotonic (EV_A))
        {
          ev_tstamp odiff = rtmn_diff;

          for (i = 4; --i; ) /* loop a few times, before making important decisions */
            {
              rtmn_diff = ev_rt_now - mn_now;

              if (fabs (odiff - rtmn_diff) < MIN_TIMEJUMP)
                return; /* all is well */

              /* the clocks disagree: resample both and try again */
              ev_rt_now = ev_time ();
              mn_now    = get_clock ();
              now_floor = mn_now;
            }

# if EV_PERIODICS
          periodics_reschedule (EV_A);
# endif
          /* no timer adjustment, as the monotonic clock doesn't jump */
          /* timers_reschedule (EV_A_ rtmn_diff - odiff) */
        }
    }
  else
#endif
    {
      ev_rt_now = ev_time ();

      /* without a monotonic clock, a backwards jump or an implausibly
         large forwards jump must be folded into the relative timers */
      if (expect_false (mn_now > ev_rt_now || mn_now < ev_rt_now - MAX_BLOCKTIME - MIN_TIMEJUMP))
        {
#if EV_PERIODICS
          periodics_reschedule (EV_A);
#endif

          /* adjust timers. this is easy, as the offset is the same for all */
          for (i = 0; i < timercnt; ++i)
            ((WT)timers [i])->at += ev_rt_now - mn_now;
        }

      mn_now = ev_rt_now;
    }
}
1131
/* increment the loop's activity count: keeps ev_loop from returning */
void
ev_ref (EV_P)
{
  ++activecnt;
}
1137
/* decrement the loop's activity count: lets ev_loop return even while
 * the corresponding watcher is still active */
void
ev_unref (EV_P)
{
  --activecnt;
}
1143
static int loop_done;   /* 0 = keep looping, 1 = exit this call, 2 = exit all (see ev_unloop) */

/* the event loop proper: prepare watchers, poll the backend, update the
 * time, then dispatch timers, io, idle and check watchers */
void
ev_loop (EV_P_ int flags)
{
  double block;   /* how long to block in the backend poll */
  loop_done = flags & (EVLOOP_ONESHOT | EVLOOP_NONBLOCK) ? 1 : 0;

  do
    {
      /* queue check watchers (and execute them) */
      if (expect_false (preparecnt))
        {
          queue_events (EV_A_ (W *)prepares, preparecnt, EV_PREPARE);
          call_pending (EV_A);
        }

      /* we might have forked, so reify kernel state if necessary */
      if (expect_false (postfork))
        loop_fork (EV_A);

      /* update fd-related kernel structures */
      fd_reify (EV_A);

      /* calculate blocking time */

      /* we only need this for !monotonic clock or timers, but as we basically
         always have timers, we just calculate it always */
#if EV_USE_MONOTONIC
      if (expect_true (have_monotonic))
        time_update_monotonic (EV_A);
      else
#endif
        {
          ev_rt_now = ev_time ();
          mn_now    = ev_rt_now;
        }

      if (flags & EVLOOP_NONBLOCK || idlecnt)
        block = 0.;
      else
        {
          block = MAX_BLOCKTIME;

          if (timercnt)
            {
              /* wake in time for the earliest relative timer */
              ev_tstamp to = ((WT)timers [0])->at - mn_now + method_fudge;
              if (block > to) block = to;
            }

#if EV_PERIODICS
          if (periodiccnt)
            {
              /* and for the earliest absolute (periodic) timer */
              ev_tstamp to = ((WT)periodics [0])->at - ev_rt_now + method_fudge;
              if (block > to) block = to;
            }
#endif

          if (block < 0.) block = 0.;
        }

      method_poll (EV_A_ block);

      /* update ev_rt_now, do magic */
      time_update (EV_A);

      /* queue pending timers and reschedule them */
      timers_reify (EV_A); /* relative timers called last */
#if EV_PERIODICS
      periodics_reify (EV_A); /* absolute timers called first */
#endif

      /* queue idle watchers unless io or timers are pending */
      if (idlecnt && !any_pending (EV_A))
        queue_events (EV_A_ (W *)idles, idlecnt, EV_IDLE);

      /* queue check watchers, to be executed first */
      if (checkcnt)
        queue_events (EV_A_ (W *)checks, checkcnt, EV_CHECK);

      call_pending (EV_A);
    }
  while (activecnt && !loop_done);

  /* a '2' means exit all nested ev_loop calls, so leave it set for them */
  if (loop_done != 2)
    loop_done = 0;
}
1231
/* ask ev_loop to return: how = 1 exits the innermost call,
 * how = 2 exits all nested calls (see the loop_done handling in ev_loop) */
void
ev_unloop (EV_P_ int how)
{
  loop_done = how;
}
1237
1238 /*****************************************************************************/
1239
1240 inline void
1241 wlist_add (WL *head, WL elem)
1242 {
1243 elem->next = *head;
1244 *head = elem;
1245 }
1246
/* remove elem from the singly linked watcher list rooted at *head
 * (no-op if it is not on the list) */
inline void
wlist_del (WL *head, WL elem)
{
  while (*head)
    {
      if (*head == elem)
        {
          *head = elem->next;
          return;
        }

      head = &(*head)->next;
    }
}
1261
/* cancel a queued event for w; the pendings slot stays allocated with
 * its watcher pointer zeroed, and call_pending skips it */
inline void
ev_clear_pending (EV_P_ W w)
{
  if (w->pending)
    {
      pendings [ABSPRI (w)][w->pending - 1].w = 0;
      w->pending = 0;
    }
}
1271
/* common watcher-start bookkeeping: clamp the priority into the valid
 * range, record the (watcher-type-specific) active index, ref the loop */
inline void
ev_start (EV_P_ W w, int active)
{
  if (w->priority < EV_MINPRI) w->priority = EV_MINPRI;
  if (w->priority > EV_MAXPRI) w->priority = EV_MAXPRI;

  w->active = active;
  ev_ref (EV_A);
}
1281
/* common watcher-stop bookkeeping: unref the loop and mark inactive */
inline void
ev_stop (EV_P_ W w)
{
  ev_unref (EV_A);
  w->active = 0;
}
1288
1289 /*****************************************************************************/
1290
/* start an io watcher: link it onto its fd's watcher list and schedule
 * a kernel interest-set update for that fd */
void
ev_io_start (EV_P_ struct ev_io *w)
{
  int fd = w->fd;

  if (ev_is_active (w))
    return;

  assert (("ev_io_start called with negative fd", fd >= 0));

  ev_start (EV_A_ (W)w, 1);
  array_needsize (ANFD, anfds, anfdmax, fd + 1, anfds_init);
  wlist_add ((WL *)&anfds[fd].head, (WL)w);

  fd_change (EV_A_ fd);
}
1307
1308 void
1309 ev_io_stop (EV_P_ struct ev_io *w)
1310 {
1311 ev_clear_pending (EV_A_ (W)w);
1312 if (!ev_is_active (w))
1313 return;
1314
1315 assert (("ev_io_start called with illegal fd (must stay constant after start!)", w->fd >= 0 && w->fd < anfdmax));
1316
1317 wlist_del ((WL *)&anfds[w->fd].head, (WL)w);
1318 ev_stop (EV_A_ (W)w);
1319
1320 fd_change (EV_A_ w->fd);
1321 }
1322
/* start a relative timer: ->at is interpreted relative to now at start
 * time and converted to an absolute monotonic time on the heap */
void
ev_timer_start (EV_P_ struct ev_timer *w)
{
  if (ev_is_active (w))
    return;

  ((WT)w)->at += mn_now;   /* relative -> absolute */

  assert (("ev_timer_start called with negative timer repeat value", w->repeat >= 0.));

  ev_start (EV_A_ (W)w, ++timercnt);   /* active = 1-based heap index */
  array_needsize (struct ev_timer *, timers, timermax, timercnt, (void));
  timers [timercnt - 1] = w;
  upheap ((WT *)timers, timercnt - 1);

  assert (("internal timer heap corruption", timers [((W)w)->active - 1] == w));
}
1340
/* stop a timer: remove it from the heap and convert ->at back to a
 * relative value so the watcher can be restarted */
void
ev_timer_stop (EV_P_ struct ev_timer *w)
{
  ev_clear_pending (EV_A_ (W)w);
  if (!ev_is_active (w))
    return;

  assert (("internal timer heap corruption", timers [((W)w)->active - 1] == w));

  /* unless w is the last heap element, move the last element into its
     slot and re-establish the heap invariant there */
  if (((W)w)->active < timercnt--)
    {
      timers [((W)w)->active - 1] = timers [timercnt];
      adjustheap ((WT *)timers, timercnt, ((W)w)->active - 1);
    }

  ((WT)w)->at -= mn_now;   /* absolute -> relative */

  ev_stop (EV_A_ (W)w);
}
1360
/* re-arm a timer with its repeat value: reschedule in place if active
 * and repeating, stop if active and non-repeating, start otherwise */
void
ev_timer_again (EV_P_ struct ev_timer *w)
{
  if (ev_is_active (w))
    {
      if (w->repeat)
        {
          ((WT)w)->at = mn_now + w->repeat;
          adjustheap ((WT *)timers, timercnt, ((W)w)->active - 1);
        }
      else
        ev_timer_stop (EV_A_ w);
    }
  else if (w->repeat)
    ev_timer_start (EV_A_ w);
}
1377
#if EV_PERIODICS
/* start an absolute (periodic) timer: compute its first trigger time
 * via the reschedule callback or interval arithmetic, insert into heap */
void
ev_periodic_start (EV_P_ struct ev_periodic *w)
{
  if (ev_is_active (w))
    return;

  if (w->reschedule_cb)
    ((WT)w)->at = w->reschedule_cb (w, ev_rt_now);
  else if (w->interval)
    {
      assert (("ev_periodic_start called with negative interval value", w->interval >= 0.));
      /* this formula differs from the one in periodic_reify because we do not always round up */
      ((WT)w)->at += ceil ((ev_rt_now - ((WT)w)->at) / w->interval) * w->interval;
    }

  ev_start (EV_A_ (W)w, ++periodiccnt);   /* active = 1-based heap index */
  array_needsize (struct ev_periodic *, periodics, periodicmax, periodiccnt, (void));
  periodics [periodiccnt - 1] = w;
  upheap ((WT *)periodics, periodiccnt - 1);

  assert (("internal periodic heap corruption", periodics [((W)w)->active - 1] == w));
}
1401
void
ev_periodic_stop (EV_P_ struct ev_periodic *w)
{
  ev_clear_pending (EV_A_ (W)w);
  if (!ev_is_active (w))
    return;

  assert (("internal periodic heap corruption", periodics [((W)w)->active - 1] == w));

  /* unless w already occupies the last heap slot, move the last element
     into w's slot and re-establish the heap invariant around it */
  if (((W)w)->active < periodiccnt--)
    {
      periodics [((W)w)->active - 1] = periodics [periodiccnt];
      adjustheap ((WT *)periodics, periodiccnt, ((W)w)->active - 1);
    }

  ev_stop (EV_A_ (W)w);
}
1419
/* recompute the trigger time of w; implemented as a full stop/start
   cycle, which re-runs the scheduling logic in ev_periodic_start */
void
ev_periodic_again (EV_P_ struct ev_periodic *w)
{
  /* TODO: use adjustheap and recalculation */
  ev_periodic_stop (EV_A_ w);
  ev_periodic_start (EV_A_ w);
}
1427 #endif
1428
1429 void
1430 ev_idle_start (EV_P_ struct ev_idle *w)
1431 {
1432 if (ev_is_active (w))
1433 return;
1434
1435 ev_start (EV_A_ (W)w, ++idlecnt);
1436 array_needsize (struct ev_idle *, idles, idlemax, idlecnt, (void));
1437 idles [idlecnt - 1] = w;
1438 }
1439
1440 void
1441 ev_idle_stop (EV_P_ struct ev_idle *w)
1442 {
1443 ev_clear_pending (EV_A_ (W)w);
1444 if (!ev_is_active (w))
1445 return;
1446
1447 idles [((W)w)->active - 1] = idles [--idlecnt];
1448 ev_stop (EV_A_ (W)w);
1449 }
1450
1451 void
1452 ev_prepare_start (EV_P_ struct ev_prepare *w)
1453 {
1454 if (ev_is_active (w))
1455 return;
1456
1457 ev_start (EV_A_ (W)w, ++preparecnt);
1458 array_needsize (struct ev_prepare *, prepares, preparemax, preparecnt, (void));
1459 prepares [preparecnt - 1] = w;
1460 }
1461
1462 void
1463 ev_prepare_stop (EV_P_ struct ev_prepare *w)
1464 {
1465 ev_clear_pending (EV_A_ (W)w);
1466 if (!ev_is_active (w))
1467 return;
1468
1469 prepares [((W)w)->active - 1] = prepares [--preparecnt];
1470 ev_stop (EV_A_ (W)w);
1471 }
1472
1473 void
1474 ev_check_start (EV_P_ struct ev_check *w)
1475 {
1476 if (ev_is_active (w))
1477 return;
1478
1479 ev_start (EV_A_ (W)w, ++checkcnt);
1480 array_needsize (struct ev_check *, checks, checkmax, checkcnt, (void));
1481 checks [checkcnt - 1] = w;
1482 }
1483
1484 void
1485 ev_check_stop (EV_P_ struct ev_check *w)
1486 {
1487 ev_clear_pending (EV_A_ (W)w);
1488 if (!ev_is_active (w))
1489 return;
1490
1491 checks [((W)w)->active - 1] = checks [--checkcnt];
1492 ev_stop (EV_A_ (W)w);
1493 }
1494
1495 #ifndef SA_RESTART
1496 # define SA_RESTART 0
1497 #endif
1498
void
ev_signal_start (EV_P_ struct ev_signal *w)
{
#if EV_MULTIPLICITY
  assert (("signal watchers are only supported in the default loop", loop == default_loop));
#endif
  if (ev_is_active (w))
    return;

  assert (("ev_signal_start called with illegal signal number", w->signum > 0));

  /* signal watchers are not heap-managed: they all use active index 1
     and are chained into the per-signal list in signals [] */
  ev_start (EV_A_ (W)w, 1);
  array_needsize (ANSIG, signals, signalmax, w->signum, signals_init);
  wlist_add ((WL *)&signals [w->signum - 1].head, (WL)w);

  /* if w ended up as the sole list entry (->next is 0), no handler was
     installed for this signal yet, so install one now */
  if (!((WL)w)->next)
    {
#if WIN32
      signal (w->signum, sighandler);
#else
      struct sigaction sa;
      sa.sa_handler = sighandler;
      sigfillset (&sa.sa_mask); /* block all signals while the handler runs */
      sa.sa_flags = SA_RESTART; /* if restarting works we save one iteration */
      sigaction (w->signum, &sa, 0);
#endif
    }
}
1527
void
ev_signal_stop (EV_P_ struct ev_signal *w)
{
  ev_clear_pending (EV_A_ (W)w);
  if (!ev_is_active (w))
    return;

  wlist_del ((WL *)&signals [w->signum - 1].head, (WL)w);
  ev_stop (EV_A_ (W)w);

  /* once no watcher is left for this signal, restore the default disposition */
  if (!signals [w->signum - 1].head)
    signal (w->signum, SIG_DFL);
}
1541
1542 void
1543 ev_child_start (EV_P_ struct ev_child *w)
1544 {
1545 #if EV_MULTIPLICITY
1546 assert (("child watchers are only supported in the default loop", loop == default_loop));
1547 #endif
1548 if (ev_is_active (w))
1549 return;
1550
1551 ev_start (EV_A_ (W)w, 1);
1552 wlist_add ((WL *)&childs [w->pid & (PID_HASHSIZE - 1)], (WL)w);
1553 }
1554
1555 void
1556 ev_child_stop (EV_P_ struct ev_child *w)
1557 {
1558 ev_clear_pending (EV_A_ (W)w);
1559 if (!ev_is_active (w))
1560 return;
1561
1562 wlist_del ((WL *)&childs [w->pid & (PID_HASHSIZE - 1)], (WL)w);
1563 ev_stop (EV_A_ (W)w);
1564 }
1565
1566 /*****************************************************************************/
1567
/* heap-allocated state for ev_once: whichever of the two watchers fires
   first stops both, frees this struct and invokes the user callback */
struct ev_once
{
  struct ev_io io;                     /* fd watcher, started only when fd >= 0 */
  struct ev_timer to;                  /* timeout watcher, started only when timeout >= 0. */
  void (*cb)(int revents, void *arg);  /* user callback */
  void *arg;                           /* opaque user data passed to cb */
};
1575
1576 static void
1577 once_cb (EV_P_ struct ev_once *once, int revents)
1578 {
1579 void (*cb)(int revents, void *arg) = once->cb;
1580 void *arg = once->arg;
1581
1582 ev_io_stop (EV_A_ &once->io);
1583 ev_timer_stop (EV_A_ &once->to);
1584 ev_free (once);
1585
1586 cb (revents, arg);
1587 }
1588
1589 static void
1590 once_cb_io (EV_P_ struct ev_io *w, int revents)
1591 {
1592 once_cb (EV_A_ (struct ev_once *)(((char *)w) - offsetof (struct ev_once, io)), revents);
1593 }
1594
1595 static void
1596 once_cb_to (EV_P_ struct ev_timer *w, int revents)
1597 {
1598 once_cb (EV_A_ (struct ev_once *)(((char *)w) - offsetof (struct ev_once, to)), revents);
1599 }
1600
1601 void
1602 ev_once (EV_P_ int fd, int events, ev_tstamp timeout, void (*cb)(int revents, void *arg), void *arg)
1603 {
1604 struct ev_once *once = (struct ev_once *)ev_malloc (sizeof (struct ev_once));
1605
1606 if (!once)
1607 cb (EV_ERROR | EV_READ | EV_WRITE | EV_TIMEOUT, arg);
1608 else
1609 {
1610 once->cb = cb;
1611 once->arg = arg;
1612
1613 ev_init (&once->io, once_cb_io);
1614 if (fd >= 0)
1615 {
1616 ev_io_set (&once->io, fd, events);
1617 ev_io_start (EV_A_ &once->io);
1618 }
1619
1620 ev_init (&once->to, once_cb_to);
1621 if (timeout >= 0.)
1622 {
1623 ev_timer_set (&once->to, timeout, 0.);
1624 ev_timer_start (EV_A_ &once->to);
1625 }
1626 }
1627 }
1628
1629 #ifdef __cplusplus
1630 }
1631 #endif
1632