/cvs/libev/ev.c
Revision: 1.125
Committed: Sat Nov 17 02:28:43 2007 UTC by root
Content type: text/plain
Branch: MAIN
Changes since 1.124: +1 -1 lines
Log Message:
*** empty log message ***

File Contents

# Content
1 /*
2 * libev event processing core, watcher management
3 *
4 * Copyright (c) 2007 Marc Alexander Lehmann <libev@schmorp.de>
5 * All rights reserved.
6 *
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions are
9 * met:
10 *
11 * * Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 *
14 * * Redistributions in binary form must reproduce the above
15 * copyright notice, this list of conditions and the following
16 * disclaimer in the documentation and/or other materials provided
17 * with the distribution.
18 *
19 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
20 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
21 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
22 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
23 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
24 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
25 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
26 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
27 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
28 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
29 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
30 */
31
32 #ifdef __cplusplus
33 extern "C" {
34 #endif
35
36 #ifndef EV_STANDALONE
37 # include "config.h"
38
39 # if HAVE_CLOCK_GETTIME
40 # ifndef EV_USE_MONOTONIC
41 # define EV_USE_MONOTONIC 1
42 # endif
43 # ifndef EV_USE_REALTIME
44 # define EV_USE_REALTIME 1
45 # endif
46 # endif
47
48 # if HAVE_SELECT && HAVE_SYS_SELECT_H && !defined (EV_USE_SELECT)
49 # define EV_USE_SELECT 1
50 # endif
51
52 # if HAVE_POLL && HAVE_POLL_H && !defined (EV_USE_POLL)
53 # define EV_USE_POLL 1
54 # endif
55
56 # if HAVE_EPOLL_CTL && HAVE_SYS_EPOLL_H && !defined (EV_USE_EPOLL)
57 # define EV_USE_EPOLL 1
58 # endif
59
60 # if HAVE_KQUEUE && HAVE_SYS_EVENT_H && HAVE_SYS_QUEUE_H && !defined (EV_USE_KQUEUE)
61 # define EV_USE_KQUEUE 1
62 # endif
63
64 # if HAVE_PORT_H && HAVE_PORT_CREATE && !defined (EV_USE_PORT)
65 # define EV_USE_PORT 1
66 # endif
67
68 #endif
69
70 #include <math.h>
71 #include <stdlib.h>
72 #include <fcntl.h>
73 #include <stddef.h>
74
75 #include <stdio.h>
76
77 #include <assert.h>
78 #include <errno.h>
79 #include <sys/types.h>
80 #include <time.h>
81
82 #include <signal.h>
83
84 #ifndef _WIN32
85 # include <unistd.h>
86 # include <sys/time.h>
87 # include <sys/wait.h>
88 #else
89 # define WIN32_LEAN_AND_MEAN
90 # include <windows.h>
91 # ifndef EV_SELECT_IS_WINSOCKET
92 # define EV_SELECT_IS_WINSOCKET 1
93 # endif
94 #endif
95
96 /**/
97
98 #ifndef EV_USE_MONOTONIC
99 # define EV_USE_MONOTONIC 0
100 #endif
101
102 #ifndef EV_USE_REALTIME
103 # define EV_USE_REALTIME 0
104 #endif
105
106 #ifndef EV_USE_SELECT
107 # define EV_USE_SELECT 1
108 #endif
109
110 #ifndef EV_USE_POLL
111 # ifdef _WIN32
112 # define EV_USE_POLL 0
113 # else
114 # define EV_USE_POLL 1
115 # endif
116 #endif
117
118 #ifndef EV_USE_EPOLL
119 # define EV_USE_EPOLL 0
120 #endif
121
122 #ifndef EV_USE_KQUEUE
123 # define EV_USE_KQUEUE 0
124 #endif
125
126 #ifndef EV_USE_PORT
127 # define EV_USE_PORT 0
128 #endif
129
130 /**/
131
132 /* darwin simply cannot be helped */
133 #ifdef __APPLE__
134 # undef EV_USE_POLL
135 # undef EV_USE_KQUEUE
136 #endif
137
138 #ifndef CLOCK_MONOTONIC
139 # undef EV_USE_MONOTONIC
140 # define EV_USE_MONOTONIC 0
141 #endif
142
143 #ifndef CLOCK_REALTIME
144 # undef EV_USE_REALTIME
145 # define EV_USE_REALTIME 0
146 #endif
147
148 #if EV_SELECT_IS_WINSOCKET
149 # include <winsock.h>
150 #endif
151
152 /**/
153
154 #define MIN_TIMEJUMP 1. /* minimum timejump that gets detected (if monotonic clock available) */
155 #define MAX_BLOCKTIME 59.743 /* never wait longer than this time (to detect time jumps) */
156 #define PID_HASHSIZE 16 /* size of pid hash table, must be power of two */
157 /*#define CLEANUP_INTERVAL (MAX_BLOCKTIME * 5.)    how often to try to free memory and re-check fds */
158
159 #ifdef EV_H
160 # include EV_H
161 #else
162 # include "ev.h"
163 #endif
164
165 #if __GNUC__ >= 3
166 # define expect(expr,value) __builtin_expect ((expr),(value))
167 # define inline static inline
168 #else
169 # define expect(expr,value) (expr)
170 # define inline static
171 #endif
172
173 #define expect_false(expr) expect ((expr) != 0, 0)
174 #define expect_true(expr) expect ((expr) != 0, 1)
175
176 #define NUMPRI (EV_MAXPRI - EV_MINPRI + 1)
177 #define ABSPRI(w) ((w)->priority - EV_MINPRI)
178
179 #define EMPTY0 /* required for microsofts broken pseudo-c compiler */
180 #define EMPTY2(a,b) /* used to suppress some warnings */
181
182 typedef struct ev_watcher *W;
183 typedef struct ev_watcher_list *WL;
184 typedef struct ev_watcher_time *WT;
185
186 static int have_monotonic; /* did clock_gettime (CLOCK_MONOTONIC) work? */
187
188 #ifdef _WIN32
189 # include "ev_win32.c"
190 #endif
191
192 /*****************************************************************************/
193
194 static void (*syserr_cb)(const char *msg);
195
196 void ev_set_syserr_cb (void (*cb)(const char *msg))
197 {
198 syserr_cb = cb;
199 }
200
201 static void
202 syserr (const char *msg)
203 {
204 if (!msg)
205 msg = "(libev) system error";
206
207 if (syserr_cb)
208 syserr_cb (msg);
209 else
210 {
211 perror (msg);
212 abort ();
213 }
214 }
215
216 static void *(*alloc)(void *ptr, long size);
217
218 void ev_set_allocator (void *(*cb)(void *ptr, long size))
219 {
220 alloc = cb;
221 }
222
223 static void *
224 ev_realloc (void *ptr, long size)
225 {
226 ptr = alloc ? alloc (ptr, size) : realloc (ptr, size);
227
228 if (!ptr && size)
229 {
230 fprintf (stderr, "libev: cannot allocate %ld bytes, aborting.\n", size);
231 abort ();
232 }
233
234 return ptr;
235 }
236
237 #define ev_malloc(size) ev_realloc (0, (size))
238 #define ev_free(ptr) ev_realloc ((ptr), 0)
239
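/* Editor's note: a minimal sketch (not part of the original file) of how an
   embedding application might install its own allocator and error handler via
   the two hooks above; only ev_set_allocator/ev_set_syserr_cb and the
   (void *ptr, long size) contract come from this file, all other names are
   hypothetical. */
#if 0
#include <stdio.h>
#include <stdlib.h>
#include "ev.h"

static void *
my_realloc (void *ptr, long size)
{
  /* same contract as ev_realloc above: size == 0 frees, otherwise (re)allocate */
  if (size)
    return realloc (ptr, size);

  free (ptr);
  return 0;
}

static void
my_syserr (const char *msg)
{
  /* log and terminate instead of the default perror () + abort () */
  fprintf (stderr, "fatal libev error: %s\n", msg);
  exit (1);
}

int
main (void)
{
  ev_set_allocator (my_realloc);
  ev_set_syserr_cb (my_syserr);
  /* ... set up the loop as usual ... */
  return 0;
}
#endif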
240 /*****************************************************************************/
241
242 typedef struct
243 {
244 WL head;
245 unsigned char events;
246 unsigned char reify;
247 #if EV_SELECT_IS_WINSOCKET
248 SOCKET handle;
249 #endif
250 } ANFD;
251
252 typedef struct
253 {
254 W w;
255 int events;
256 } ANPENDING;
257
258 #if EV_MULTIPLICITY
259
260 struct ev_loop
261 {
262 ev_tstamp ev_rt_now;
263 #define ev_rt_now ((loop)->ev_rt_now)
264 #define VAR(name,decl) decl;
265 #include "ev_vars.h"
266 #undef VAR
267 };
268 #include "ev_wrap.h"
269
270 static struct ev_loop default_loop_struct;
271 struct ev_loop *ev_default_loop_ptr;
272
273 #else
274
275 ev_tstamp ev_rt_now;
276 #define VAR(name,decl) static decl;
277 #include "ev_vars.h"
278 #undef VAR
279
280 static int ev_default_loop_ptr;
281
282 #endif
283
284 /*****************************************************************************/
285
286 ev_tstamp
287 ev_time (void)
288 {
289 #if EV_USE_REALTIME
290 struct timespec ts;
291 clock_gettime (CLOCK_REALTIME, &ts);
292 return ts.tv_sec + ts.tv_nsec * 1e-9;
293 #else
294 struct timeval tv;
295 gettimeofday (&tv, 0);
296 return tv.tv_sec + tv.tv_usec * 1e-6;
297 #endif
298 }
299
300 inline ev_tstamp
301 get_clock (void)
302 {
303 #if EV_USE_MONOTONIC
304 if (expect_true (have_monotonic))
305 {
306 struct timespec ts;
307 clock_gettime (CLOCK_MONOTONIC, &ts);
308 return ts.tv_sec + ts.tv_nsec * 1e-9;
309 }
310 #endif
311
312 return ev_time ();
313 }
314
315 #if EV_MULTIPLICITY
316 ev_tstamp
317 ev_now (EV_P)
318 {
319 return ev_rt_now;
320 }
321 #endif
322
323 #define array_roundsize(type,n) (((n) | 4) & ~3)
324
325 #define array_needsize(type,base,cur,cnt,init) \
326 if (expect_false ((cnt) > cur)) \
327 { \
328 int newcnt = cur; \
329 do \
330 { \
331 newcnt = array_roundsize (type, newcnt << 1); \
332 } \
333 while ((cnt) > newcnt); \
334 \
335 base = (type *)ev_realloc (base, sizeof (type) * (newcnt));\
336 init (base + cur, newcnt - cur); \
337 cur = newcnt; \
338 }
339
340 #define array_slim(type,stem) \
341 if (stem ## max < array_roundsize (type, stem ## cnt >> 2)) \
342 { \
343 stem ## max = array_roundsize (type, stem ## cnt >> 1); \
344 stem ## s = (type *)ev_realloc (stem ## s, sizeof (type) * (stem ## max));\
345 fprintf (stderr, "slimmed down " # stem " to %d\n", stem ## max);/*D*/\
346 }
347
348 #define array_free(stem, idx) \
349 ev_free (stem ## s idx); stem ## cnt idx = stem ## max idx = 0;
350
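/* Editor's note: an illustrative sketch (not part of the original file) of how
   the array_needsize growth macro above is used throughout this file -- compare
   fd_change () further down; "elems", "elemmax" and "elemcnt" are hypothetical
   names. */
#if 0
static int *elems;
static int elemmax, elemcnt;

static void
elem_push (int value)
{
  ++elemcnt;
  /* grows "elems" to hold at least "elemcnt" entries, rounding the new size up
     and running the init callback (here the no-op EMPTY2) on the fresh slots */
  array_needsize (int, elems, elemmax, elemcnt, EMPTY2);
  elems [elemcnt - 1] = value;
}
#endif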
351 /*****************************************************************************/
352
353 static void
354 anfds_init (ANFD *base, int count)
355 {
356 while (count--)
357 {
358 base->head = 0;
359 base->events = EV_NONE;
360 base->reify = 0;
361
362 ++base;
363 }
364 }
365
366 void
367 ev_feed_event (EV_P_ void *w, int revents)
368 {
369 W w_ = (W)w;
370
371 if (expect_false (w_->pending))
372 {
373 pendings [ABSPRI (w_)][w_->pending - 1].events |= revents;
374 return;
375 }
376
377 w_->pending = ++pendingcnt [ABSPRI (w_)];
378 array_needsize (ANPENDING, pendings [ABSPRI (w_)], pendingmax [ABSPRI (w_)], pendingcnt [ABSPRI (w_)], EMPTY2);
379 pendings [ABSPRI (w_)][w_->pending - 1].w = w_;
380 pendings [ABSPRI (w_)][w_->pending - 1].events = revents;
381 }
382
383 static void
384 queue_events (EV_P_ W *events, int eventcnt, int type)
385 {
386 int i;
387
388 for (i = 0; i < eventcnt; ++i)
389 ev_feed_event (EV_A_ events [i], type);
390 }
391
392 inline void
393 fd_event (EV_P_ int fd, int revents)
394 {
395 ANFD *anfd = anfds + fd;
396 struct ev_io *w;
397
398 for (w = (struct ev_io *)anfd->head; w; w = (struct ev_io *)((WL)w)->next)
399 {
400 int ev = w->events & revents;
401
402 if (ev)
403 ev_feed_event (EV_A_ (W)w, ev);
404 }
405 }
406
407 void
408 ev_feed_fd_event (EV_P_ int fd, int revents)
409 {
410 fd_event (EV_A_ fd, revents);
411 }
412
413 /*****************************************************************************/
414
415 inline void
416 fd_reify (EV_P)
417 {
418 int i;
419
420 for (i = 0; i < fdchangecnt; ++i)
421 {
422 int fd = fdchanges [i];
423 ANFD *anfd = anfds + fd;
424 struct ev_io *w;
425
426 int events = 0;
427
428 for (w = (struct ev_io *)anfd->head; w; w = (struct ev_io *)((WL)w)->next)
429 events |= w->events;
430
431 #if EV_SELECT_IS_WINSOCKET
432 if (events)
433 {
434 unsigned long argp;
435 anfd->handle = _get_osfhandle (fd);
436 assert (("libev only supports socket fds in this configuration", ioctlsocket (anfd->handle, FIONREAD, &argp) == 0));
437 }
438 #endif
439
440 anfd->reify = 0;
441
442 method_modify (EV_A_ fd, anfd->events, events);
443 anfd->events = events;
444 }
445
446 fdchangecnt = 0;
447 }
448
449 static void
450 fd_change (EV_P_ int fd)
451 {
452 if (expect_false (anfds [fd].reify))
453 return;
454
455 anfds [fd].reify = 1;
456
457 ++fdchangecnt;
458 array_needsize (int, fdchanges, fdchangemax, fdchangecnt, EMPTY2);
459 fdchanges [fdchangecnt - 1] = fd;
460 }
461
462 static void
463 fd_kill (EV_P_ int fd)
464 {
465 struct ev_io *w;
466
467 while ((w = (struct ev_io *)anfds [fd].head))
468 {
469 ev_io_stop (EV_A_ w);
470 ev_feed_event (EV_A_ (W)w, EV_ERROR | EV_READ | EV_WRITE);
471 }
472 }
473
474 inline int
475 fd_valid (int fd)
476 {
477 #ifdef _WIN32
478 return _get_osfhandle (fd) != -1;
479 #else
480 return fcntl (fd, F_GETFD) != -1;
481 #endif
482 }
483
484 /* called on EBADF to verify fds */
485 static void
486 fd_ebadf (EV_P)
487 {
488 int fd;
489
490 for (fd = 0; fd < anfdmax; ++fd)
491 if (anfds [fd].events)
492 if (!fd_valid (fd) && errno == EBADF)
493 fd_kill (EV_A_ fd);
494 }
495
496 /* called on ENOMEM in select/poll to kill some fds and retry */
497 static void
498 fd_enomem (EV_P)
499 {
500 int fd;
501
502 for (fd = anfdmax; fd--; )
503 if (anfds [fd].events)
504 {
505 fd_kill (EV_A_ fd);
506 return;
507 }
508 }
509
510 /* usually called after fork if method needs to re-arm all fds from scratch */
511 static void
512 fd_rearm_all (EV_P)
513 {
514 int fd;
515
516 /* this should be highly optimised to not do anything but set a flag */
517 for (fd = 0; fd < anfdmax; ++fd)
518 if (anfds [fd].events)
519 {
520 anfds [fd].events = 0;
521 fd_change (EV_A_ fd);
522 }
523 }
524
525 /*****************************************************************************/
526
527 static void
528 upheap (WT *heap, int k)
529 {
530 WT w = heap [k];
531
532 while (k && heap [k >> 1]->at > w->at)
533 {
534 heap [k] = heap [k >> 1];
535 ((W)heap [k])->active = k + 1;
536 k >>= 1;
537 }
538
539 heap [k] = w;
540 ((W)heap [k])->active = k + 1;
541
542 }
543
544 static void
545 downheap (WT *heap, int N, int k)
546 {
547 WT w = heap [k];
548
549 while (k < (N >> 1))
550 {
551 int j = k << 1;
552
553 if (j + 1 < N && heap [j]->at > heap [j + 1]->at)
554 ++j;
555
556 if (w->at <= heap [j]->at)
557 break;
558
559 heap [k] = heap [j];
560 ((W)heap [k])->active = k + 1;
561 k = j;
562 }
563
564 heap [k] = w;
565 ((W)heap [k])->active = k + 1;
566 }
567
568 inline void
569 adjustheap (WT *heap, int N, int k)
570 {
571 upheap (heap, k);
572 downheap (heap, N, k);
573 }
574
575 /*****************************************************************************/
576
577 typedef struct
578 {
579 WL head;
580 sig_atomic_t volatile gotsig;
581 } ANSIG;
582
583 static ANSIG *signals;
584 static int signalmax;
585
586 static int sigpipe [2];
587 static sig_atomic_t volatile gotsig;
588 static struct ev_io sigev;
589
590 static void
591 signals_init (ANSIG *base, int count)
592 {
593 while (count--)
594 {
595 base->head = 0;
596 base->gotsig = 0;
597
598 ++base;
599 }
600 }
601
602 static void
603 sighandler (int signum)
604 {
605 #ifdef _WIN32
606 signal (signum, sighandler);
607 #endif
608
609 signals [signum - 1].gotsig = 1;
610
611 if (!gotsig)
612 {
613 int old_errno = errno;
614 gotsig = 1;
615 write (sigpipe [1], &signum, 1);
616 errno = old_errno;
617 }
618 }
619
620 void
621 ev_feed_signal_event (EV_P_ int signum)
622 {
623 WL w;
624
625 #if EV_MULTIPLICITY
626 assert (("feeding signal events is only supported in the default loop", loop == ev_default_loop_ptr));
627 #endif
628
629 --signum;
630
631 if (signum < 0 || signum >= signalmax)
632 return;
633
634 signals [signum].gotsig = 0;
635
636 for (w = signals [signum].head; w; w = w->next)
637 ev_feed_event (EV_A_ (W)w, EV_SIGNAL);
638 }
639
640 static void
641 sigcb (EV_P_ struct ev_io *iow, int revents)
642 {
643 int signum;
644
645 read (sigpipe [0], &revents, 1);
646 gotsig = 0;
647
648 for (signum = signalmax; signum--; )
649 if (signals [signum].gotsig)
650 ev_feed_signal_event (EV_A_ signum + 1);
651 }
652
653 static void
654 fd_intern (int fd)
655 {
656 #ifdef _WIN32
657 int arg = 1;
658 ioctlsocket (_get_osfhandle (fd), FIONBIO, &arg);
659 #else
660 fcntl (fd, F_SETFD, FD_CLOEXEC);
661 fcntl (fd, F_SETFL, O_NONBLOCK);
662 #endif
663 }
664
665 static void
666 siginit (EV_P)
667 {
668 fd_intern (sigpipe [0]);
669 fd_intern (sigpipe [1]);
670
671 ev_io_set (&sigev, sigpipe [0], EV_READ);
672 ev_io_start (EV_A_ &sigev);
673 ev_unref (EV_A); /* signal pipe watcher should not keep loop alive */
674 }
675
676 /*****************************************************************************/
677
678 static struct ev_child *childs [PID_HASHSIZE];
679
680 #ifndef _WIN32
681
682 static struct ev_signal childev;
683
684 #ifndef WCONTINUED
685 # define WCONTINUED 0
686 #endif
687
688 static void
689 child_reap (EV_P_ struct ev_signal *sw, int chain, int pid, int status)
690 {
691 struct ev_child *w;
692
693 for (w = (struct ev_child *)childs [chain & (PID_HASHSIZE - 1)]; w; w = (struct ev_child *)((WL)w)->next)
694 if (w->pid == pid || !w->pid)
695 {
696 ev_priority (w) = ev_priority (sw); /* need to do it *now* */
697 w->rpid = pid;
698 w->rstatus = status;
699 ev_feed_event (EV_A_ (W)w, EV_CHILD);
700 }
701 }
702
703 static void
704 childcb (EV_P_ struct ev_signal *sw, int revents)
705 {
706 int pid, status;
707
708 if (0 < (pid = waitpid (-1, &status, WNOHANG | WUNTRACED | WCONTINUED)))
709 {
710 /* make sure we are called again until all children have been reaped */
711 ev_feed_event (EV_A_ (W)sw, EV_SIGNAL);
712
713 child_reap (EV_A_ sw, pid, pid, status);
714 child_reap (EV_A_ sw, 0, pid, status); /* this might trigger a watcher twice, but event catches that */
715 }
716 }
717
718 #endif
719
720 /*****************************************************************************/
721
722 #if EV_USE_PORT
723 # include "ev_port.c"
724 #endif
725 #if EV_USE_KQUEUE
726 # include "ev_kqueue.c"
727 #endif
728 #if EV_USE_EPOLL
729 # include "ev_epoll.c"
730 #endif
731 #if EV_USE_POLL
732 # include "ev_poll.c"
733 #endif
734 #if EV_USE_SELECT
735 # include "ev_select.c"
736 #endif
737
738 int
739 ev_version_major (void)
740 {
741 return EV_VERSION_MAJOR;
742 }
743
744 int
745 ev_version_minor (void)
746 {
747 return EV_VERSION_MINOR;
748 }
749
750 /* return true if we are running with elevated privileges and should ignore env variables */
751 static int
752 enable_secure (void)
753 {
754 #ifdef _WIN32
755 return 0;
756 #else
757 return getuid () != geteuid ()
758 || getgid () != getegid ();
759 #endif
760 }
761
762 unsigned int
763 ev_method (EV_P)
764 {
765 return method;
766 }
767
768 static void
769 loop_init (EV_P_ unsigned int flags)
770 {
771 if (!method)
772 {
773 #if EV_USE_MONOTONIC
774 {
775 struct timespec ts;
776 if (!clock_gettime (CLOCK_MONOTONIC, &ts))
777 have_monotonic = 1;
778 }
779 #endif
780
781 ev_rt_now = ev_time ();
782 mn_now = get_clock ();
783 now_floor = mn_now;
784 rtmn_diff = ev_rt_now - mn_now;
785
786 if (!(flags & EVFLAG_NOENV) && !enable_secure () && getenv ("LIBEV_FLAGS"))
787 flags = atoi (getenv ("LIBEV_FLAGS"));
788
789 if (!(flags & 0x0000ffff))
790 flags |= 0x0000ffff;
791
792 method = 0;
793 #if EV_USE_PORT
794 if (!method && (flags & EVMETHOD_PORT )) method = port_init (EV_A_ flags);
795 #endif
796 #if EV_USE_KQUEUE
797 if (!method && (flags & EVMETHOD_KQUEUE)) method = kqueue_init (EV_A_ flags);
798 #endif
799 #if EV_USE_EPOLL
800 if (!method && (flags & EVMETHOD_EPOLL )) method = epoll_init (EV_A_ flags);
801 #endif
802 #if EV_USE_POLL
803 if (!method && (flags & EVMETHOD_POLL )) method = poll_init (EV_A_ flags);
804 #endif
805 #if EV_USE_SELECT
806 if (!method && (flags & EVMETHOD_SELECT)) method = select_init (EV_A_ flags);
807 #endif
808
809 ev_init (&sigev, sigcb);
810 ev_set_priority (&sigev, EV_MAXPRI);
811 }
812 }
813
814 static void
815 loop_destroy (EV_P)
816 {
817 int i;
818
819 #if EV_USE_PORT
820 if (method == EVMETHOD_PORT ) port_destroy (EV_A);
821 #endif
822 #if EV_USE_KQUEUE
823 if (method == EVMETHOD_KQUEUE) kqueue_destroy (EV_A);
824 #endif
825 #if EV_USE_EPOLL
826 if (method == EVMETHOD_EPOLL ) epoll_destroy (EV_A);
827 #endif
828 #if EV_USE_POLL
829 if (method == EVMETHOD_POLL ) poll_destroy (EV_A);
830 #endif
831 #if EV_USE_SELECT
832 if (method == EVMETHOD_SELECT) select_destroy (EV_A);
833 #endif
834
835 for (i = NUMPRI; i--; )
836 array_free (pending, [i]);
837
838 /* have to use the microsoft-never-gets-it-right macro */
839 array_free (fdchange, EMPTY0);
840 array_free (timer, EMPTY0);
841 #if EV_PERIODICS
842 array_free (periodic, EMPTY0);
843 #endif
844 array_free (idle, EMPTY0);
845 array_free (prepare, EMPTY0);
846 array_free (check, EMPTY0);
847
848 method = 0;
849 }
850
851 static void
852 loop_fork (EV_P)
853 {
854 #if EV_USE_PORT
855 if (method == EVMETHOD_PORT ) port_fork (EV_A);
856 #endif
857 #if EV_USE_KQUEUE
858 if (method == EVMETHOD_KQUEUE) kqueue_fork (EV_A);
859 #endif
860 #if EV_USE_EPOLL
861 if (method == EVMETHOD_EPOLL ) epoll_fork (EV_A);
862 #endif
863
864 if (ev_is_active (&sigev))
865 {
866 /* default loop */
867
868 ev_ref (EV_A);
869 ev_io_stop (EV_A_ &sigev);
870 close (sigpipe [0]);
871 close (sigpipe [1]);
872
873 while (pipe (sigpipe))
874 syserr ("(libev) error creating pipe");
875
876 siginit (EV_A);
877 }
878
879 postfork = 0;
880 }
881
882 #if EV_MULTIPLICITY
883 struct ev_loop *
884 ev_loop_new (unsigned int flags)
885 {
886 struct ev_loop *loop = (struct ev_loop *)ev_malloc (sizeof (struct ev_loop));
887
888 memset (loop, 0, sizeof (struct ev_loop));
889
890 loop_init (EV_A_ flags);
891
892 if (ev_method (EV_A))
893 return loop;
894
895 return 0;
896 }
897
898 void
899 ev_loop_destroy (EV_P)
900 {
901 loop_destroy (EV_A);
902 ev_free (loop);
903 }
904
905 void
906 ev_loop_fork (EV_P)
907 {
908 postfork = 1;
909 }
910
911 #endif
912
913 #if EV_MULTIPLICITY
914 struct ev_loop *
915 ev_default_loop_init (unsigned int flags)
916 #else
917 int
918 ev_default_loop (unsigned int flags)
919 #endif
920 {
921 if (sigpipe [0] == sigpipe [1])
922 if (pipe (sigpipe))
923 return 0;
924
925 if (!ev_default_loop_ptr)
926 {
927 #if EV_MULTIPLICITY
928 struct ev_loop *loop = ev_default_loop_ptr = &default_loop_struct;
929 #else
930 ev_default_loop_ptr = 1;
931 #endif
932
933 loop_init (EV_A_ flags);
934
935 if (ev_method (EV_A))
936 {
937 siginit (EV_A);
938
939 #ifndef _WIN32
940 ev_signal_init (&childev, childcb, SIGCHLD);
941 ev_set_priority (&childev, EV_MAXPRI);
942 ev_signal_start (EV_A_ &childev);
943 ev_unref (EV_A); /* child watcher should not keep loop alive */
944 #endif
945 }
946 else
947 ev_default_loop_ptr = 0;
948 }
949
950 return ev_default_loop_ptr;
951 }
952
953 void
954 ev_default_destroy (void)
955 {
956 #if EV_MULTIPLICITY
957 struct ev_loop *loop = ev_default_loop_ptr;
958 #endif
959
960 #ifndef _WIN32
961 ev_ref (EV_A); /* child watcher */
962 ev_signal_stop (EV_A_ &childev);
963 #endif
964
965 ev_ref (EV_A); /* signal watcher */
966 ev_io_stop (EV_A_ &sigev);
967
968 close (sigpipe [0]); sigpipe [0] = 0;
969 close (sigpipe [1]); sigpipe [1] = 0;
970
971 loop_destroy (EV_A);
972 }
973
974 void
975 ev_default_fork (void)
976 {
977 #if EV_MULTIPLICITY
978 struct ev_loop *loop = ev_default_loop_ptr;
979 #endif
980
981 if (method)
982 postfork = 1;
983 }
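/* Editor's note: a minimal sketch (not part of the original file) of the fork
   protocol the functions above implement -- after a fork () the child asks
   libev to re-create backend kernel state before the next iteration; only
   ev_default_fork ()/ev_loop_fork () are from this file, everything else is a
   hypothetical example. */
#if 0
#include <unistd.h>
#include "ev.h"

static void
after_fork_in_child (struct ev_loop *loop)
{
  /* backend state (epoll, kqueue, ...) is not usefully inherited across fork,
     so request a re-arm; it takes effect on the next ev_loop () iteration */
  ev_default_fork ();          /* for the default loop */
  /* ev_loop_fork (loop); */   /* for a loop created with ev_loop_new () */

  ev_loop (loop, 0);
}
#endif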
984
985 /*****************************************************************************/
986
987 static int
988 any_pending (EV_P)
989 {
990 int pri;
991
992 for (pri = NUMPRI; pri--; )
993 if (pendingcnt [pri])
994 return 1;
995
996 return 0;
997 }
998
999 inline void
1000 call_pending (EV_P)
1001 {
1002 int pri;
1003
1004 for (pri = NUMPRI; pri--; )
1005 while (pendingcnt [pri])
1006 {
1007 ANPENDING *p = pendings [pri] + --pendingcnt [pri];
1008
1009 if (expect_true (p->w))
1010 {
1011 p->w->pending = 0;
1012 EV_CB_INVOKE (p->w, p->events);
1013 }
1014 }
1015 }
1016
1017 inline void
1018 timers_reify (EV_P)
1019 {
1020 while (timercnt && ((WT)timers [0])->at <= mn_now)
1021 {
1022 struct ev_timer *w = timers [0];
1023
1024 assert (("inactive timer on timer heap detected", ev_is_active (w)));
1025
1026 /* first reschedule or stop timer */
1027 if (w->repeat)
1028 {
1029 assert (("negative ev_timer repeat value found while processing timers", w->repeat > 0.));
1030
1031 ((WT)w)->at += w->repeat;
1032 if (((WT)w)->at < mn_now)
1033 ((WT)w)->at = mn_now;
1034
1035 downheap ((WT *)timers, timercnt, 0);
1036 }
1037 else
1038 ev_timer_stop (EV_A_ w); /* nonrepeating: stop timer */
1039
1040 ev_feed_event (EV_A_ (W)w, EV_TIMEOUT);
1041 }
1042 }
1043
1044 #if EV_PERIODICS
1045 inline void
1046 periodics_reify (EV_P)
1047 {
1048 while (periodiccnt && ((WT)periodics [0])->at <= ev_rt_now)
1049 {
1050 struct ev_periodic *w = periodics [0];
1051
1052 assert (("inactive timer on periodic heap detected", ev_is_active (w)));
1053
1054 /* first reschedule or stop timer */
1055 if (w->reschedule_cb)
1056 {
1057 ((WT)w)->at = w->reschedule_cb (w, ev_rt_now + 0.0001);
1058 assert (("ev_periodic reschedule callback returned time in the past", ((WT)w)->at > ev_rt_now));
1059 downheap ((WT *)periodics, periodiccnt, 0);
1060 }
1061 else if (w->interval)
1062 {
1063 ((WT)w)->at += floor ((ev_rt_now - ((WT)w)->at) / w->interval + 1.) * w->interval;
1064 assert (("ev_periodic timeout in the past detected while processing timers, negative interval?", ((WT)w)->at > ev_rt_now));
1065 downheap ((WT *)periodics, periodiccnt, 0);
1066 }
1067 else
1068 ev_periodic_stop (EV_A_ w); /* nonrepeating: stop timer */
1069
1070 ev_feed_event (EV_A_ (W)w, EV_PERIODIC);
1071 }
1072 }
1073
1074 static void
1075 periodics_reschedule (EV_P)
1076 {
1077 int i;
1078
1079 /* adjust periodics after time jump */
1080 for (i = 0; i < periodiccnt; ++i)
1081 {
1082 struct ev_periodic *w = periodics [i];
1083
1084 if (w->reschedule_cb)
1085 ((WT)w)->at = w->reschedule_cb (w, ev_rt_now);
1086 else if (w->interval)
1087 ((WT)w)->at += ceil ((ev_rt_now - ((WT)w)->at) / w->interval) * w->interval;
1088 }
1089
1090 /* now rebuild the heap */
1091 for (i = periodiccnt >> 1; i--; )
1092 downheap ((WT *)periodics, periodiccnt, i);
1093 }
1094 #endif
1095
1096 inline int
1097 time_update_monotonic (EV_P)
1098 {
1099 mn_now = get_clock ();
1100
1101 if (expect_true (mn_now - now_floor < MIN_TIMEJUMP * .5))
1102 {
1103 ev_rt_now = rtmn_diff + mn_now;
1104 return 0;
1105 }
1106 else
1107 {
1108 now_floor = mn_now;
1109 ev_rt_now = ev_time ();
1110 return 1;
1111 }
1112 }
1113
1114 inline void
1115 time_update (EV_P)
1116 {
1117 int i;
1118
1119 #if EV_USE_MONOTONIC
1120 if (expect_true (have_monotonic))
1121 {
1122 if (time_update_monotonic (EV_A))
1123 {
1124 ev_tstamp odiff = rtmn_diff;
1125
1126 for (i = 4; --i; ) /* loop a few times, before making important decisions */
1127 {
1128 rtmn_diff = ev_rt_now - mn_now;
1129
1130 if (fabs (odiff - rtmn_diff) < MIN_TIMEJUMP)
1131 return; /* all is well */
1132
1133 ev_rt_now = ev_time ();
1134 mn_now = get_clock ();
1135 now_floor = mn_now;
1136 }
1137
1138 # if EV_PERIODICS
1139 periodics_reschedule (EV_A);
1140 # endif
1141 /* no timer adjustment, as the monotonic clock doesn't jump */
1142 /* timers_reschedule (EV_A_ rtmn_diff - odiff) */
1143 }
1144 }
1145 else
1146 #endif
1147 {
1148 ev_rt_now = ev_time ();
1149
1150 if (expect_false (mn_now > ev_rt_now || mn_now < ev_rt_now - MAX_BLOCKTIME - MIN_TIMEJUMP))
1151 {
1152 #if EV_PERIODICS
1153 periodics_reschedule (EV_A);
1154 #endif
1155
1156 /* adjust timers. this is easy, as the offset is the same for all */
1157 for (i = 0; i < timercnt; ++i)
1158 ((WT)timers [i])->at += ev_rt_now - mn_now;
1159 }
1160
1161 mn_now = ev_rt_now;
1162 }
1163 }
1164
1165 void
1166 ev_ref (EV_P)
1167 {
1168 ++activecnt;
1169 }
1170
1171 void
1172 ev_unref (EV_P)
1173 {
1174 --activecnt;
1175 }
1176
1177 static int loop_done;
1178
1179 void
1180 ev_loop (EV_P_ int flags)
1181 {
1182 double block;
1183 loop_done = flags & (EVLOOP_ONESHOT | EVLOOP_NONBLOCK) ? 1 : 0;
1184
1185 while (activecnt)
1186 {
1187 /* queue prepare watchers (and execute them) */
1188 if (expect_false (preparecnt))
1189 {
1190 queue_events (EV_A_ (W *)prepares, preparecnt, EV_PREPARE);
1191 call_pending (EV_A);
1192 }
1193
1194 /* we might have forked, so reify kernel state if necessary */
1195 if (expect_false (postfork))
1196 loop_fork (EV_A);
1197
1198 /* update fd-related kernel structures */
1199 fd_reify (EV_A);
1200
1201 /* calculate blocking time */
1202
1203 /* we only need this for !monotonic clock or timers, but as we basically
1204 always have timers, we just calculate it always */
1205 #if EV_USE_MONOTONIC
1206 if (expect_true (have_monotonic))
1207 time_update_monotonic (EV_A);
1208 else
1209 #endif
1210 {
1211 ev_rt_now = ev_time ();
1212 mn_now = ev_rt_now;
1213 }
1214
1215 if (flags & EVLOOP_NONBLOCK || idlecnt)
1216 block = 0.;
1217 else
1218 {
1219 block = MAX_BLOCKTIME;
1220
1221 if (timercnt)
1222 {
1223 ev_tstamp to = ((WT)timers [0])->at - mn_now + method_fudge;
1224 if (block > to) block = to;
1225 }
1226
1227 #if EV_PERIODICS
1228 if (periodiccnt)
1229 {
1230 ev_tstamp to = ((WT)periodics [0])->at - ev_rt_now + method_fudge;
1231 if (block > to) block = to;
1232 }
1233 #endif
1234
1235 if (expect_false (block < 0.)) block = 0.;
1236 }
1237
1238 method_poll (EV_A_ block);
1239
1240 /* update ev_rt_now, do magic */
1241 time_update (EV_A);
1242
1243 /* queue pending timers and reschedule them */
1244 timers_reify (EV_A); /* relative timers called last */
1245 #if EV_PERIODICS
1246 periodics_reify (EV_A); /* absolute timers called first */
1247 #endif
1248
1249 /* queue idle watchers unless io or timers are pending */
1250 if (idlecnt && !any_pending (EV_A))
1251 queue_events (EV_A_ (W *)idles, idlecnt, EV_IDLE);
1252
1253 /* queue check watchers, to be executed first */
1254 if (expect_false (checkcnt))
1255 queue_events (EV_A_ (W *)checks, checkcnt, EV_CHECK);
1256
1257 call_pending (EV_A);
1258
1259 if (expect_false (loop_done))
1260 break;
1261 }
1262
1263 if (loop_done != 2)
1264 loop_done = 0;
1265 }
1266
1267 void
1268 ev_unloop (EV_P_ int how)
1269 {
1270 loop_done = how;
1271 }
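/* Editor's note: a minimal, self-contained sketch (not part of the original
   file) of the loop API defined above -- ev_default_loop, ev_io_start, ev_loop
   and ev_unloop, using the pre-4.0 names present in this revision; "stdin_cb"
   and the watcher are hypothetical, and the literal 2 passed to ev_unloop is
   the "stop all nested loops" value implied by the loop_done logic above. */
#if 0
#include <stdio.h>
#include "ev.h"

static struct ev_io stdin_watcher;

static void
stdin_cb (EV_P_ struct ev_io *w, int revents)
{
  puts ("stdin became readable");
  ev_unloop (EV_A_ 2); /* 2 == stop all nested ev_loop calls */
}

int
main (void)
{
  struct ev_loop *loop = ev_default_loop (0);

  ev_io_init (&stdin_watcher, stdin_cb, 0 /* stdin */, EV_READ);
  ev_io_start (loop, &stdin_watcher);

  ev_loop (loop, 0); /* blocks until ev_unloop or no active watchers remain */
  return 0;
}
#endif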
1272
1273 /*****************************************************************************/
1274
1275 inline void
1276 wlist_add (WL *head, WL elem)
1277 {
1278 elem->next = *head;
1279 *head = elem;
1280 }
1281
1282 inline void
1283 wlist_del (WL *head, WL elem)
1284 {
1285 while (*head)
1286 {
1287 if (*head == elem)
1288 {
1289 *head = elem->next;
1290 return;
1291 }
1292
1293 head = &(*head)->next;
1294 }
1295 }
1296
1297 inline void
1298 ev_clear_pending (EV_P_ W w)
1299 {
1300 if (w->pending)
1301 {
1302 pendings [ABSPRI (w)][w->pending - 1].w = 0;
1303 w->pending = 0;
1304 }
1305 }
1306
1307 inline void
1308 ev_start (EV_P_ W w, int active)
1309 {
1310 if (w->priority < EV_MINPRI) w->priority = EV_MINPRI;
1311 if (w->priority > EV_MAXPRI) w->priority = EV_MAXPRI;
1312
1313 w->active = active;
1314 ev_ref (EV_A);
1315 }
1316
1317 inline void
1318 ev_stop (EV_P_ W w)
1319 {
1320 ev_unref (EV_A);
1321 w->active = 0;
1322 }
1323
1324 /*****************************************************************************/
1325
1326 void
1327 ev_io_start (EV_P_ struct ev_io *w)
1328 {
1329 int fd = w->fd;
1330
1331 if (expect_false (ev_is_active (w)))
1332 return;
1333
1334 assert (("ev_io_start called with negative fd", fd >= 0));
1335
1336 ev_start (EV_A_ (W)w, 1);
1337 array_needsize (ANFD, anfds, anfdmax, fd + 1, anfds_init);
1338 wlist_add ((WL *)&anfds[fd].head, (WL)w);
1339
1340 fd_change (EV_A_ fd);
1341 }
1342
1343 void
1344 ev_io_stop (EV_P_ struct ev_io *w)
1345 {
1346 ev_clear_pending (EV_A_ (W)w);
1347 if (expect_false (!ev_is_active (w)))
1348 return;
1349
1350 assert (("ev_io_start called with illegal fd (must stay constant after start!)", w->fd >= 0 && w->fd < anfdmax));
1351
1352 wlist_del ((WL *)&anfds[w->fd].head, (WL)w);
1353 ev_stop (EV_A_ (W)w);
1354
1355 fd_change (EV_A_ w->fd);
1356 }
1357
1358 void
1359 ev_timer_start (EV_P_ struct ev_timer *w)
1360 {
1361 if (expect_false (ev_is_active (w)))
1362 return;
1363
1364 ((WT)w)->at += mn_now;
1365
1366 assert (("ev_timer_start called with negative timer repeat value", w->repeat >= 0.));
1367
1368 ev_start (EV_A_ (W)w, ++timercnt);
1369 array_needsize (struct ev_timer *, timers, timermax, timercnt, EMPTY2);
1370 timers [timercnt - 1] = w;
1371 upheap ((WT *)timers, timercnt - 1);
1372
1373 assert (("internal timer heap corruption", timers [((W)w)->active - 1] == w));
1374 }
1375
1376 void
1377 ev_timer_stop (EV_P_ struct ev_timer *w)
1378 {
1379 ev_clear_pending (EV_A_ (W)w);
1380 if (expect_false (!ev_is_active (w)))
1381 return;
1382
1383 assert (("internal timer heap corruption", timers [((W)w)->active - 1] == w));
1384
1385 if (expect_true (((W)w)->active < timercnt--))
1386 {
1387 timers [((W)w)->active - 1] = timers [timercnt];
1388 adjustheap ((WT *)timers, timercnt, ((W)w)->active - 1);
1389 }
1390
1391 ((WT)w)->at -= mn_now;
1392
1393 ev_stop (EV_A_ (W)w);
1394 }
1395
1396 void
1397 ev_timer_again (EV_P_ struct ev_timer *w)
1398 {
1399 if (ev_is_active (w))
1400 {
1401 if (w->repeat)
1402 {
1403 ((WT)w)->at = mn_now + w->repeat;
1404 adjustheap ((WT *)timers, timercnt, ((W)w)->active - 1);
1405 }
1406 else
1407 ev_timer_stop (EV_A_ w);
1408 }
1409 else if (w->repeat)
1410 {
1411 w->at = w->repeat;
1412 ev_timer_start (EV_A_ w);
1413 }
1414 }
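/* Editor's note: a sketch (not part of the original file) of the
   inactivity-timeout pattern ev_timer_again above is meant for -- re-arming a
   repeating timer on every activity instead of stopping and restarting it; the
   names below are hypothetical. */
#if 0
static struct ev_timer timeout_watcher;

static void
timeout_cb (EV_P_ struct ev_timer *w, int revents)
{
  /* no activity for 10 seconds */
}

static void
connection_setup (EV_P)
{
  ev_init (&timeout_watcher, timeout_cb);
  timeout_watcher.repeat = 10.;
  ev_timer_again (EV_A_ &timeout_watcher); /* starts the repeat timer */
}

static void
connection_activity (EV_P)
{
  /* cheap: pushes the timeout another w->repeat seconds into the future */
  ev_timer_again (EV_A_ &timeout_watcher);
}
#endif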
1415
1416 #if EV_PERIODICS
1417 void
1418 ev_periodic_start (EV_P_ struct ev_periodic *w)
1419 {
1420 if (expect_false (ev_is_active (w)))
1421 return;
1422
1423 if (w->reschedule_cb)
1424 ((WT)w)->at = w->reschedule_cb (w, ev_rt_now);
1425 else if (w->interval)
1426 {
1427 assert (("ev_periodic_start called with negative interval value", w->interval >= 0.));
1428 /* this formula differs from the one in periodic_reify because we do not always round up */
1429 ((WT)w)->at += ceil ((ev_rt_now - ((WT)w)->at) / w->interval) * w->interval;
1430 }
1431
1432 ev_start (EV_A_ (W)w, ++periodiccnt);
1433 array_needsize (struct ev_periodic *, periodics, periodicmax, periodiccnt, EMPTY2);
1434 periodics [periodiccnt - 1] = w;
1435 upheap ((WT *)periodics, periodiccnt - 1);
1436
1437 assert (("internal periodic heap corruption", periodics [((W)w)->active - 1] == w));
1438 }
1439
1440 void
1441 ev_periodic_stop (EV_P_ struct ev_periodic *w)
1442 {
1443 ev_clear_pending (EV_A_ (W)w);
1444 if (expect_false (!ev_is_active (w)))
1445 return;
1446
1447 assert (("internal periodic heap corruption", periodics [((W)w)->active - 1] == w));
1448
1449 if (expect_true (((W)w)->active < periodiccnt--))
1450 {
1451 periodics [((W)w)->active - 1] = periodics [periodiccnt];
1452 adjustheap ((WT *)periodics, periodiccnt, ((W)w)->active - 1);
1453 }
1454
1455 ev_stop (EV_A_ (W)w);
1456 }
1457
1458 void
1459 ev_periodic_again (EV_P_ struct ev_periodic *w)
1460 {
1461 /* TODO: use adjustheap and recalculation */
1462 ev_periodic_stop (EV_A_ w);
1463 ev_periodic_start (EV_A_ w);
1464 }
1465 #endif
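/* Editor's note: a sketch (not part of the original file) of the two ways an
   ev_periodic above is typically configured -- a fixed wall-clock interval, or
   a reschedule callback returning the next absolute time; it assumes the
   ev_periodic_init of this era takes (at, interval, reschedule_cb); all names
   below are hypothetical. */
#if 0
static struct ev_periodic hourly;

static void
hourly_cb (EV_P_ struct ev_periodic *w, int revents)
{
  /* called at every full hour of wall-clock time, even across time jumps */
}

static ev_tstamp
ten_seconds_later (struct ev_periodic *w, ev_tstamp now)
{
  /* reschedule callback: return the absolute time of the next invocation */
  return now + 10.;
}

static void
setup (EV_P)
{
  ev_periodic_init (&hourly, hourly_cb, 0., 3600., 0);
  /* or: ev_periodic_init (&hourly, hourly_cb, 0., 0., ten_seconds_later); */
  ev_periodic_start (EV_A_ &hourly);
}
#endif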
1466
1467 void
1468 ev_idle_start (EV_P_ struct ev_idle *w)
1469 {
1470 if (expect_false (ev_is_active (w)))
1471 return;
1472
1473 ev_start (EV_A_ (W)w, ++idlecnt);
1474 array_needsize (struct ev_idle *, idles, idlemax, idlecnt, EMPTY2);
1475 idles [idlecnt - 1] = w;
1476 }
1477
1478 void
1479 ev_idle_stop (EV_P_ struct ev_idle *w)
1480 {
1481 ev_clear_pending (EV_A_ (W)w);
1482 if (expect_false (!ev_is_active (w)))
1483 return;
1484
1485 idles [((W)w)->active - 1] = idles [--idlecnt];
1486 ev_stop (EV_A_ (W)w);
1487 }
1488
1489 void
1490 ev_prepare_start (EV_P_ struct ev_prepare *w)
1491 {
1492 if (expect_false (ev_is_active (w)))
1493 return;
1494
1495 ev_start (EV_A_ (W)w, ++preparecnt);
1496 array_needsize (struct ev_prepare *, prepares, preparemax, preparecnt, EMPTY2);
1497 prepares [preparecnt - 1] = w;
1498 }
1499
1500 void
1501 ev_prepare_stop (EV_P_ struct ev_prepare *w)
1502 {
1503 ev_clear_pending (EV_A_ (W)w);
1504 if (expect_false (!ev_is_active (w)))
1505 return;
1506
1507 prepares [((W)w)->active - 1] = prepares [--preparecnt];
1508 ev_stop (EV_A_ (W)w);
1509 }
1510
1511 void
1512 ev_check_start (EV_P_ struct ev_check *w)
1513 {
1514 if (expect_false (ev_is_active (w)))
1515 return;
1516
1517 ev_start (EV_A_ (W)w, ++checkcnt);
1518 array_needsize (struct ev_check *, checks, checkmax, checkcnt, EMPTY2);
1519 checks [checkcnt - 1] = w;
1520 }
1521
1522 void
1523 ev_check_stop (EV_P_ struct ev_check *w)
1524 {
1525 ev_clear_pending (EV_A_ (W)w);
1526 if (expect_false (!ev_is_active (w)))
1527 return;
1528
1529 checks [((W)w)->active - 1] = checks [--checkcnt];
1530 ev_stop (EV_A_ (W)w);
1531 }
1532
1533 #ifndef SA_RESTART
1534 # define SA_RESTART 0
1535 #endif
1536
1537 void
1538 ev_signal_start (EV_P_ struct ev_signal *w)
1539 {
1540 #if EV_MULTIPLICITY
1541 assert (("signal watchers are only supported in the default loop", loop == ev_default_loop_ptr));
1542 #endif
1543 if (expect_false (ev_is_active (w)))
1544 return;
1545
1546 assert (("ev_signal_start called with illegal signal number", w->signum > 0));
1547
1548 ev_start (EV_A_ (W)w, 1);
1549 array_needsize (ANSIG, signals, signalmax, w->signum, signals_init);
1550 wlist_add ((WL *)&signals [w->signum - 1].head, (WL)w);
1551
1552 if (!((WL)w)->next)
1553 {
1554 #ifdef _WIN32
1555 signal (w->signum, sighandler);
1556 #else
1557 struct sigaction sa;
1558 sa.sa_handler = sighandler;
1559 sigfillset (&sa.sa_mask);
1560 sa.sa_flags = SA_RESTART; /* if restarting works we save one iteration */
1561 sigaction (w->signum, &sa, 0);
1562 #endif
1563 }
1564 }
1565
1566 void
1567 ev_signal_stop (EV_P_ struct ev_signal *w)
1568 {
1569 ev_clear_pending (EV_A_ (W)w);
1570 if (expect_false (!ev_is_active (w)))
1571 return;
1572
1573 wlist_del ((WL *)&signals [w->signum - 1].head, (WL)w);
1574 ev_stop (EV_A_ (W)w);
1575
1576 if (!signals [w->signum - 1].head)
1577 signal (w->signum, SIG_DFL);
1578 }
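/* Editor's note: a minimal sketch (not part of the original file) of a signal
   watcher as started/stopped by the functions above -- catching SIGINT to stop
   the default loop; names are hypothetical. */
#if 0
#include <signal.h>
#include "ev.h"

static struct ev_signal sigint_watcher;

static void
sigint_cb (EV_P_ struct ev_signal *w, int revents)
{
  /* runs in normal (loop) context, not inside the raw signal handler */
  ev_unloop (EV_A_ 2 /* stop all nested ev_loop calls */);
}

static void
setup_signals (EV_P)
{
  /* signal watchers are only supported in the default loop (see assert above) */
  ev_signal_init (&sigint_watcher, sigint_cb, SIGINT);
  ev_signal_start (EV_A_ &sigint_watcher);
}
#endif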
1579
1580 void
1581 ev_child_start (EV_P_ struct ev_child *w)
1582 {
1583 #if EV_MULTIPLICITY
1584 assert (("child watchers are only supported in the default loop", loop == ev_default_loop_ptr));
1585 #endif
1586 if (expect_false (ev_is_active (w)))
1587 return;
1588
1589 ev_start (EV_A_ (W)w, 1);
1590 wlist_add ((WL *)&childs [w->pid & (PID_HASHSIZE - 1)], (WL)w);
1591 }
1592
1593 void
1594 ev_child_stop (EV_P_ struct ev_child *w)
1595 {
1596 ev_clear_pending (EV_A_ (W)w);
1597 if (expect_false (!ev_is_active (w)))
1598 return;
1599
1600 wlist_del ((WL *)&childs [w->pid & (PID_HASHSIZE - 1)], (WL)w);
1601 ev_stop (EV_A_ (W)w);
1602 }
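/* Editor's note: a sketch (not part of the original file) of a child watcher;
   it assumes the two-argument ev_child_init of this vintage (callback and pid,
   no trace flag); names are hypothetical. */
#if 0
#include <stdio.h>
#include <sys/wait.h>
#include "ev.h"

static struct ev_child child_watcher;

static void
child_cb (EV_P_ struct ev_child *w, int revents)
{
  /* w->rpid and w->rstatus are filled in by child_reap () above */
  if (WIFEXITED (w->rstatus))
    printf ("pid %d exited with status %d\n", w->rpid, WEXITSTATUS (w->rstatus));

  ev_child_stop (EV_A_ w);
}

static void
watch_child (EV_P_ int pid)
{
  /* pid == 0 would match any child; child watchers only work in the default loop */
  ev_child_init (&child_watcher, child_cb, pid);
  ev_child_start (EV_A_ &child_watcher);
}
#endif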
1603
1604 /*****************************************************************************/
1605
1606 struct ev_once
1607 {
1608 struct ev_io io;
1609 struct ev_timer to;
1610 void (*cb)(int revents, void *arg);
1611 void *arg;
1612 };
1613
1614 static void
1615 once_cb (EV_P_ struct ev_once *once, int revents)
1616 {
1617 void (*cb)(int revents, void *arg) = once->cb;
1618 void *arg = once->arg;
1619
1620 ev_io_stop (EV_A_ &once->io);
1621 ev_timer_stop (EV_A_ &once->to);
1622 ev_free (once);
1623
1624 cb (revents, arg);
1625 }
1626
1627 static void
1628 once_cb_io (EV_P_ struct ev_io *w, int revents)
1629 {
1630 once_cb (EV_A_ (struct ev_once *)(((char *)w) - offsetof (struct ev_once, io)), revents);
1631 }
1632
1633 static void
1634 once_cb_to (EV_P_ struct ev_timer *w, int revents)
1635 {
1636 once_cb (EV_A_ (struct ev_once *)(((char *)w) - offsetof (struct ev_once, to)), revents);
1637 }
1638
1639 void
1640 ev_once (EV_P_ int fd, int events, ev_tstamp timeout, void (*cb)(int revents, void *arg), void *arg)
1641 {
1642 struct ev_once *once = (struct ev_once *)ev_malloc (sizeof (struct ev_once));
1643
1644 if (expect_false (!once))
1645 {
1646 cb (EV_ERROR | EV_READ | EV_WRITE | EV_TIMEOUT, arg);
1647 return;
1648 }
1649
1650 once->cb = cb;
1651 once->arg = arg;
1652
1653 ev_init (&once->io, once_cb_io);
1654 if (fd >= 0)
1655 {
1656 ev_io_set (&once->io, fd, events);
1657 ev_io_start (EV_A_ &once->io);
1658 }
1659
1660 ev_init (&once->to, once_cb_to);
1661 if (timeout >= 0.)
1662 {
1663 ev_timer_set (&once->to, timeout, 0.);
1664 ev_timer_start (EV_A_ &once->to);
1665 }
1666 }
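/* Editor's note: a minimal sketch (not part of the original file) of ev_once as
   implemented above -- a one-shot composite watcher that fires exactly once on
   either readability or a timeout and then frees itself; names are hypothetical. */
#if 0
#include <stdio.h>
#include "ev.h"

static void
once_done (int revents, void *arg)
{
  if (revents & EV_TIMEOUT)
    puts ("timed out");
  else if (revents & EV_READ)
    puts ("fd became readable");
}

static void
wait_readable_or_timeout (EV_P_ int fd)
{
  /* no watcher structs to manage: ev_once allocates and frees them itself */
  ev_once (EV_A_ fd, EV_READ, 5. /* seconds */, once_done, 0);
}
#endif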
1667
1668 #ifdef __cplusplus
1669 }
1670 #endif
1671