/*
 * libev event processing core, watcher management
 *
 * Copyright (c) 2007 Marc Alexander Lehmann <libev@schmorp.de>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met:
 *
 *   * Redistributions of source code must retain the above copyright
 *     notice, this list of conditions and the following disclaimer.
 *
 *   * Redistributions in binary form must reproduce the above
 *     copyright notice, this list of conditions and the following
 *     disclaimer in the documentation and/or other materials provided
 *     with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#ifndef EV_STANDALONE
# include "config.h"

# if HAVE_CLOCK_GETTIME
#  define EV_USE_MONOTONIC 1
#  define EV_USE_REALTIME  1
# endif

# if HAVE_SELECT && HAVE_SYS_SELECT_H
#  define EV_USE_SELECT 1
# endif

# if HAVE_POLL && HAVE_POLL_H
#  define EV_USE_POLL 1
# endif

# if HAVE_EPOLL && HAVE_EPOLL_CTL && HAVE_SYS_EPOLL_H
#  define EV_USE_EPOLL 1
# endif

# if HAVE_KQUEUE && HAVE_WORKING_KQUEUE && HAVE_SYS_EVENT_H && HAVE_SYS_QUEUE_H
#  define EV_USE_KQUEUE 1
# endif

#endif

#include <math.h>
#include <stdlib.h>
#include <unistd.h>
#include <fcntl.h>
#include <signal.h>
#include <stddef.h>

#include <stdio.h>

#include <assert.h>
#include <errno.h>
#include <sys/types.h>
#ifndef WIN32
# include <sys/wait.h>
#endif
#include <sys/time.h>
#include <time.h>

/**/

#ifndef EV_USE_MONOTONIC
# define EV_USE_MONOTONIC 1
#endif

#ifndef EV_USE_SELECT
# define EV_USE_SELECT 1
#endif

#ifndef EV_USE_POLL
# define EV_USE_POLL 0 /* poll is usually slower than select, and not as well tested */
#endif

#ifndef EV_USE_EPOLL
# define EV_USE_EPOLL 0
#endif

#ifndef EV_USE_KQUEUE
# define EV_USE_KQUEUE 0
#endif

#ifndef EV_USE_WIN32
# ifdef WIN32
#  define EV_USE_WIN32 1
# else
#  define EV_USE_WIN32 0
# endif
#endif

#ifndef EV_USE_REALTIME
# define EV_USE_REALTIME 1
#endif

/**/

#ifndef CLOCK_MONOTONIC
# undef EV_USE_MONOTONIC
# define EV_USE_MONOTONIC 0
#endif

#ifndef CLOCK_REALTIME
# undef EV_USE_REALTIME
# define EV_USE_REALTIME 0
#endif

/**/

#define MIN_TIMEJUMP  1. /* minimum timejump that gets detected (if monotonic clock available) */
#define MAX_BLOCKTIME 59.731 /* never wait longer than this time (to detect time jumps) */
#define PID_HASHSIZE  16 /* size of pid hash table, must be power of two */
/*#define CLEANUP_INTERVAL 300. /* how often to try to free memory and re-check fds */

#include "ev.h"

#if __GNUC__ >= 3
# define expect(expr,value) __builtin_expect ((expr),(value))
# define inline             inline
#else
# define expect(expr,value) (expr)
# define inline             static
#endif

#define expect_false(expr) expect ((expr) != 0, 0)
#define expect_true(expr)  expect ((expr) != 0, 1)

#define NUMPRI    (EV_MAXPRI - EV_MINPRI + 1)
#define ABSPRI(w) ((w)->priority - EV_MINPRI)

typedef struct ev_watcher *W;
typedef struct ev_watcher_list *WL;
typedef struct ev_watcher_time *WT;

static int have_monotonic; /* did clock_gettime (CLOCK_MONOTONIC) work? */

/*****************************************************************************/

typedef struct
{
  struct ev_watcher_list *head;
  unsigned char events;
  unsigned char reify;
} ANFD;

typedef struct
{
  W w;
  int events;
} ANPENDING;

#if EV_MULTIPLICITY

struct ev_loop
{
# define VAR(name,decl) decl;
# include "ev_vars.h"
};
# undef VAR
# include "ev_wrap.h"

#else

# define VAR(name,decl) static decl;
# include "ev_vars.h"
# undef VAR

#endif

/*****************************************************************************/

inline ev_tstamp
ev_time (void)
{
#if EV_USE_REALTIME
  struct timespec ts;
  clock_gettime (CLOCK_REALTIME, &ts);
  return ts.tv_sec + ts.tv_nsec * 1e-9;
#else
  struct timeval tv;
  gettimeofday (&tv, 0);
  return tv.tv_sec + tv.tv_usec * 1e-6;
#endif
}

inline ev_tstamp
get_clock (void)
{
#if EV_USE_MONOTONIC
  if (expect_true (have_monotonic))
    {
      struct timespec ts;
      clock_gettime (CLOCK_MONOTONIC, &ts);
      return ts.tv_sec + ts.tv_nsec * 1e-9;
    }
#endif

  return ev_time ();
}

ev_tstamp
ev_now (EV_P)
{
  return rt_now;
}
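
/* Note on the two clocks used throughout this file: ev_time () reads the
 * wall clock (CLOCK_REALTIME or gettimeofday), while get_clock () prefers
 * CLOCK_MONOTONIC when it is available and falls back to ev_time ()
 * otherwise.  The loop keeps both values: rt_now drives absolute (periodic)
 * watchers and mn_now drives relative timers, so a wall-clock step cannot
 * fire relative timers too early or too late. */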

#define array_roundsize(base,n) ((n) | 4 & ~3)

#define array_needsize(base,cur,cnt,init)                \
  if (expect_false ((cnt) > cur))                        \
    {                                                    \
      int newcnt = cur;                                  \
      do                                                 \
        {                                                \
          newcnt = array_roundsize (base, newcnt << 1);  \
        }                                                \
      while ((cnt) > newcnt);                            \
                                                         \
      base = realloc (base, sizeof (*base) * (newcnt));  \
      init (base + cur, newcnt - cur);                   \
      cur = newcnt;                                      \
    }

#define array_free(stem, idx) \
  free (stem ## s idx); stem ## cnt idx = stem ## max idx = 0;
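
/* Illustrative note (not part of the library proper): array_needsize grows a
 * dynamic array to at least cnt elements, doubling and rounding the new size
 * via array_roundsize and initialising the freshly allocated tail with init,
 * as in the call used later in ev_io_start:
 *
 *   array_needsize (anfds, anfdmax, fd + 1, anfds_init);
 *
 * array_free pastes the conventional triple of names, so array_free (timer, )
 * expands to: free (timers); timercnt = timermax = 0; */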

/*****************************************************************************/

static void
anfds_init (ANFD *base, int count)
{
  while (count--)
    {
      base->head   = 0;
      base->events = EV_NONE;
      base->reify  = 0;

      ++base;
    }
}

static void
event (EV_P_ W w, int events)
{
  if (w->pending)
    {
      pendings [ABSPRI (w)][w->pending - 1].events |= events;
      return;
    }

  w->pending = ++pendingcnt [ABSPRI (w)];
  array_needsize (pendings [ABSPRI (w)], pendingmax [ABSPRI (w)], pendingcnt [ABSPRI (w)], );
  pendings [ABSPRI (w)][w->pending - 1].w      = w;
  pendings [ABSPRI (w)][w->pending - 1].events = events;
}

static void
queue_events (EV_P_ W *events, int eventcnt, int type)
{
  int i;

  for (i = 0; i < eventcnt; ++i)
    event (EV_A_ events [i], type);
}

static void
fd_event (EV_P_ int fd, int events)
{
  ANFD *anfd = anfds + fd;
  struct ev_io *w;

  for (w = (struct ev_io *)anfd->head; w; w = (struct ev_io *)((WL)w)->next)
    {
      int ev = w->events & events;

      if (ev)
        event (EV_A_ (W)w, ev);
    }
}

/*****************************************************************************/

static void
fd_reify (EV_P)
{
  int i;

  for (i = 0; i < fdchangecnt; ++i)
    {
      int fd = fdchanges [i];
      ANFD *anfd = anfds + fd;
      struct ev_io *w;

      int events = 0;

      for (w = (struct ev_io *)anfd->head; w; w = (struct ev_io *)((WL)w)->next)
        events |= w->events;

      anfd->reify = 0;

      method_modify (EV_A_ fd, anfd->events, events);
      anfd->events = events;
    }

  fdchangecnt = 0;
}

static void
fd_change (EV_P_ int fd)
{
  if (anfds [fd].reify || fdchangecnt < 0)
    return;

  anfds [fd].reify = 1;

  ++fdchangecnt;
  array_needsize (fdchanges, fdchangemax, fdchangecnt, );
  fdchanges [fdchangecnt - 1] = fd;
}
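
/* fd_change only records that the watcher set for an fd changed; fdchanges
 * is a queue of dirty fds that fd_reify walks once per loop iteration,
 * computing the union of the wanted event bits and handing old/new event
 * sets to the backend via method_modify.  Starting and stopping several
 * watchers on the same fd between two iterations therefore costs a single
 * backend update. */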

static void
fd_kill (EV_P_ int fd)
{
  struct ev_io *w;

  while ((w = (struct ev_io *)anfds [fd].head))
    {
      ev_io_stop (EV_A_ w);
      event (EV_A_ (W)w, EV_ERROR | EV_READ | EV_WRITE);
    }
}

/* called on EBADF to verify fds */
static void
fd_ebadf (EV_P)
{
  int fd;

  for (fd = 0; fd < anfdmax; ++fd)
    if (anfds [fd].events)
      if (fcntl (fd, F_GETFD) == -1 && errno == EBADF)
        fd_kill (EV_A_ fd);
}

/* called on ENOMEM in select/poll to kill some fds and retry */
static void
fd_enomem (EV_P)
{
  int fd;

  for (fd = anfdmax; fd--; )
    if (anfds [fd].events)
      {
        close (fd);
        fd_kill (EV_A_ fd);
        return;
      }
}

/* usually called after fork if method needs to re-arm all fds from scratch */
static void
fd_rearm_all (EV_P)
{
  int fd;

  /* this should be highly optimised to not do anything but set a flag */
  for (fd = 0; fd < anfdmax; ++fd)
    if (anfds [fd].events)
      {
        anfds [fd].events = 0;
        fd_change (EV_A_ fd);
      }
}

/*****************************************************************************/

static void
upheap (WT *heap, int k)
{
  WT w = heap [k];

  while (k && heap [k >> 1]->at > w->at)
    {
      heap [k] = heap [k >> 1];
      ((W)heap [k])->active = k + 1;
      k >>= 1;
    }

  heap [k] = w;
  ((W)heap [k])->active = k + 1;

}

static void
downheap (WT *heap, int N, int k)
{
  WT w = heap [k];

  while (k < (N >> 1))
    {
      int j = k << 1;

      if (j + 1 < N && heap [j]->at > heap [j + 1]->at)
        ++j;

      if (w->at <= heap [j]->at)
        break;

      heap [k] = heap [j];
      ((W)heap [k])->active = k + 1;
      k = j;
    }

  heap [k] = w;
  ((W)heap [k])->active = k + 1;
}
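
/* The timer and periodic queues are array-based min-heaps keyed on ->at,
 * with the earliest timeout at heap [0]; parent and child indices are
 * computed with shifts (k >> 1 and k << 1).  A watcher's ->active field
 * stores its heap index plus one, so ev_timer_stop / ev_periodic_stop can
 * locate and remove an arbitrary watcher without searching the heap. */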

/*****************************************************************************/

typedef struct
{
  struct ev_watcher_list *head;
  sig_atomic_t volatile gotsig;
} ANSIG;

static ANSIG *signals;
static int signalmax;

static int sigpipe [2];
static sig_atomic_t volatile gotsig;
static struct ev_io sigev;

static void
signals_init (ANSIG *base, int count)
{
  while (count--)
    {
      base->head   = 0;
      base->gotsig = 0;

      ++base;
    }
}

static void
sighandler (int signum)
{
  signals [signum - 1].gotsig = 1;

  if (!gotsig)
    {
      int old_errno = errno;
      gotsig = 1;
      write (sigpipe [1], &signum, 1);
      errno = old_errno;
    }
}

static void
sigcb (EV_P_ struct ev_io *iow, int revents)
{
  struct ev_watcher_list *w;
  int signum;

  read (sigpipe [0], &revents, 1);
  gotsig = 0;

  for (signum = signalmax; signum--; )
    if (signals [signum].gotsig)
      {
        signals [signum].gotsig = 0;

        for (w = signals [signum].head; w; w = w->next)
          event (EV_A_ (W)w, EV_SIGNAL);
      }
}

static void
siginit (EV_P)
{
#ifndef WIN32
  fcntl (sigpipe [0], F_SETFD, FD_CLOEXEC);
  fcntl (sigpipe [1], F_SETFD, FD_CLOEXEC);

  /* rather than sort out whether we really need nb, set it */
  fcntl (sigpipe [0], F_SETFL, O_NONBLOCK);
  fcntl (sigpipe [1], F_SETFL, O_NONBLOCK);
#endif

  ev_io_set (&sigev, sigpipe [0], EV_READ);
  ev_io_start (EV_A_ &sigev);
  ev_unref (EV_A); /* signal pipe watcher should not keep loop alive */
}
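
/* Signal handling uses the classic self-pipe trick: sighandler only sets a
 * per-signal flag plus the global gotsig flag and writes one byte to
 * sigpipe [1]; the loop watches sigpipe [0] with sigev and dispatches the
 * EV_SIGNAL events from sigcb in normal (non-signal) context.  Both pipe
 * ends are made close-on-exec and non-blocking so a burst of signals cannot
 * block inside the handler. */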

/*****************************************************************************/

#ifndef WIN32

static struct ev_child *childs [PID_HASHSIZE];
static struct ev_signal childev;

#ifndef WCONTINUED
# define WCONTINUED 0
#endif

static void
child_reap (EV_P_ struct ev_signal *sw, int chain, int pid, int status)
{
  struct ev_child *w;

  for (w = (struct ev_child *)childs [chain & (PID_HASHSIZE - 1)]; w; w = (struct ev_child *)((WL)w)->next)
    if (w->pid == pid || !w->pid)
      {
        ev_priority (w) = ev_priority (sw); /* need to do it *now* */
        w->rpid    = pid;
        w->rstatus = status;
        event (EV_A_ (W)w, EV_CHILD);
      }
}

static void
childcb (EV_P_ struct ev_signal *sw, int revents)
{
  int pid, status;

  if (0 < (pid = waitpid (-1, &status, WNOHANG | WUNTRACED | WCONTINUED)))
    {
      /* make sure we are called again until all children have been reaped */
      event (EV_A_ (W)sw, EV_SIGNAL);

      child_reap (EV_A_ sw, pid, pid, status);
      child_reap (EV_A_ sw, 0, pid, status); /* this might trigger a watcher twice, but event catches that */
    }
}
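
/* Child watchers live in a small hash table indexed by pid & (PID_HASHSIZE - 1);
 * a watcher with pid == 0 matches any child, which is why child_reap is
 * called twice, once for the exact pid chain and once for chain 0.  childcb
 * also re-queues the SIGCHLD watcher itself, so it keeps being invoked until
 * waitpid has no more exited children to report. */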

#endif

/*****************************************************************************/

#if EV_USE_KQUEUE
# include "ev_kqueue.c"
#endif
#if EV_USE_EPOLL
# include "ev_epoll.c"
#endif
#if EV_USE_POLL
# include "ev_poll.c"
#endif
#if EV_USE_SELECT
# include "ev_select.c"
#endif

int
ev_version_major (void)
{
  return EV_VERSION_MAJOR;
}

int
ev_version_minor (void)
{
  return EV_VERSION_MINOR;
}

/* return true if we are running with elevated privileges and should ignore env variables */
static int
enable_secure (void)
{
#ifdef WIN32
  return 0;
#else
  return getuid () != geteuid ()
      || getgid () != getegid ();
#endif
}

int
ev_method (EV_P)
{
  return method;
}

static void
loop_init (EV_P_ int methods)
{
  if (!method)
    {
#if EV_USE_MONOTONIC
      {
        struct timespec ts;
        if (!clock_gettime (CLOCK_MONOTONIC, &ts))
          have_monotonic = 1;
      }
#endif

      rt_now    = ev_time ();
      mn_now    = get_clock ();
      now_floor = mn_now;
      rtmn_diff = rt_now - mn_now;

      if (methods == EVMETHOD_AUTO)
        if (!enable_secure () && getenv ("LIBEV_METHODS"))
          methods = atoi (getenv ("LIBEV_METHODS"));
        else
          methods = EVMETHOD_ANY;

      method = 0;
#if EV_USE_WIN32
      if (!method && (methods & EVMETHOD_WIN32 )) method = win32_init  (EV_A_ methods);
#endif
#if EV_USE_KQUEUE
      if (!method && (methods & EVMETHOD_KQUEUE)) method = kqueue_init (EV_A_ methods);
#endif
#if EV_USE_EPOLL
      if (!method && (methods & EVMETHOD_EPOLL )) method = epoll_init  (EV_A_ methods);
#endif
#if EV_USE_POLL
      if (!method && (methods & EVMETHOD_POLL  )) method = poll_init   (EV_A_ methods);
#endif
#if EV_USE_SELECT
      if (!method && (methods & EVMETHOD_SELECT)) method = select_init (EV_A_ methods);
#endif
    }
}
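
/* Backend selection: with EVMETHOD_AUTO, and unless the process runs with
 * elevated privileges (enable_secure), the LIBEV_METHODS environment
 * variable is honoured as a numeric EVMETHOD_* bitmask parsed with atoi;
 * otherwise every compiled-in backend is tried in the order win32, kqueue,
 * epoll, poll, select.  Illustrative shell usage (not part of this file; the
 * numeric value must correspond to the EVMETHOD_* flags in ev.h):
 *
 *   LIBEV_METHODS=1 ./myprog
 */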

void
loop_destroy (EV_P)
{
  int i;

#if EV_USE_WIN32
  if (method == EVMETHOD_WIN32 ) win32_destroy  (EV_A);
#endif
#if EV_USE_KQUEUE
  if (method == EVMETHOD_KQUEUE) kqueue_destroy (EV_A);
#endif
#if EV_USE_EPOLL
  if (method == EVMETHOD_EPOLL ) epoll_destroy  (EV_A);
#endif
#if EV_USE_POLL
  if (method == EVMETHOD_POLL  ) poll_destroy   (EV_A);
#endif
#if EV_USE_SELECT
  if (method == EVMETHOD_SELECT) select_destroy (EV_A);
#endif

  for (i = NUMPRI; i--; )
    array_free (pending, [i]);

  array_free (fdchange, );
  array_free (timer, );
  array_free (periodic, );
  array_free (idle, );
  array_free (prepare, );
  array_free (check, );

  method = 0;
  /*TODO*/
}

void
loop_fork (EV_P)
{
  /*TODO*/
#if EV_USE_EPOLL
  if (method == EVMETHOD_EPOLL ) epoll_fork  (EV_A);
#endif
#if EV_USE_KQUEUE
  if (method == EVMETHOD_KQUEUE) kqueue_fork (EV_A);
#endif
}

#if EV_MULTIPLICITY
struct ev_loop *
ev_loop_new (int methods)
{
  struct ev_loop *loop = (struct ev_loop *)calloc (1, sizeof (struct ev_loop));

  loop_init (EV_A_ methods);

  if (ev_method (EV_A))
    return loop;

  return 0;
}

void
ev_loop_destroy (EV_P)
{
  loop_destroy (EV_A);
  free (loop);
}

void
ev_loop_fork (EV_P)
{
  loop_fork (EV_A);
}

#endif

#if EV_MULTIPLICITY
struct ev_loop default_loop_struct;
static struct ev_loop *default_loop;

struct ev_loop *
#else
static int default_loop;

int
#endif
ev_default_loop (int methods)
{
  if (sigpipe [0] == sigpipe [1])
    if (pipe (sigpipe))
      return 0;

  if (!default_loop)
    {
#if EV_MULTIPLICITY
      struct ev_loop *loop = default_loop = &default_loop_struct;
#else
      default_loop = 1;
#endif

      loop_init (EV_A_ methods);

      if (ev_method (EV_A))
        {
          ev_watcher_init (&sigev, sigcb);
          ev_set_priority (&sigev, EV_MAXPRI);
          siginit (EV_A);

#ifndef WIN32
          ev_signal_init (&childev, childcb, SIGCHLD);
          ev_set_priority (&childev, EV_MAXPRI);
          ev_signal_start (EV_A_ &childev);
          ev_unref (EV_A); /* child watcher should not keep loop alive */
#endif
        }
      else
        default_loop = 0;
    }

  return default_loop;
}

void
ev_default_destroy (void)
{
#if EV_MULTIPLICITY
  struct ev_loop *loop = default_loop;
#endif

  ev_ref (EV_A); /* child watcher */
  ev_signal_stop (EV_A_ &childev);

  ev_ref (EV_A); /* signal watcher */
  ev_io_stop (EV_A_ &sigev);

  close (sigpipe [0]); sigpipe [0] = 0;
  close (sigpipe [1]); sigpipe [1] = 0;

  loop_destroy (EV_A);
}

void
ev_default_fork (void)
{
#if EV_MULTIPLICITY
  struct ev_loop *loop = default_loop;
#endif

  loop_fork (EV_A);

  ev_io_stop (EV_A_ &sigev);
  close (sigpipe [0]);
  close (sigpipe [1]);
  pipe (sigpipe);

  ev_ref (EV_A); /* signal watcher */
  siginit (EV_A);
}
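
/* Minimal usage sketch (illustrative only, not part of this file; the
 * callback and watcher names are made up, and EV_MULTIPLICITY with a local
 * variable named "loop" is assumed so the EV_A_ convention used throughout
 * this file applies): create the default loop, register an ev_io watcher and
 * run until ev_unloop is called from the callback.
 *
 *   static void my_cb (EV_P_ struct ev_io *w, int revents)
 *   {
 *     ev_io_stop (EV_A_ w);
 *     ev_unloop (EV_A_ 2);              // 2 also terminates enclosing ev_loop calls
 *   }
 *
 *   struct ev_loop *loop = ev_default_loop (EVMETHOD_AUTO);
 *   static struct ev_io my_watcher;
 *
 *   ev_watcher_init (&my_watcher, my_cb);
 *   ev_io_set (&my_watcher, 0, EV_READ); // fd 0, i.e. stdin
 *   ev_io_start (EV_A_ &my_watcher);
 *   ev_loop (EV_A_ 0);
 */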

/*****************************************************************************/

static void
call_pending (EV_P)
{
  int pri;

  for (pri = NUMPRI; pri--; )
    while (pendingcnt [pri])
      {
        ANPENDING *p = pendings [pri] + --pendingcnt [pri];

        if (p->w)
          {
            p->w->pending = 0;

            ((void (*)(EV_P_ W, int))p->w->cb) (EV_A_ p->w, p->events);
          }
      }
}
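
/* Pending events are queued per priority (NUMPRI buckets); call_pending
 * drains the highest priority bucket first and, within a bucket, in LIFO
 * order.  A watcher whose pending slot was cleared via ev_clear_pending
 * shows up here with p->w == 0 and is simply skipped. */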

static void
timers_reify (EV_P)
{
  while (timercnt && ((WT)timers [0])->at <= mn_now)
    {
      struct ev_timer *w = timers [0];

      assert (("inactive timer on timer heap detected", ev_is_active (w)));

      /* first reschedule or stop timer */
      if (w->repeat)
        {
          assert (("negative ev_timer repeat value found while processing timers", w->repeat > 0.));
          ((WT)w)->at = mn_now + w->repeat;
          downheap ((WT *)timers, timercnt, 0);
        }
      else
        ev_timer_stop (EV_A_ w); /* nonrepeating: stop timer */

      event (EV_A_ (W)w, EV_TIMEOUT);
    }
}

static void
periodics_reify (EV_P)
{
  while (periodiccnt && ((WT)periodics [0])->at <= rt_now)
    {
      struct ev_periodic *w = periodics [0];

      assert (("inactive timer on periodic heap detected", ev_is_active (w)));

      /* first reschedule or stop timer */
      if (w->interval)
        {
          ((WT)w)->at += floor ((rt_now - ((WT)w)->at) / w->interval + 1.) * w->interval;
          assert (("ev_periodic timeout in the past detected while processing timers, negative interval?", ((WT)w)->at > rt_now));
          downheap ((WT *)periodics, periodiccnt, 0);
        }
      else
        ev_periodic_stop (EV_A_ w); /* nonrepeating: stop timer */

      event (EV_A_ (W)w, EV_PERIODIC);
    }
}

static void
periodics_reschedule (EV_P)
{
  int i;

  /* adjust periodics after time jump */
  for (i = 0; i < periodiccnt; ++i)
    {
      struct ev_periodic *w = periodics [i];

      if (w->interval)
        {
          ev_tstamp diff = ceil ((rt_now - ((WT)w)->at) / w->interval) * w->interval;

          if (fabs (diff) >= 1e-4)
            {
              ev_periodic_stop (EV_A_ w);
              ev_periodic_start (EV_A_ w);

              i = 0; /* restart loop, inefficient, but time jumps should be rare */
            }
        }
    }
}

inline int
time_update_monotonic (EV_P)
{
  mn_now = get_clock ();

  if (expect_true (mn_now - now_floor < MIN_TIMEJUMP * .5))
    {
      rt_now = rtmn_diff + mn_now;
      return 0;
    }
  else
    {
      now_floor = mn_now;
      rt_now = ev_time ();
      return 1;
    }
}

static void
time_update (EV_P)
{
  int i;

#if EV_USE_MONOTONIC
  if (expect_true (have_monotonic))
    {
      if (time_update_monotonic (EV_A))
        {
          ev_tstamp odiff = rtmn_diff;

          for (i = 4; --i; ) /* loop a few times, before making important decisions */
            {
              rtmn_diff = rt_now - mn_now;

              if (fabs (odiff - rtmn_diff) < MIN_TIMEJUMP)
                return; /* all is well */

              rt_now    = ev_time ();
              mn_now    = get_clock ();
              now_floor = mn_now;
            }

          periodics_reschedule (EV_A);
          /* no timer adjustment, as the monotonic clock doesn't jump */
          /* timers_reschedule (EV_A_ rtmn_diff - odiff) */
        }
    }
  else
#endif
    {
      rt_now = ev_time ();

      if (expect_false (mn_now > rt_now || mn_now < rt_now - MAX_BLOCKTIME - MIN_TIMEJUMP))
        {
          periodics_reschedule (EV_A);

          /* adjust timers. this is easy, as the offset is the same for all */
          for (i = 0; i < timercnt; ++i)
            ((WT)timers [i])->at += rt_now - mn_now;
        }

      mn_now = rt_now;
    }
}
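
/* Time jump handling: with a monotonic clock, only rtmn_diff (the offset
 * between the wall clock and the monotonic clock) is re-measured and the
 * periodic watchers are rescheduled, because relative timers run on mn_now,
 * which cannot jump.  Without a monotonic clock, a wall clock step backwards,
 * or forwards by more than MAX_BLOCKTIME + MIN_TIMEJUMP, is detected instead
 * and all relative timers are shifted by the same offset so their relative
 * distances are preserved. */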

void
ev_ref (EV_P)
{
  ++activecnt;
}

void
ev_unref (EV_P)
{
  --activecnt;
}

static int loop_done;

void
ev_loop (EV_P_ int flags)
{
  double block;
  loop_done = flags & (EVLOOP_ONESHOT | EVLOOP_NONBLOCK) ? 1 : 0;

  do
    {
      /* queue check watchers (and execute them) */
      if (expect_false (preparecnt))
        {
          queue_events (EV_A_ (W *)prepares, preparecnt, EV_PREPARE);
          call_pending (EV_A);
        }

      /* update fd-related kernel structures */
      fd_reify (EV_A);

      /* calculate blocking time */

      /* we only need this for !monotonic clock or timers, but as we basically
         always have timers, we just calculate it always */
#if EV_USE_MONOTONIC
      if (expect_true (have_monotonic))
        time_update_monotonic (EV_A);
      else
#endif
        {
          rt_now = ev_time ();
          mn_now = rt_now;
        }

      if (flags & EVLOOP_NONBLOCK || idlecnt)
        block = 0.;
      else
        {
          block = MAX_BLOCKTIME;

          if (timercnt)
            {
              ev_tstamp to = ((WT)timers [0])->at - mn_now + method_fudge;
              if (block > to) block = to;
            }

          if (periodiccnt)
            {
              ev_tstamp to = ((WT)periodics [0])->at - rt_now + method_fudge;
              if (block > to) block = to;
            }

          if (block < 0.) block = 0.;
        }

      method_poll (EV_A_ block);

      /* update rt_now, do magic */
      time_update (EV_A);

      /* queue pending timers and reschedule them */
      timers_reify (EV_A); /* relative timers called last */
      periodics_reify (EV_A); /* absolute timers called first */

      /* queue idle watchers unless io or timers are pending */
      if (!pendingcnt)
        queue_events (EV_A_ (W *)idles, idlecnt, EV_IDLE);

      /* queue check watchers, to be executed first */
      if (checkcnt)
        queue_events (EV_A_ (W *)checks, checkcnt, EV_CHECK);

      call_pending (EV_A);
    }
  while (activecnt && !loop_done);

  if (loop_done != 2)
    loop_done = 0;
}

void
ev_unloop (EV_P_ int how)
{
  loop_done = how;
}
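
/* ev_loop flag semantics as implemented above: EVLOOP_ONESHOT or
 * EVLOOP_NONBLOCK pre-set loop_done so at most one iteration runs
 * (EVLOOP_NONBLOCK additionally polls with a zero timeout); ev_unloop (how)
 * stores how into loop_done, and any value other than 2 is reset when
 * ev_loop returns, so how == 2 also terminates enclosing ev_loop calls. */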

/*****************************************************************************/

inline void
wlist_add (WL *head, WL elem)
{
  elem->next = *head;
  *head = elem;
}

inline void
wlist_del (WL *head, WL elem)
{
  while (*head)
    {
      if (*head == elem)
        {
          *head = elem->next;
          return;
        }

      head = &(*head)->next;
    }
}

inline void
ev_clear_pending (EV_P_ W w)
{
  if (w->pending)
    {
      pendings [ABSPRI (w)][w->pending - 1].w = 0;
      w->pending = 0;
    }
}

inline void
ev_start (EV_P_ W w, int active)
{
  if (w->priority < EV_MINPRI) w->priority = EV_MINPRI;
  if (w->priority > EV_MAXPRI) w->priority = EV_MAXPRI;

  w->active = active;
  ev_ref (EV_A);
}

inline void
ev_stop (EV_P_ W w)
{
  ev_unref (EV_A);
  w->active = 0;
}

/*****************************************************************************/

void
ev_io_start (EV_P_ struct ev_io *w)
{
  int fd = w->fd;

  if (ev_is_active (w))
    return;

  assert (("ev_io_start called with negative fd", fd >= 0));

  ev_start (EV_A_ (W)w, 1);
  array_needsize (anfds, anfdmax, fd + 1, anfds_init);
  wlist_add ((WL *)&anfds[fd].head, (WL)w);

  fd_change (EV_A_ fd);
}

void
ev_io_stop (EV_P_ struct ev_io *w)
{
  ev_clear_pending (EV_A_ (W)w);
  if (!ev_is_active (w))
    return;

  wlist_del ((WL *)&anfds[w->fd].head, (WL)w);
  ev_stop (EV_A_ (W)w);

  fd_change (EV_A_ w->fd);
}

void
ev_timer_start (EV_P_ struct ev_timer *w)
{
  if (ev_is_active (w))
    return;

  ((WT)w)->at += mn_now;

  assert (("ev_timer_start called with negative timer repeat value", w->repeat >= 0.));

  ev_start (EV_A_ (W)w, ++timercnt);
  array_needsize (timers, timermax, timercnt, );
  timers [timercnt - 1] = w;
  upheap ((WT *)timers, timercnt - 1);

  assert (("internal timer heap corruption", timers [((W)w)->active - 1] == w));
}

void
ev_timer_stop (EV_P_ struct ev_timer *w)
{
  ev_clear_pending (EV_A_ (W)w);
  if (!ev_is_active (w))
    return;

  assert (("internal timer heap corruption", timers [((W)w)->active - 1] == w));

  if (((W)w)->active < timercnt--)
    {
      timers [((W)w)->active - 1] = timers [timercnt];
      downheap ((WT *)timers, timercnt, ((W)w)->active - 1);
    }

  ((WT)w)->at = w->repeat;

  ev_stop (EV_A_ (W)w);
}

void
ev_timer_again (EV_P_ struct ev_timer *w)
{
  if (ev_is_active (w))
    {
      if (w->repeat)
        {
          ((WT)w)->at = mn_now + w->repeat;
          downheap ((WT *)timers, timercnt, ((W)w)->active - 1);
        }
      else
        ev_timer_stop (EV_A_ w);
    }
  else if (w->repeat)
    ev_timer_start (EV_A_ w);
}
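
/* Typical ev_timer_again pattern (illustrative sketch, not part of the
 * library; "timeout_watcher" is a made-up name): an inactivity timeout that
 * is pushed back on every activity instead of being stopped and restarted.
 *
 *   ev_timer_set (&timeout_watcher, 60., 60.);
 *   ev_timer_start (EV_A_ &timeout_watcher);
 *   ...
 *   ev_timer_again (EV_A_ &timeout_watcher); // on activity: ->at becomes mn_now + repeat
 */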

void
ev_periodic_start (EV_P_ struct ev_periodic *w)
{
  if (ev_is_active (w))
    return;

  assert (("ev_periodic_start called with negative interval value", w->interval >= 0.));

  /* this formula differs from the one in periodics_reify because we do not always round up */
  if (w->interval)
    ((WT)w)->at += ceil ((rt_now - ((WT)w)->at) / w->interval) * w->interval;

  ev_start (EV_A_ (W)w, ++periodiccnt);
  array_needsize (periodics, periodicmax, periodiccnt, );
  periodics [periodiccnt - 1] = w;
  upheap ((WT *)periodics, periodiccnt - 1);

  assert (("internal periodic heap corruption", periodics [((W)w)->active - 1] == w));
}

void
ev_periodic_stop (EV_P_ struct ev_periodic *w)
{
  ev_clear_pending (EV_A_ (W)w);
  if (!ev_is_active (w))
    return;

  assert (("internal periodic heap corruption", periodics [((W)w)->active - 1] == w));

  if (((W)w)->active < periodiccnt--)
    {
      periodics [((W)w)->active - 1] = periodics [periodiccnt];
      downheap ((WT *)periodics, periodiccnt, ((W)w)->active - 1);
    }

  ev_stop (EV_A_ (W)w);
}

void
ev_idle_start (EV_P_ struct ev_idle *w)
{
  if (ev_is_active (w))
    return;

  ev_start (EV_A_ (W)w, ++idlecnt);
  array_needsize (idles, idlemax, idlecnt, );
  idles [idlecnt - 1] = w;
}
void
ev_idle_stop (EV_P_ struct ev_idle *w)
{
  ev_clear_pending (EV_A_ (W)w);
  if (!ev_is_active (w))
    return;

  idles [((W)w)->active - 1] = idles [--idlecnt];
  ev_stop (EV_A_ (W)w);
}

void
ev_prepare_start (EV_P_ struct ev_prepare *w)
{
  if (ev_is_active (w))
    return;

  ev_start (EV_A_ (W)w, ++preparecnt);
  array_needsize (prepares, preparemax, preparecnt, );
  prepares [preparecnt - 1] = w;
}

void
ev_prepare_stop (EV_P_ struct ev_prepare *w)
{
  ev_clear_pending (EV_A_ (W)w);
  if (!ev_is_active (w))
    return;

  prepares [((W)w)->active - 1] = prepares [--preparecnt];
  ev_stop (EV_A_ (W)w);
}

void
ev_check_start (EV_P_ struct ev_check *w)
{
  if (ev_is_active (w))
    return;

  ev_start (EV_A_ (W)w, ++checkcnt);
  array_needsize (checks, checkmax, checkcnt, );
  checks [checkcnt - 1] = w;
}

void
ev_check_stop (EV_P_ struct ev_check *w)
{
  ev_clear_pending (EV_A_ (W)w);
  if (!ev_is_active (w))
    return;

  checks [((W)w)->active - 1] = checks [--checkcnt];
  ev_stop (EV_A_ (W)w);
}

#ifndef SA_RESTART
# define SA_RESTART 0
#endif

void
ev_signal_start (EV_P_ struct ev_signal *w)
{
#if EV_MULTIPLICITY
  assert (("signal watchers are only supported in the default loop", loop == default_loop));
#endif
  if (ev_is_active (w))
    return;

  assert (("ev_signal_start called with illegal signal number", w->signum > 0));

  ev_start (EV_A_ (W)w, 1);
  array_needsize (signals, signalmax, w->signum, signals_init);
  wlist_add ((WL *)&signals [w->signum - 1].head, (WL)w);

  if (!((WL)w)->next)
    {
      struct sigaction sa;
      sa.sa_handler = sighandler;
      sigfillset (&sa.sa_mask);
      sa.sa_flags = SA_RESTART; /* if restarting works we save one iteration */
      sigaction (w->signum, &sa, 0);
    }
}

void
ev_signal_stop (EV_P_ struct ev_signal *w)
{
  ev_clear_pending (EV_A_ (W)w);
  if (!ev_is_active (w))
    return;

  wlist_del ((WL *)&signals [w->signum - 1].head, (WL)w);
  ev_stop (EV_A_ (W)w);

  if (!signals [w->signum - 1].head)
    signal (w->signum, SIG_DFL);
}

void
ev_child_start (EV_P_ struct ev_child *w)
{
#if EV_MULTIPLICITY
  assert (("child watchers are only supported in the default loop", loop == default_loop));
#endif
  if (ev_is_active (w))
    return;

  ev_start (EV_A_ (W)w, 1);
  wlist_add ((WL *)&childs [w->pid & (PID_HASHSIZE - 1)], (WL)w);
}

void
ev_child_stop (EV_P_ struct ev_child *w)
{
  ev_clear_pending (EV_A_ (W)w);
  if (!ev_is_active (w))
    return;

  wlist_del ((WL *)&childs [w->pid & (PID_HASHSIZE - 1)], (WL)w);
  ev_stop (EV_A_ (W)w);
}

/*****************************************************************************/

struct ev_once
{
  struct ev_io io;
  struct ev_timer to;
  void (*cb)(int revents, void *arg);
  void *arg;
};

static void
once_cb (EV_P_ struct ev_once *once, int revents)
{
  void (*cb)(int revents, void *arg) = once->cb;
  void *arg = once->arg;

  ev_io_stop (EV_A_ &once->io);
  ev_timer_stop (EV_A_ &once->to);
  free (once);

  cb (revents, arg);
}

static void
once_cb_io (EV_P_ struct ev_io *w, int revents)
{
  once_cb (EV_A_ (struct ev_once *)(((char *)w) - offsetof (struct ev_once, io)), revents);
}

static void
once_cb_to (EV_P_ struct ev_timer *w, int revents)
{
  once_cb (EV_A_ (struct ev_once *)(((char *)w) - offsetof (struct ev_once, to)), revents);
}

void
ev_once (EV_P_ int fd, int events, ev_tstamp timeout, void (*cb)(int revents, void *arg), void *arg)
{
  struct ev_once *once = malloc (sizeof (struct ev_once));

  if (!once)
    cb (EV_ERROR | EV_READ | EV_WRITE | EV_TIMEOUT, arg);
  else
    {
      once->cb  = cb;
      once->arg = arg;

      ev_watcher_init (&once->io, once_cb_io);
      if (fd >= 0)
        {
          ev_io_set (&once->io, fd, events);
          ev_io_start (EV_A_ &once->io);
        }

      ev_watcher_init (&once->to, once_cb_to);
      if (timeout >= 0.)
        {
          ev_timer_set (&once->to, timeout, 0.);
          ev_timer_start (EV_A_ &once->to);
        }
    }
}
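
/* ev_once combines a one-shot ev_io and a one-shot ev_timer around a plain
 * callback and frees itself after whichever fires first; fd < 0 skips the io
 * watcher, timeout < 0. skips the timer, and on malloc failure the callback
 * is invoked immediately with EV_ERROR set.  Illustrative use (callback name
 * made up):
 *
 *   static void connect_cb (int revents, void *arg)
 *   {
 *     if (revents & EV_TIMEOUT) { ... }  // timed out
 *     if (revents & EV_WRITE)   { ... }  // fd became writable
 *   }
 *
 *   ev_once (EV_A_ fd, EV_WRITE, 10., connect_cb, 0);
 */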