… | |
… | |
57 | # endif |
57 | # endif |
58 | # ifndef EV_USE_MONOTONIC |
58 | # ifndef EV_USE_MONOTONIC |
59 | # define EV_USE_MONOTONIC 1 |
59 | # define EV_USE_MONOTONIC 1 |
60 | # endif |
60 | # endif |
61 | # endif |
61 | # endif |
|
|
62 | # elif !defined(EV_USE_CLOCK_SYSCALL) |
|
|
63 | # define EV_USE_CLOCK_SYSCALL 0 |
62 | # endif |
64 | # endif |
63 | |
65 | |
64 | # if HAVE_CLOCK_GETTIME |
66 | # if HAVE_CLOCK_GETTIME |
65 | # ifndef EV_USE_MONOTONIC |
67 | # ifndef EV_USE_MONOTONIC |
66 | # define EV_USE_MONOTONIC 1 |
68 | # define EV_USE_MONOTONIC 1 |
… | |
… | |
282 | |
284 | |
283 | #ifndef EV_HEAP_CACHE_AT |
285 | #ifndef EV_HEAP_CACHE_AT |
284 | # define EV_HEAP_CACHE_AT !EV_MINIMAL |
286 | # define EV_HEAP_CACHE_AT !EV_MINIMAL |
285 | #endif |
287 | #endif |
286 | |
288 | |
|
|
289 | /* on linux, we can use a (slow) syscall to avoid a dependency on pthread, */ |
|
|
290 | /* which makes programs even slower. might work on other unices, too. */ |
|
|
291 | #if EV_USE_CLOCK_SYSCALL |
|
|
292 | # include <syscall.h> |
|
|
293 | # ifdef SYS_clock_gettime |
|
|
294 | # define clock_gettime(id, ts) syscall (SYS_clock_gettime, (id), (ts)) |
|
|
295 | # undef EV_USE_MONOTONIC |
|
|
296 | # define EV_USE_MONOTONIC 1 |
|
|
297 | # else |
|
|
298 | # undef EV_USE_CLOCK_SYSCALL |
|
|
299 | # define EV_USE_CLOCK_SYSCALL 0 |
|
|
300 | # endif |
|
|
301 | #endif |
|
|
302 | |
287 | /* this block fixes any misconfiguration where we know we run into trouble otherwise */ |
303 | /* this block fixes any misconfiguration where we know we run into trouble otherwise */ |
288 | |
304 | |
289 | #ifndef CLOCK_MONOTONIC |
305 | #ifndef CLOCK_MONOTONIC |
290 | # undef EV_USE_MONOTONIC |
306 | # undef EV_USE_MONOTONIC |
291 | # define EV_USE_MONOTONIC 0 |
307 | # define EV_USE_MONOTONIC 0 |
… | |
… | |
320 | |
336 | |
321 | #if EV_SELECT_IS_WINSOCKET |
337 | #if EV_SELECT_IS_WINSOCKET |
322 | # include <winsock.h> |
338 | # include <winsock.h> |
323 | #endif |
339 | #endif |
324 | |
340 | |
325 | /* on linux, we can use a (slow) syscall to avoid a dependency on pthread, */ |
|
|
326 | /* which makes programs even slower. might work on other unices, too. */ |
|
|
327 | #if EV_USE_CLOCK_SYSCALL |
|
|
328 | # include <syscall.h> |
|
|
329 | # define clock_gettime(id, ts) syscall (SYS_clock_gettime, (id), (ts)) |
|
|
330 | # undef EV_USE_MONOTONIC |
|
|
331 | # define EV_USE_MONOTONIC 1 |
|
|
332 | #endif |
|
|
333 | |
|
|
334 | #if EV_USE_EVENTFD |
341 | #if EV_USE_EVENTFD |
335 | /* our minimum requirement is glibc 2.7 which has the stub, but not the header */ |
342 | /* our minimum requirement is glibc 2.7 which has the stub, but not the header */ |
336 | # include <stdint.h> |
343 | # include <stdint.h> |
337 | # ifdef __cplusplus |
344 | # ifdef __cplusplus |
338 | extern "C" { |
345 | extern "C" { |
… | |
… | |
557 | |
564 | |
558 | #endif |
565 | #endif |
559 | |
566 | |
560 | /*****************************************************************************/ |
567 | /*****************************************************************************/ |
561 | |
568 | |
|
|
569 | #ifndef EV_HAVE_EV_TIME |
562 | ev_tstamp |
570 | ev_tstamp |
563 | ev_time (void) |
571 | ev_time (void) |
564 | { |
572 | { |
565 | #if EV_USE_REALTIME |
573 | #if EV_USE_REALTIME |
566 | if (expect_true (have_realtime)) |
574 | if (expect_true (have_realtime)) |
… | |
… | |
573 | |
581 | |
574 | struct timeval tv; |
582 | struct timeval tv; |
575 | gettimeofday (&tv, 0); |
583 | gettimeofday (&tv, 0); |
576 | return tv.tv_sec + tv.tv_usec * 1e-6; |
584 | return tv.tv_sec + tv.tv_usec * 1e-6; |
577 | } |
585 | } |
|
|
586 | #endif |
578 | |
587 | |
579 | inline_size ev_tstamp |
588 | inline_size ev_tstamp |
580 | get_clock (void) |
589 | get_clock (void) |
581 | { |
590 | { |
582 | #if EV_USE_MONOTONIC |
591 | #if EV_USE_MONOTONIC |
… | |
… | |
618 | |
627 | |
619 | tv.tv_sec = (time_t)delay; |
628 | tv.tv_sec = (time_t)delay; |
620 | tv.tv_usec = (long)((delay - (ev_tstamp)(tv.tv_sec)) * 1e6); |
629 | tv.tv_usec = (long)((delay - (ev_tstamp)(tv.tv_sec)) * 1e6); |
621 | |
630 | |
622 | /* here we rely on sys/time.h + sys/types.h + unistd.h providing select */ |
631 | /* here we rely on sys/time.h + sys/types.h + unistd.h providing select */ |
624 | /* something not guaranteed by newer posix versions, but guaranteed */
632 | /* something not guaranteed by newer posix versions, but guaranteed */
624 | /* by older ones */ |
633 | /* by older ones */ |
625 | select (0, 0, 0, 0, &tv); |
634 | select (0, 0, 0, 0, &tv); |
626 | #endif |
635 | #endif |
627 | } |
636 | } |
628 | } |
637 | } |
… | |
… | |
2066 | ev_tstamp waittime = 0.; |
2075 | ev_tstamp waittime = 0.; |
2067 | ev_tstamp sleeptime = 0.; |
2076 | ev_tstamp sleeptime = 0.; |
2068 | |
2077 | |
2069 | if (expect_true (!(flags & EVLOOP_NONBLOCK || idleall || !activecnt))) |
2078 | if (expect_true (!(flags & EVLOOP_NONBLOCK || idleall || !activecnt))) |
2070 | { |
2079 | { |
|
|
2080 | /* remember old timestamp for io_blocktime calculation */ |
|
|
2081 | ev_tstamp prev_mn_now = mn_now; |
|
|
2082 | |
2071 | /* update time to cancel out callback processing overhead */ |
2083 | /* update time to cancel out callback processing overhead */ |
2072 | time_update (EV_A_ 1e100); |
2084 | time_update (EV_A_ 1e100); |
2073 | |
2085 | |
2074 | waittime = MAX_BLOCKTIME; |
2086 | waittime = MAX_BLOCKTIME; |
2075 | |
2087 | |
… | |
… | |
2085 | ev_tstamp to = ANHE_at (periodics [HEAP0]) - ev_rt_now + backend_fudge; |
2097 | ev_tstamp to = ANHE_at (periodics [HEAP0]) - ev_rt_now + backend_fudge; |
2086 | if (waittime > to) waittime = to; |
2098 | if (waittime > to) waittime = to; |
2087 | } |
2099 | } |
2088 | #endif |
2100 | #endif |
2089 | |
2101 | |
|
|
2102 | /* don't let timeouts decrease the waittime below timeout_blocktime */ |
2090 | if (expect_false (waittime < timeout_blocktime)) |
2103 | if (expect_false (waittime < timeout_blocktime)) |
2091 | waittime = timeout_blocktime; |
2104 | waittime = timeout_blocktime; |
2092 | |
2105 | |
2093 | sleeptime = waittime - backend_fudge; |
2106 | /* extra check because io_blocktime is commonly 0 */ |
2094 | |
|
|
2095 | if (expect_true (sleeptime > io_blocktime)) |
2107 | if (expect_false (io_blocktime)) |
2096 | sleeptime = io_blocktime; |
|
|
2097 | |
|
|
2098 | if (sleeptime) |
|
|
2099 | { |
2108 | { |
|
|
2109 | sleeptime = io_blocktime - (mn_now - prev_mn_now); |
|
|
2110 | |
|
|
2111 | if (sleeptime > waittime - backend_fudge) |
|
|
2112 | sleeptime = waittime - backend_fudge; |
|
|
2113 | |
|
|
2114 | if (expect_true (sleeptime > 0.)) |
|
|
2115 | { |
2100 | ev_sleep (sleeptime); |
2116 | ev_sleep (sleeptime); |
2101 | waittime -= sleeptime; |
2117 | waittime -= sleeptime; |
|
|
2118 | } |
2102 | } |
2119 | } |
2103 | } |
2120 | } |
2104 | |
2121 | |
2105 | ++loop_count; |
2122 | ++loop_count; |
2106 | backend_poll (EV_A_ waittime); |
2123 | backend_poll (EV_A_ waittime); |