… | |
… | |
32 | #ifdef __cplusplus |
32 | #ifdef __cplusplus |
33 | extern "C" { |
33 | extern "C" { |
34 | #endif |
34 | #endif |
35 | |
35 | |
36 | #ifndef EV_STANDALONE |
36 | #ifndef EV_STANDALONE |
|
|
37 | # ifdef EV_CONFIG_H |
|
|
38 | # include EV_CONFIG_H |
|
|
39 | # else |
37 | # include "config.h" |
40 | # include "config.h" |
|
|
41 | # endif |
38 | |
42 | |
39 | # if HAVE_CLOCK_GETTIME |
43 | # if HAVE_CLOCK_GETTIME |
40 | # ifndef EV_USE_MONOTONIC |
44 | # ifndef EV_USE_MONOTONIC |
41 | # define EV_USE_MONOTONIC 1 |
45 | # define EV_USE_MONOTONIC 1 |
42 | # endif |
46 | # endif |
… | |
… | |
393 | { |
397 | { |
394 | pendings [ABSPRI (w_)][w_->pending - 1].events |= revents; |
398 | pendings [ABSPRI (w_)][w_->pending - 1].events |= revents; |
395 | return; |
399 | return; |
396 | } |
400 | } |
397 | |
401 | |
|
|
402 | if (expect_false (!w_->cb)) |
|
|
403 | return; |
|
|
404 | |
398 | w_->pending = ++pendingcnt [ABSPRI (w_)]; |
405 | w_->pending = ++pendingcnt [ABSPRI (w_)]; |
399 | array_needsize (ANPENDING, pendings [ABSPRI (w_)], pendingmax [ABSPRI (w_)], pendingcnt [ABSPRI (w_)], EMPTY2); |
406 | array_needsize (ANPENDING, pendings [ABSPRI (w_)], pendingmax [ABSPRI (w_)], pendingcnt [ABSPRI (w_)], EMPTY2); |
400 | pendings [ABSPRI (w_)][w_->pending - 1].w = w_; |
407 | pendings [ABSPRI (w_)][w_->pending - 1].w = w_; |
401 | pendings [ABSPRI (w_)][w_->pending - 1].events = revents; |
408 | pendings [ABSPRI (w_)][w_->pending - 1].events = revents; |
402 | } |
409 | } |
… | |
… | |
727 | int pid, status; |
734 | int pid, status; |
728 | |
735 | |
729 | if (0 < (pid = waitpid (-1, &status, WNOHANG | WUNTRACED | WCONTINUED))) |
736 | if (0 < (pid = waitpid (-1, &status, WNOHANG | WUNTRACED | WCONTINUED))) |
730 | { |
737 | { |
731 | /* make sure we are called again until all childs have been reaped */ |
738 | /* make sure we are called again until all childs have been reaped */ |
|
|
739 | /* we need to do it this way so that the callback gets called before we continue */ |
732 | ev_feed_event (EV_A_ (W)sw, EV_SIGNAL); |
740 | ev_feed_event (EV_A_ (W)sw, EV_SIGNAL); |
733 | |
741 | |
734 | child_reap (EV_A_ sw, pid, pid, status); |
742 | child_reap (EV_A_ sw, pid, pid, status); |
735 | child_reap (EV_A_ sw, 0, pid, status); /* this might trigger a watcher twice, but event catches that */ |
743 | child_reap (EV_A_ sw, 0, pid, status); /* this might trigger a watcher twice, but feed_event catches that */ |
736 | } |
744 | } |
737 | } |
745 | } |
738 | |
746 | |
739 | #endif |
747 | #endif |
740 | |
748 | |
… | |
… | |
808 | // flags &= ~EVBACKEND_KQUEUE; for documentation |
816 | // flags &= ~EVBACKEND_KQUEUE; for documentation |
809 | flags &= ~EVBACKEND_POLL; |
817 | flags &= ~EVBACKEND_POLL; |
810 | #endif |
818 | #endif |
811 | |
819 | |
812 | return flags; |
820 | return flags; |
|
|
821 | } |
|
|
822 | |
|
|
823 | unsigned int |
|
|
824 | ev_embeddable_backends (void) |
|
|
825 | { |
|
|
826 | return EVBACKEND_EPOLL |
|
|
827 | | EVBACKEND_KQUEUE |
|
|
828 | | EVBACKEND_PORT; |
813 | } |
829 | } |
814 | |
830 | |
815 | unsigned int |
831 | unsigned int |
816 | ev_backend (EV_P) |
832 | ev_backend (EV_P) |
817 | { |
833 | { |
… | |
… | |
1232 | static int loop_done; |
1248 | static int loop_done; |
1233 | |
1249 | |
1234 | void |
1250 | void |
1235 | ev_loop (EV_P_ int flags) |
1251 | ev_loop (EV_P_ int flags) |
1236 | { |
1252 | { |
1237 | double block; |
|
|
1238 | loop_done = flags & (EVLOOP_ONESHOT | EVLOOP_NONBLOCK) ? 1 : 0; |
1253 | loop_done = flags & (EVLOOP_ONESHOT | EVLOOP_NONBLOCK) |
|
|
1254 | ? EVUNLOOP_ONE |
|
|
1255 | : EVUNLOOP_CANCEL; |
1239 | |
1256 | |
1240 | while (activecnt) |
1257 | while (activecnt) |
1241 | { |
1258 | { |
1242 | /* queue check watchers (and execute them) */ |
1259 | /* queue check watchers (and execute them) */ |
1243 | if (expect_false (preparecnt)) |
1260 | if (expect_false (preparecnt)) |
… | |
… | |
1252 | |
1269 | |
1253 | /* update fd-related kernel structures */ |
1270 | /* update fd-related kernel structures */ |
1254 | fd_reify (EV_A); |
1271 | fd_reify (EV_A); |
1255 | |
1272 | |
1256 | /* calculate blocking time */ |
1273 | /* calculate blocking time */ |
|
|
1274 | { |
|
|
1275 | double block; |
1257 | |
1276 | |
1258 | /* we only need this for !monotonic clock or timers, but as we basically |
1277 | if (flags & EVLOOP_NONBLOCK || idlecnt) |
1259 | always have timers, we just calculate it always */ |
1278 | block = 0.; /* do not block at all */ |
|
|
1279 | else |
|
|
1280 | { |
|
|
1281 | /* update time to cancel out callback processing overhead */ |
1260 | #if EV_USE_MONOTONIC |
1282 | #if EV_USE_MONOTONIC |
1261 | if (expect_true (have_monotonic)) |
1283 | if (expect_true (have_monotonic)) |
1262 | time_update_monotonic (EV_A); |
1284 | time_update_monotonic (EV_A); |
1263 | else |
1285 | else |
1264 | #endif |
1286 | #endif |
1265 | { |
1287 | { |
1266 | ev_rt_now = ev_time (); |
1288 | ev_rt_now = ev_time (); |
1267 | mn_now = ev_rt_now; |
1289 | mn_now = ev_rt_now; |
1268 | } |
1290 | } |
1269 | |
1291 | |
1270 | if (flags & EVLOOP_NONBLOCK || idlecnt) |
|
|
1271 | block = 0.; |
|
|
1272 | else |
|
|
1273 | { |
|
|
1274 | block = MAX_BLOCKTIME; |
1292 | block = MAX_BLOCKTIME; |
1275 | |
1293 | |
1276 | if (timercnt) |
1294 | if (timercnt) |
1277 | { |
1295 | { |
1278 | ev_tstamp to = ((WT)timers [0])->at - mn_now + backend_fudge; |
1296 | ev_tstamp to = ((WT)timers [0])->at - mn_now + backend_fudge; |
1279 | if (block > to) block = to; |
1297 | if (block > to) block = to; |
1280 | } |
1298 | } |
1281 | |
1299 | |
1282 | #if EV_PERIODICS |
1300 | #if EV_PERIODICS |
1283 | if (periodiccnt) |
1301 | if (periodiccnt) |
1284 | { |
1302 | { |
1285 | ev_tstamp to = ((WT)periodics [0])->at - ev_rt_now + backend_fudge; |
1303 | ev_tstamp to = ((WT)periodics [0])->at - ev_rt_now + backend_fudge; |
1286 | if (block > to) block = to; |
1304 | if (block > to) block = to; |
1287 | } |
1305 | } |
1288 | #endif |
1306 | #endif |
1289 | |
1307 | |
1290 | if (expect_false (block < 0.)) block = 0.; |
1308 | if (expect_false (block < 0.)) block = 0.; |
1291 | } |
1309 | } |
1292 | |
1310 | |
1293 | backend_poll (EV_A_ block); |
1311 | backend_poll (EV_A_ block); |
|
|
1312 | } |
1294 | |
1313 | |
1295 | /* update ev_rt_now, do magic */ |
1314 | /* update ev_rt_now, do magic */ |
1296 | time_update (EV_A); |
1315 | time_update (EV_A); |
1297 | |
1316 | |
1298 | /* queue pending timers and reschedule them */ |
1317 | /* queue pending timers and reschedule them */ |
… | |
… | |
1313 | |
1332 | |
1314 | if (expect_false (loop_done)) |
1333 | if (expect_false (loop_done)) |
1315 | break; |
1334 | break; |
1316 | } |
1335 | } |
1317 | |
1336 | |
1318 | if (loop_done != 2) |
1337 | if (loop_done == EVUNLOOP_ONE) |
1319 | loop_done = 0; |
1338 | loop_done = EVUNLOOP_CANCEL; |
1320 | } |
1339 | } |
1321 | |
1340 | |
1322 | void |
1341 | void |
1323 | ev_unloop (EV_P_ int how) |
1342 | ev_unloop (EV_P_ int how) |
1324 | { |
1343 | { |
… | |
… | |
1654 | |
1673 | |
1655 | wlist_del ((WL *)&childs [w->pid & (PID_HASHSIZE - 1)], (WL)w); |
1674 | wlist_del ((WL *)&childs [w->pid & (PID_HASHSIZE - 1)], (WL)w); |
1656 | ev_stop (EV_A_ (W)w); |
1675 | ev_stop (EV_A_ (W)w); |
1657 | } |
1676 | } |
1658 | |
1677 | |
|
|
#if EV_MULTIPLICITY
/* io callback for an embedded loop: the embedded loop's backend fd became
 * readable, meaning it has pending events.  Feed an EV_EMBED event to the
 * embed watcher, then sweep the embedded loop once without blocking so those
 * events are dispatched. */
static void
embed_cb (EV_P_ struct ev_io *io, int revents)
{
  /* recover the enclosing ev_embed watcher from its embedded io member */
  struct ev_embed *w = (struct ev_embed *)(((char *)io) - offsetof (struct ev_embed, io));

  ev_feed_event (EV_A_ (W)w, EV_EMBED);
  ev_loop (w->loop, EVLOOP_NONBLOCK);
}

/* Start an embed watcher: watch the embedded loop's backend fd for
 * readability inside the parent loop. */
void
ev_embed_start (EV_P_ struct ev_embed *w)
{
  if (expect_false (ev_is_active (w)))
    return;

  {
    /* shadow the loop argument so backend/backend_fd below refer to the
     * loop being embedded, not the parent — NOTE(review): this relies on
     * those macros expanding through the local "loop"; confirm against the
     * EV_MULTIPLICITY macro definitions */
    struct ev_loop *loop = w->loop;
    assert (("loop to be embedded is not embeddable", backend & ev_embeddable_backends ()));
    ev_io_init (&w->io, embed_cb, backend_fd, EV_READ);
  }

  ev_io_start (EV_A_ &w->io);
  ev_start (EV_A_ (W)w, 1);
}

/* Stop an embed watcher: drop any pending event and unhook the embedded
 * loop's backend fd from the parent loop. */
void
ev_embed_stop (EV_P_ struct ev_embed *w)
{
  ev_clear_pending (EV_A_ (W)w);
  if (expect_false (!ev_is_active (w)))
    return;

  ev_io_stop (EV_A_ &w->io);
  ev_stop (EV_A_ (W)w);
}
#endif
|
|
1715 | |
1659 | /*****************************************************************************/ |
1716 | /*****************************************************************************/ |
1660 | |
1717 | |
1661 | struct ev_once |
1718 | struct ev_once |
1662 | { |
1719 | { |
1663 | struct ev_io io; |
1720 | struct ev_io io; |