…
static void noinline
fd_rearm_all (EV_P)
{
  int fd;

  for (fd = 0; fd < anfdmax; ++fd)
    if (anfds [fd].events)
      {
        anfds [fd].events = 0;
        fd_change (EV_A_ fd);
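fd_rearm_all clears the cached event mask of every watched fd and feeds it back through fd_change, so the next fd_reify re-registers everything with the kernel backend from scratch; backends use this when they have to rebuild their kernel state, for example after a fork. A minimal user-side sketch of that situation, using the pre-3.0 API names (ev_default_loop, ev_default_fork, ev_loop); the helper name run_in_child is illustrative only:

#include <ev.h>
#include <unistd.h>

static void
run_in_child (struct ev_loop *loop)
{
  if (fork () == 0)
    {
      /* the child must tell libev about the fork; on the next loop
         iteration loop_fork () runs and the backend re-arms all fds
         (which is where fd_rearm_all comes in) */
      ev_default_fork ();
      ev_loop (loop, 0);   /* continue event processing in the child */
    }
}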
…

      ev_rt_now = ev_time ();
      mn_now    = get_clock ();
      now_floor = mn_now;
      rtmn_diff = ev_rt_now - mn_now;

      /* pid check not overridable via env */
#ifndef _WIN32
      if (flags & EVFLAG_FORKCHECK)
        curpid = getpid ();
#endif

      if (!(flags & EVFLAG_NOENV)
          && !enable_secure ()
          && getenv ("LIBEV_FLAGS"))
        flags = atoi (getenv ("LIBEV_FLAGS"));
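Two knobs are wired up here: EVFLAG_FORKCHECK makes the loop call getpid () on every iteration to detect forks it was never told about, and LIBEV_FLAGS lets the environment override the flags (and thereby the backend choice) via atoi, unless EVFLAG_NOENV is passed or the process runs setuid/setgid (enable_secure). A hedged usage sketch; the value 1 in the comment corresponds to EVBACKEND_SELECT:

#include <ev.h>

int
main (void)
{
  /* opt in to the per-iteration getpid () fork check at creation time;
     passing EVFLAG_NOENV as well would ignore LIBEV_FLAGS below */
  struct ev_loop *loop = ev_default_loop (EVFLAG_FORKCHECK);

  /* the backend can also be forced from outside, e.g.
       LIBEV_FLAGS=1 ./prog
     (atoi -> 0x01 == EVBACKEND_SELECT), unless enable_secure () vetoes it */
  ev_loop (loop, 0);
  return 0;
}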
…
      ev_tstamp odiff = rtmn_diff;

      /* loop a few times, before making important decisions.
       * on the choice of "4": one iteration isn't enough,
       * in case we get preempted during the calls to
       * ev_time and get_clock. a second call is almost guaranteed
       * to succeed in that case, though. and looping a few more times
       * doesn't hurt either as we only do this on time-jumps or
       * in the unlikely event of having been preempted here.
       */
      for (i = 4; --i; )
        {
          rtmn_diff = ev_rt_now - mn_now;

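The retry loop above re-establishes the invariant that the cached realtime clock roughly equals the monotonic clock plus rtmn_diff, looping a few times in case the thread is preempted between the two clock reads. From user code the two notions of "now" are ev_now () (the cached value, stable within one iteration) and ev_time () (a fresh read). A small illustrative sketch; show_clocks is a made-up helper:

#include <ev.h>
#include <stdio.h>

static void
show_clocks (struct ev_loop *loop)
{
  ev_tstamp cached = ev_now (loop);  /* ev_rt_now: frozen for this iteration */
  ev_tstamp fresh  = ev_time ();     /* re-reads the wall clock right now */

  printf ("time spent since the loop last updated its clock: %f s\n",
          fresh - cached);
}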
…
        {
#if EV_PERIODIC_ENABLE
          periodics_reschedule (EV_A);
#endif

          /* adjust timers. this is easy, as the offset is the same for all of them */
          for (i = 0; i < timercnt; ++i)
            ((WT)timers [i])->at += ev_rt_now - mn_now;
        }

      mn_now = ev_rt_now;
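This is the path taken when a wall-clock jump is detected: absolute (periodic) watchers are rescheduled against the new time, while relative timers are shifted by the jump so their remaining delay stays the same. The user-visible consequence is the usual libev rule of thumb, sketched below with hypothetical callbacks timeout_cb and hourly_cb:

#include <ev.h>

static void timeout_cb (EV_P_ ev_timer *w, int revents)    { /* relative timeout fired */ }
static void hourly_cb  (EV_P_ ev_periodic *w, int revents) { /* wall-clock hour reached */ }

static ev_timer    timeout_w;
static ev_periodic hourly_w;

static void
arm_watchers (struct ev_loop *loop)
{
  /* relative: "fire 60 s from now"; after a clock jump its ->at is shifted
     along with the jump above, so the remaining delay is preserved */
  ev_timer_init (&timeout_w, timeout_cb, 60., 0.);
  ev_timer_start (loop, &timeout_w);

  /* absolute: "fire on every full hour of wall-clock time";
     periodics_reschedule () re-anchors it after a jump */
  ev_periodic_init (&hourly_w, hourly_cb, 0., 3600., 0);
  ev_periodic_start (loop, &hourly_w);
}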
…
{
  loop_done = flags & (EVLOOP_ONESHOT | EVLOOP_NONBLOCK)
            ? EVUNLOOP_ONE
            : EVUNLOOP_CANCEL;

  call_pending (EV_A); /* in case we recurse, ensure ordering stays nice and clean */

  do
    {
#ifndef _WIN32
      if (expect_false (curpid)) /* penalise the forking check even more */
        if (expect_false (getpid () != curpid))
          {
            curpid = getpid ();
            postfork = 1;
          }
#endif

#if EV_FORK_ENABLE
      /* we might have forked, so queue fork handlers */
      if (expect_false (postfork))
        if (forkcnt)
          {
            queue_events (EV_A_ (W *)forks, forkcnt, EV_FORK);
            call_pending (EV_A);
          }
#endif

      /* queue prepare watchers (and execute them) */
      if (expect_false (preparecnt))
        {
          queue_events (EV_A_ (W *)prepares, preparecnt, EV_PREPARE);
          call_pending (EV_A);
        }

      if (expect_false (!activecnt))
        break;

      /* we might have forked, so reify kernel state if necessary */
      if (expect_false (postfork))
        loop_fork (EV_A);

      /* update fd-related kernel structures */
      fd_reify (EV_A);

      /* calculate blocking time */
      {
        ev_tstamp block;

        if (expect_false (flags & EVLOOP_NONBLOCK || idlecnt || !activecnt))
          block = 0.; /* do not block at all */
        else
          {
            /* update time to cancel out callback processing overhead */
#if EV_USE_MONOTONIC
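The loop body queues EV_PREPARE watchers right before the blocking-time calculation, which makes prepare watchers the natural hook for refreshing state just before the loop goes to sleep; EVLOOP_NONBLOCK and pending idle watchers force a zero timeout instead. A hedged sketch of the user-facing pattern, with a hypothetical refresh_cb and setup helper:

#include <ev.h>

static ev_prepare prepare_w;

static void
refresh_cb (EV_P_ ev_prepare *w, int revents)
{
  /* runs once per iteration, just before the loop decides how long to
     block: a typical place to (re)arm fds or timeouts of an embedded library */
}

static void
setup (struct ev_loop *loop)
{
  ev_prepare_init (&prepare_w, refresh_cb);
  ev_prepare_start (loop, &prepare_w);

  /* one non-blocking sweep: EVLOOP_NONBLOCK forces block = 0. above */
  ev_loop (loop, EVLOOP_NONBLOCK);
}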
…
# endif

#define DEF_STAT_INTERVAL 5.0074891
#define MIN_STAT_INTERVAL 0.1074891

static void noinline stat_timer_cb (EV_P_ ev_timer *w_, int revents);

#if EV_USE_INOTIFY
# define EV_INOTIFY_BUFSIZE 8192

static void noinline
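DEF_STAT_INTERVAL is the polling interval used when an ev_stat watcher is started with an interval of 0, and MIN_STAT_INTERVAL the smallest interval libev will honour; the non-round defaults presumably make it less likely that many stat polls line up with each other. A usage sketch with a hypothetical passwd_cb and watch_passwd helper:

#include <ev.h>

static ev_stat passwd_w;

static void
passwd_cb (EV_P_ ev_stat *w, int revents)
{
  /* w->attr holds the current stat data, w->prev the previous snapshot */
}

static void
watch_passwd (struct ev_loop *loop)
{
  /* interval 0. selects the default polling rate (DEF_STAT_INTERVAL);
     with inotify support the timer is mostly a fallback anyway */
  ev_stat_init (&passwd_w, passwd_cb, "/etc/passwd", 0.);
  ev_stat_start (loop, &passwd_w);
}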
…
    w->attr.st_nlink = 0;
  else if (!w->attr.st_nlink)
    w->attr.st_nlink = 1;
}

static void noinline
stat_timer_cb (EV_P_ ev_timer *w_, int revents)
{
  ev_stat *w = (ev_stat *)(((char *)w_) - offsetof (ev_stat, timer));

  /* we copy this here each time so that */
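stat_timer_cb recovers the enclosing ev_stat from its embedded timer via offsetof and then re-stats the path; the normalisation above guarantees that st_nlink is 0 exactly when the stat failed, which is the documented way for callbacks to distinguish "changed" from "disappeared". A short sketch of a callback relying on that convention (conf_cb is an illustrative name):

#include <ev.h>

static void
conf_cb (EV_P_ ev_stat *w, int revents)
{
  if (!w->attr.st_nlink)
    {
      /* stat failed: ev_stat_stat forces st_nlink to 0, the cheap
         "path has gone away" test for callbacks */
    }
  else if (w->prev.st_mtime != w->attr.st_mtime)
    {
      /* path still exists and its modification time changed */
    }
}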