… | |
… | |
589 | static void noinline |
589 | static void noinline |
590 | fd_rearm_all (EV_P) |
590 | fd_rearm_all (EV_P) |
591 | { |
591 | { |
592 | int fd; |
592 | int fd; |
593 | |
593 | |
594 | /* this should be highly optimised to not do anything but set a flag */ |
|
|
595 | for (fd = 0; fd < anfdmax; ++fd) |
594 | for (fd = 0; fd < anfdmax; ++fd) |
596 | if (anfds [fd].events) |
595 | if (anfds [fd].events) |
597 | { |
596 | { |
598 | anfds [fd].events = 0; |
597 | anfds [fd].events = 0; |
599 | fd_change (EV_A_ fd); |
598 | fd_change (EV_A_ fd); |
… | |
… | |
904 | |
903 | |
905 | ev_rt_now = ev_time (); |
904 | ev_rt_now = ev_time (); |
906 | mn_now = get_clock (); |
905 | mn_now = get_clock (); |
907 | now_floor = mn_now; |
906 | now_floor = mn_now; |
908 | rtmn_diff = ev_rt_now - mn_now; |
907 | rtmn_diff = ev_rt_now - mn_now; |
|
|
908 | |
|
|
909 | /* pid check not overridable via env */ |
|
|
910 | #ifndef _WIN32 |
|
|
911 | if (flags & EVFLAG_FORKCHECK) |
|
|
912 | curpid = getpid (); |
|
|
913 | #endif |
909 | |
914 | |
910 | if (!(flags & EVFLAG_NOENV) |
915 | if (!(flags & EVFLAG_NOENV) |
911 | && !enable_secure () |
916 | && !enable_secure () |
912 | && getenv ("LIBEV_FLAGS")) |
917 | && getenv ("LIBEV_FLAGS")) |
913 | flags = atoi (getenv ("LIBEV_FLAGS")); |
918 | flags = atoi (getenv ("LIBEV_FLAGS")); |
… | |
… | |
1270 | ev_tstamp odiff = rtmn_diff; |
1275 | ev_tstamp odiff = rtmn_diff; |
1271 | |
1276 | |
1272 | /* loop a few times, before making important decisions. |
1277 | /* loop a few times, before making important decisions. |
1273 | * on the choice of "4": one iteration isn't enough, |
1278 | * on the choice of "4": one iteration isn't enough, |
1274 | * in case we get preempted during the calls to |
1279 | * in case we get preempted during the calls to |
1275 | * ev_time and get_clock. a second call is almost guarenteed |
1280 | * ev_time and get_clock. a second call is almost guaranteed |
1276 | * to succeed in that case, though. and looping a few more times |
1281 | * to succeed in that case, though. and looping a few more times |
1277 | * doesn't hurt either as we only do this on time-jumps or |
1282 | * doesn't hurt either as we only do this on time-jumps or |
1278 | * in the unlikely event of getting preempted here. |
1283 | * in the unlikely event of having been preempted here. |
1279 | */ |
1284 | */ |
1280 | for (i = 4; --i; ) |
1285 | for (i = 4; --i; ) |
1281 | { |
1286 | { |
1282 | rtmn_diff = ev_rt_now - mn_now; |
1287 | rtmn_diff = ev_rt_now - mn_now; |
1283 | |
1288 | |
… | |
… | |
1305 | { |
1310 | { |
1306 | #if EV_PERIODIC_ENABLE |
1311 | #if EV_PERIODIC_ENABLE |
1307 | periodics_reschedule (EV_A); |
1312 | periodics_reschedule (EV_A); |
1308 | #endif |
1313 | #endif |
1309 | |
1314 | |
1310 | /* adjust timers. this is easy, as the offset is the same for all */ |
1315 | /* adjust timers. this is easy, as the offset is the same for all of them */ |
1311 | for (i = 0; i < timercnt; ++i) |
1316 | for (i = 0; i < timercnt; ++i) |
1312 | ((WT)timers [i])->at += ev_rt_now - mn_now; |
1317 | ((WT)timers [i])->at += ev_rt_now - mn_now; |
1313 | } |
1318 | } |
1314 | |
1319 | |
1315 | mn_now = ev_rt_now; |
1320 | mn_now = ev_rt_now; |
… | |
… | |
1335 | { |
1340 | { |
1336 | loop_done = flags & (EVLOOP_ONESHOT | EVLOOP_NONBLOCK) |
1341 | loop_done = flags & (EVLOOP_ONESHOT | EVLOOP_NONBLOCK) |
1337 | ? EVUNLOOP_ONE |
1342 | ? EVUNLOOP_ONE |
1338 | : EVUNLOOP_CANCEL; |
1343 | : EVUNLOOP_CANCEL; |
1339 | |
1344 | |
|
|
1345 | call_pending (EV_A); /* in case we recurse, ensure ordering stays nice and clean */ |
|
|
1346 | |
1340 | while (activecnt) |
1347 | while (activecnt) |
1341 | { |
1348 | { |
1342 | /* we might have forked, so reify kernel state if necessary */ |
1349 | #ifndef _WIN32 |
|
|
1350 | if (expect_false (curpid)) /* penalise the forking check even more */ |
|
|
1351 | if (expect_false (getpid () != curpid)) |
|
|
1352 | { |
|
|
1353 | curpid = getpid (); |
|
|
1354 | postfork = 1; |
|
|
1355 | } |
|
|
1356 | #endif |
|
|
1357 | |
1343 | #if EV_FORK_ENABLE |
1358 | #if EV_FORK_ENABLE |
|
|
1359 | /* we might have forked, so queue fork handlers */ |
1344 | if (expect_false (postfork)) |
1360 | if (expect_false (postfork)) |
1345 | if (forkcnt) |
1361 | if (forkcnt) |
1346 | { |
1362 | { |
1347 | queue_events (EV_A_ (W *)forks, forkcnt, EV_FORK); |
1363 | queue_events (EV_A_ (W *)forks, forkcnt, EV_FORK); |
1348 | call_pending (EV_A); |
1364 | call_pending (EV_A); |
1349 | } |
1365 | } |
1350 | #endif |
1366 | #endif |
1351 | |
1367 | |
1352 | /* queue check watchers (and execute them) */ |
1368 | /* queue check watchers (and execute them) */ |
1353 | if (expect_false (preparecnt)) |
1369 | if (expect_false (preparecnt)) |
1354 | { |
1370 | { |
1355 | queue_events (EV_A_ (W *)prepares, preparecnt, EV_PREPARE); |
1371 | queue_events (EV_A_ (W *)prepares, preparecnt, EV_PREPARE); |
… | |
… | |
1363 | /* update fd-related kernel structures */ |
1379 | /* update fd-related kernel structures */ |
1364 | fd_reify (EV_A); |
1380 | fd_reify (EV_A); |
1365 | |
1381 | |
1366 | /* calculate blocking time */ |
1382 | /* calculate blocking time */ |
1367 | { |
1383 | { |
1368 | double block; |
1384 | ev_tstamp block; |
1369 | |
1385 | |
1370 | if (flags & EVLOOP_NONBLOCK || idlecnt) |
1386 | if (flags & EVLOOP_NONBLOCK || idlecnt) |
1371 | block = 0.; /* do not block at all */ |
1387 | block = 0.; /* do not block at all */ |
1372 | else |
1388 | else |
1373 | { |
1389 | { |
… | |
… | |
1718 | # endif |
1734 | # endif |
1719 | |
1735 | |
1720 | #define DEF_STAT_INTERVAL 5.0074891 |
1736 | #define DEF_STAT_INTERVAL 5.0074891 |
1721 | #define MIN_STAT_INTERVAL 0.1074891 |
1737 | #define MIN_STAT_INTERVAL 0.1074891 |
1722 | |
1738 | |
1723 | void noinline stat_timer_cb (EV_P_ ev_timer *w_, int revents); |
1739 | static void noinline stat_timer_cb (EV_P_ ev_timer *w_, int revents); |
1724 | |
1740 | |
1725 | #if EV_USE_INOTIFY |
1741 | #if EV_USE_INOTIFY |
1726 | # define EV_INOTIFY_BUFSIZE 8192 |
1742 | # define EV_INOTIFY_BUFSIZE 8192 |
1727 | |
1743 | |
1728 | static void noinline |
1744 | static void noinline |
… | |
… | |
1879 | w->attr.st_nlink = 0; |
1895 | w->attr.st_nlink = 0; |
1880 | else if (!w->attr.st_nlink) |
1896 | else if (!w->attr.st_nlink) |
1881 | w->attr.st_nlink = 1; |
1897 | w->attr.st_nlink = 1; |
1882 | } |
1898 | } |
1883 | |
1899 | |
1884 | void noinline |
1900 | static void noinline |
1885 | stat_timer_cb (EV_P_ ev_timer *w_, int revents) |
1901 | stat_timer_cb (EV_P_ ev_timer *w_, int revents) |
1886 | { |
1902 | { |
1887 | ev_stat *w = (ev_stat *)(((char *)w_) - offsetof (ev_stat, timer)); |
1903 | ev_stat *w = (ev_stat *)(((char *)w_) - offsetof (ev_stat, timer)); |
1888 | |
1904 | |
1889 | /* we copy this here each time so that */
1905 | /* we copy this here each time so that */
1890 | /* prev has the old value when the callback gets invoked */ |
1906 | /* prev has the old value when the callback gets invoked */ |
1891 | w->prev = w->attr; |
1907 | w->prev = w->attr; |
1892 | ev_stat_stat (EV_A_ w); |
1908 | ev_stat_stat (EV_A_ w); |
1893 | |
1909 | |
1894 | if (memcmp (&w->prev, &w->attr, sizeof (ev_statdata))) |
1910 | /* memcmp doesn't work on netbsd, they.... do stuff to their struct stat */ |
|
|
1911 | if ( |
|
|
1912 | w->prev.st_dev != w->attr.st_dev |
|
|
1913 | || w->prev.st_ino != w->attr.st_ino |
|
|
1914 | || w->prev.st_mode != w->attr.st_mode |
|
|
1915 | || w->prev.st_nlink != w->attr.st_nlink |
|
|
1916 | || w->prev.st_uid != w->attr.st_uid |
|
|
1917 | || w->prev.st_gid != w->attr.st_gid |
|
|
1918 | || w->prev.st_rdev != w->attr.st_rdev |
|
|
1919 | || w->prev.st_size != w->attr.st_size |
|
|
1920 | || w->prev.st_atime != w->attr.st_atime |
|
|
1921 | || w->prev.st_mtime != w->attr.st_mtime |
|
|
1922 | || w->prev.st_ctime != w->attr.st_ctime |
1895 | { |
1923 | ) { |
1896 | #if EV_USE_INOTIFY |
1924 | #if EV_USE_INOTIFY |
1897 | infy_del (EV_A_ w); |
1925 | infy_del (EV_A_ w); |
1898 | infy_add (EV_A_ w); |
1926 | infy_add (EV_A_ w); |
1899 | ev_stat_stat (EV_A_ w); /* avoid race... */ |
1927 | ev_stat_stat (EV_A_ w); /* avoid race... */ |
1900 | #endif |
1928 | #endif |