@@ -396 +396 @@
 {
   return ev_rt_now;
 }
 #endif
 
-#define array_roundsize(type,n) (((n) | 4) & ~3)
+int inline_size
+array_nextsize (int elem, int cur, int cnt)
+{
+  int ncur = cur + 1;
+
+  do
+    ncur <<= 1;
+  while (cnt > ncur);
+
+  /* if size > 4096, round to 4096 - 4 * longs to accommodate malloc overhead */
+  if (elem * ncur > 4096)
+    {
+      ncur *= elem;
+      ncur = (ncur + elem + 4095 + sizeof (void *) * 4) & ~4095;
+      ncur = ncur - sizeof (void *) * 4;
+      ncur /= elem;
+    }
+
+  return ncur;
+}
+
+inline_speed void *
+array_realloc (int elem, void *base, int *cur, int cnt)
+{
+  *cur = array_nextsize (elem, *cur, cnt);
+  return ev_realloc (base, elem * *cur);
+}
 
 #define array_needsize(type,base,cur,cnt,init)                   \
-  if (expect_false ((cnt) > cur))                                \
+  if (expect_false ((cnt) > (cur)))                              \
     {                                                            \
-      int newcnt = cur;                                          \
-      do                                                         \
-        {                                                        \
-          newcnt = array_roundsize (type, newcnt << 1);          \
-        }                                                        \
-      while ((cnt) > newcnt);                                    \
-                                                                 \
-      base = (type *)ev_realloc (base, sizeof (type) * (newcnt));\
-      init (base + cur, newcnt - cur);                           \
-      cur = newcnt;                                              \
+      int ocur_ = (cur);                                         \
+      (base) = (type *)array_realloc                             \
+         (sizeof (type), (base), &(cur), (cnt));                 \
+      init ((base) + (ocur_), (cur) - ocur_);                    \
     }
 
+#if 0
 #define array_slim(type,stem)                                    \
   if (stem ## max < array_roundsize (stem ## cnt >> 2))          \
     {                                                            \
       stem ## max = array_roundsize (stem ## cnt >> 1);          \
       base = (type *)ev_realloc (base, sizeof (type) * (stem ## max));\
       fprintf (stderr, "slimmed down " # stem " to %d\n", stem ## max);/*D*/\
     }
+#endif
 
 #define array_free(stem, idx) \
   ev_free (stem ## s idx); stem ## cnt idx = stem ## max idx = 0;
 
 /*****************************************************************************/
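array_nextsize doubles the element count until it covers the request; past one page it rounds the byte size to a multiple of 4096 minus four pointers, so the block plus typical malloc bookkeeping still fits in whole pages. A standalone sketch of the same arithmetic, compilable outside libev (next_size and the main harness are illustrative, not part of the patch):

#include <stdio.h>

/* standalone copy of the patch's growth arithmetic */
static int
next_size (int elem, int cur, int cnt)
{
  int ncur = cur + 1;

  do
    ncur <<= 1;
  while (cnt > ncur);

  if (elem * ncur > 4096)
    {
      ncur *= elem;
      ncur = (ncur + elem + 4095 + sizeof (void *) * 4) & ~4095;
      ncur -= sizeof (void *) * 4;
      ncur /= elem;
    }

  return ncur;
}

int
main (void)
{
  /* 16-byte elements, growing past 256: doubling gives 514 elements
     (8224 bytes), which the rounding trims to 510 (8160 bytes), four
     pointers short of two 4096-byte pages */
  printf ("%d\n", next_size (16, 256, 257));
  return 0;
}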
@@ -589 +611 @@
 static void noinline
 fd_rearm_all (EV_P)
 {
   int fd;
 
-  /* this should be highly optimised to not do anything but set a flag */
   for (fd = 0; fd < anfdmax; ++fd)
     if (anfds [fd].events)
       {
         anfds [fd].events = 0;
         fd_change (EV_A_ fd);
@@ -887 +908 @@
 ev_backend (EV_P)
 {
   return backend;
 }
 
+unsigned int
+ev_loop_count (EV_P)
+{
+  return loop_count;
+}
+
 static void noinline
 loop_init (EV_P_ unsigned int flags)
 {
   if (!backend)
     {
@@ -904 +931 @@
 
       ev_rt_now = ev_time ();
       mn_now    = get_clock ();
       now_floor = mn_now;
       rtmn_diff = ev_rt_now - mn_now;
+
+      /* pid check not overridable via env */
+#ifndef _WIN32
+      if (flags & EVFLAG_FORKCHECK)
+        curpid = getpid ();
+#endif
 
       if (!(flags & EVFLAG_NOENV)
           && !enable_secure ()
           && getenv ("LIBEV_FLAGS"))
         flags = atoi (getenv ("LIBEV_FLAGS"));
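Note that curpid is captured before LIBEV_FLAGS is read, so fork checking cannot be switched on or off from the environment; it has to be requested in code. A minimal usage sketch against the public API of this libev generation (pre-4.0 ev_loop entry point; with no watchers started the loop returns at once, the point is merely how the flag is passed):

#include <ev.h>

int
main (void)
{
  /* poll getpid () once per iteration instead of relying on an
     explicit ev_default_fork () call in the child after fork () */
  struct ev_loop *loop = ev_default_loop (EVFLAG_FORKCHECK);

  ev_loop (loop, 0);
  return 0;
}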
@@ -1270 +1303 @@
   ev_tstamp odiff = rtmn_diff;
 
   /* loop a few times, before making important decisions.
    * on the choice of "4": one iteration isn't enough,
    * in case we get preempted during the calls to
-   * ev_time and get_clock. a second call is almost guarenteed
+   * ev_time and get_clock. a second call is almost guaranteed
    * to succeed in that case, though. and looping a few more times
    * doesn't hurt either as we only do this on time-jumps or
-   * in the unlikely event of getting preempted here.
+   * in the unlikely event of having been preempted here.
    */
   for (i = 4; --i; )
     {
       rtmn_diff = ev_rt_now - mn_now;
@@ -1305 +1338 @@
     {
 #if EV_PERIODIC_ENABLE
       periodics_reschedule (EV_A);
 #endif
 
-      /* adjust timers. this is easy, as the offset is the same for all */
+      /* adjust timers. this is easy, as the offset is the same for all of them */
      for (i = 0; i < timercnt; ++i)
        ((WT)timers [i])->at += ev_rt_now - mn_now;
     }
 
   mn_now = ev_rt_now;
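An ev_timer measures relative time, so moving its absolute expiry by exactly the wall-clock jump keeps the remaining timeout intact, while periodics were already recomputed above. A hypothetical walk-through of that shift with made-up numbers (the typedef and harness are illustrative only):

#include <stdio.h>

typedef double ev_tstamp; /* as in libev */

int
main (void)
{
  ev_tstamp mn_now    = 100.;          /* when a 10 s ev_timer was armed */
  ev_tstamp at        = mn_now + 10.;  /* absolute expiry: 110. */
  ev_tstamp ev_rt_now = 3710.;         /* wall clock jumped far ahead */

  at    += ev_rt_now - mn_now;         /* shifted to 3720. */
  mn_now  = ev_rt_now;

  printf ("fires in %g s\n", at - mn_now); /* still 10 s of relative time */
  return 0;
}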
@@ -1335 +1368 @@
 {
   loop_done = flags & (EVLOOP_ONESHOT | EVLOOP_NONBLOCK)
             ? EVUNLOOP_ONE
             : EVUNLOOP_CANCEL;
 
-  while (activecnt)
+  call_pending (EV_A); /* in case we recurse, ensure ordering stays nice and clean */
+
+  do
     {
-      /* we might have forked, so reify kernel state if necessary */
+#ifndef _WIN32
+      if (expect_false (curpid)) /* penalise the forking check even more */
+        if (expect_false (getpid () != curpid))
+          {
+            curpid = getpid ();
+            postfork = 1;
+          }
+#endif
+
 #if EV_FORK_ENABLE
+      /* we might have forked, so queue fork handlers */
       if (expect_false (postfork))
         if (forkcnt)
           {
             queue_events (EV_A_ (W *)forks, forkcnt, EV_FORK);
             call_pending (EV_A);
           }
 #endif
 
       /* queue check watchers (and execute them) */
       if (expect_false (preparecnt))
         {
           queue_events (EV_A_ (W *)prepares, preparecnt, EV_PREPARE);
           call_pending (EV_A);
         }
 
+      if (expect_false (!activecnt))
+        break;
+
       /* we might have forked, so reify kernel state if necessary */
       if (expect_false (postfork))
         loop_fork (EV_A);
 
       /* update fd-related kernel structures */
       fd_reify (EV_A);
 
       /* calculate blocking time */
       {
-        double block;
+        ev_tstamp block;
 
-        if (flags & EVLOOP_NONBLOCK || idlecnt)
+        if (expect_false (flags & EVLOOP_NONBLOCK || idlecnt || !activecnt))
           block = 0.; /* do not block at all */
         else
           {
             /* update time to cancel out callback processing overhead */
 #if EV_USE_MONOTONIC
@@ -1399 +1446 @@
 #endif
 
             if (expect_false (block < 0.)) block = 0.;
           }
 
+        ++loop_count;
         backend_poll (EV_A_ block);
       }
 
       /* update ev_rt_now, do magic */
       time_update (EV_A);
@@ -1421 +1469 @@
       if (expect_false (checkcnt))
         queue_events (EV_A_ (W *)checks, checkcnt, EV_CHECK);
 
       call_pending (EV_A);
 
-      if (expect_false (loop_done))
-        break;
     }
+  while (expect_true (activecnt && !loop_done));
 
   if (loop_done == EVUNLOOP_ONE)
     loop_done = EVUNLOOP_CANCEL;
 }
 
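The loop_count bumped right before each backend_poll is what the new ev_loop_count accessor reports, i.e. the number of poll iterations so far. A minimal sketch of reading it (the idle watcher and callback are illustrative; assumes the default EV_MULTIPLICITY build):

#include <stdio.h>
#include <ev.h>

static void
idle_cb (EV_P_ ev_idle *w, int revents)
{
  /* grows by one per backend_poll iteration */
  printf ("loop iterated %u times\n", ev_loop_count (EV_A));
  ev_idle_stop (EV_A_ w);
}

int
main (void)
{
  struct ev_loop *loop = ev_default_loop (0);
  ev_idle idle;

  ev_idle_init (&idle, idle_cb);
  ev_idle_start (loop, &idle);
  ev_loop (loop, 0);
  return 0;
}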
@@ -1718 +1765 @@
 # endif
 
 #define DEF_STAT_INTERVAL 5.0074891
 #define MIN_STAT_INTERVAL 0.1074891
 
-void noinline stat_timer_cb (EV_P_ ev_timer *w_, int revents);
+static void noinline stat_timer_cb (EV_P_ ev_timer *w_, int revents);
 
 #if EV_USE_INOTIFY
 # define EV_INOTIFY_BUFSIZE 8192
 
 static void noinline
@@ -1879 +1926 @@
     w->attr.st_nlink = 0;
   else if (!w->attr.st_nlink)
     w->attr.st_nlink = 1;
 }
 
-void noinline
+static void noinline
 stat_timer_cb (EV_P_ ev_timer *w_, int revents)
 {
   ev_stat *w = (ev_stat *)(((char *)w_) - offsetof (ev_stat, timer));
 
   /* we copy this here each time so that */