… | |
… | |
281 | perror (msg); |
281 | perror (msg); |
282 | abort (); |
282 | abort (); |
283 | } |
283 | } |
284 | } |
284 | } |
285 | |
285 | |
286 | static void *(*alloc)(void *ptr, size_t size) = realloc; |
286 | static void *(*alloc)(void *ptr, long size); |
287 | |
287 | |
288 | void |
288 | void |
289 | ev_set_allocator (void *(*cb)(void *ptr, size_t size)) |
289 | ev_set_allocator (void *(*cb)(void *ptr, long size)) |
290 | { |
290 | { |
291 | alloc = cb; |
291 | alloc = cb; |
292 | } |
292 | } |
293 | |
293 | |
294 | inline_speed void * |
294 | inline_speed void * |
295 | ev_realloc (void *ptr, size_t size) |
295 | ev_realloc (void *ptr, long size) |
296 | { |
296 | { |
297 | ptr = alloc (ptr, size); |
297 | ptr = alloc ? alloc (ptr, size) : realloc (ptr, size); |
298 | |
298 | |
299 | if (!ptr && size) |
299 | if (!ptr && size) |
300 | { |
300 | { |
301 | fprintf (stderr, "libev: cannot allocate %ld bytes, aborting.", (long)size); |
301 | fprintf (stderr, "libev: cannot allocate %ld bytes, aborting.", size); |
302 | abort (); |
302 | abort (); |
303 | } |
303 | } |
304 | |
304 | |
305 | return ptr; |
305 | return ptr; |
306 | } |
306 | } |
… | |
… | |
324 | { |
324 | { |
325 | W w; |
325 | W w; |
326 | int events; |
326 | int events; |
327 | } ANPENDING; |
327 | } ANPENDING; |
328 | |
328 | |
|
|
#if EV_USE_INOTIFY
/* per-slot head of the ev_stat watcher hash used by the inotify
 * backend. the whole typedef is guarded (rather than only the
 * member) so we never declare an empty struct, which is invalid
 * in standard C, when inotify support is compiled out. */
typedef struct
{
  WL head;
} ANFS;
#endif
335 | |
335 | |
336 | #if EV_MULTIPLICITY |
336 | #if EV_MULTIPLICITY |
337 | |
337 | |
338 | struct ev_loop |
338 | struct ev_loop |
339 | { |
339 | { |
… | |
… | |
396 | { |
396 | { |
397 | return ev_rt_now; |
397 | return ev_rt_now; |
398 | } |
398 | } |
399 | #endif |
399 | #endif |
400 | |
400 | |
401 | #define array_roundsize(type,n) (((n) | 4) & ~3) |
401 | int inline_size |
|
|
402 | array_nextsize (int elem, int cur, int cnt) |
|
|
403 | { |
|
|
404 | int ncur = cur + 1; |
|
|
405 | |
|
|
406 | do |
|
|
407 | ncur <<= 1; |
|
|
408 | while (cnt > ncur); |
|
|
409 | |
|
|
410 | /* if size > 4096, round to 4096 - 4 * longs to accomodate malloc overhead */ |
|
|
411 | if (elem * ncur > 4096) |
|
|
412 | { |
|
|
413 | ncur *= elem; |
|
|
414 | ncur = (ncur + elem + 4095 + sizeof (void *) * 4) & ~4095; |
|
|
415 | ncur = ncur - sizeof (void *) * 4; |
|
|
416 | ncur /= elem; |
|
|
417 | } |
|
|
418 | |
|
|
419 | return ncur; |
|
|
420 | } |
|
|
421 | |
|
|
422 | inline_speed void * |
|
|
423 | array_realloc (int elem, void *base, int *cur, int cnt) |
|
|
424 | { |
|
|
425 | *cur = array_nextsize (elem, *cur, cnt); |
|
|
426 | return ev_realloc (base, elem * *cur); |
|
|
427 | } |
402 | |
428 | |
/* ensure (base), an array of (type) with capacity (cur), can hold
   (cnt) elements; newly allocated elements are initialised via
   init (first_new, count_new). wrapped in do/while (0) so the macro
   expands to a single statement and is safe inside unbraced
   if/else bodies (classic multi-statement-macro hazard). */
#define array_needsize(type,base,cur,cnt,init)			\
  do								\
    {								\
      if (expect_false ((cnt) > (cur)))				\
        {							\
          int ocur_ = (cur);					\
          (base) = (type *)array_realloc			\
             (sizeof (type), (base), &(cur), (cnt));		\
          init ((base) + (ocur_), (cur) - ocur_);		\
        }							\
    }								\
  while (0)
417 | |
437 | |
|
|
#if 0
/* NOTE(review): dead code kept for reference — array_roundsize does
   not appear to be defined any more; confirm before re-enabling. */
#define array_slim(type,stem)					\
  if (stem ## max < array_roundsize (stem ## cnt >> 2))		\
    {								\
      stem ## max = array_roundsize (stem ## cnt >> 1);		\
      base = (type *)ev_realloc (base, sizeof (type) * (stem ## max));\
      fprintf (stderr, "slimmed down " # stem " to %d\n", stem ## max);/*D*/\
    }
#endif
425 | |
447 | |
/* free the array named stem##s (with optional loop-index suffix idx)
   and reset its count and capacity. wrapped in do/while (0): the
   original expanded to TWO bare statements, so a caller writing
   "if (x) array_free (foo, );" would only free conditionally but
   reset unconditionally. */
#define array_free(stem, idx)					\
  do { ev_free (stem ## s idx); stem ## cnt idx = stem ## max idx = 0; } while (0)
428 | |
450 | |
429 | /*****************************************************************************/ |
451 | /*****************************************************************************/ |
… | |
… | |
589 | static void noinline |
611 | static void noinline |
590 | fd_rearm_all (EV_P) |
612 | fd_rearm_all (EV_P) |
591 | { |
613 | { |
592 | int fd; |
614 | int fd; |
593 | |
615 | |
594 | /* this should be highly optimised to not do anything but set a flag */ |
|
|
595 | for (fd = 0; fd < anfdmax; ++fd) |
616 | for (fd = 0; fd < anfdmax; ++fd) |
596 | if (anfds [fd].events) |
617 | if (anfds [fd].events) |
597 | { |
618 | { |
598 | anfds [fd].events = 0; |
619 | anfds [fd].events = 0; |
599 | fd_change (EV_A_ fd); |
620 | fd_change (EV_A_ fd); |
… | |
… | |
887 | ev_backend (EV_P) |
908 | ev_backend (EV_P) |
888 | { |
909 | { |
889 | return backend; |
910 | return backend; |
890 | } |
911 | } |
891 | |
912 | |
|
|
913 | unsigned int |
|
|
914 | ev_loop_count (EV_P) |
|
|
915 | { |
|
|
916 | return loop_count; |
|
|
917 | } |
|
|
918 | |
892 | static void noinline |
919 | static void noinline |
893 | loop_init (EV_P_ unsigned int flags) |
920 | loop_init (EV_P_ unsigned int flags) |
894 | { |
921 | { |
895 | if (!backend) |
922 | if (!backend) |
896 | { |
923 | { |
… | |
… | |
905 | ev_rt_now = ev_time (); |
932 | ev_rt_now = ev_time (); |
906 | mn_now = get_clock (); |
933 | mn_now = get_clock (); |
907 | now_floor = mn_now; |
934 | now_floor = mn_now; |
908 | rtmn_diff = ev_rt_now - mn_now; |
935 | rtmn_diff = ev_rt_now - mn_now; |
909 | |
936 | |
|
|
937 | /* pid check not overridable via env */ |
|
|
938 | #ifndef _WIN32 |
|
|
939 | if (flags & EVFLAG_FORKCHECK) |
|
|
940 | curpid = getpid (); |
|
|
941 | #endif |
|
|
942 | |
910 | if (!(flags & EVFLAG_NOENV) |
943 | if (!(flags & EVFLAG_NOENV) |
911 | && !enable_secure () |
944 | && !enable_secure () |
912 | && getenv ("LIBEV_FLAGS")) |
945 | && getenv ("LIBEV_FLAGS")) |
913 | flags = atoi (getenv ("LIBEV_FLAGS")); |
946 | flags = atoi (getenv ("LIBEV_FLAGS")); |
914 | |
947 | |
… | |
… | |
985 | array_free (check, EMPTY0); |
1018 | array_free (check, EMPTY0); |
986 | |
1019 | |
987 | backend = 0; |
1020 | backend = 0; |
988 | } |
1021 | } |
989 | |
1022 | |
|
|
1023 | void inline_size infy_fork (EV_P); |
|
|
1024 | |
990 | void inline_size |
1025 | void inline_size |
991 | loop_fork (EV_P) |
1026 | loop_fork (EV_P) |
992 | { |
1027 | { |
993 | #if EV_USE_PORT |
1028 | #if EV_USE_PORT |
994 | if (backend == EVBACKEND_PORT ) port_fork (EV_A); |
1029 | if (backend == EVBACKEND_PORT ) port_fork (EV_A); |
… | |
… | |
996 | #if EV_USE_KQUEUE |
1031 | #if EV_USE_KQUEUE |
997 | if (backend == EVBACKEND_KQUEUE) kqueue_fork (EV_A); |
1032 | if (backend == EVBACKEND_KQUEUE) kqueue_fork (EV_A); |
998 | #endif |
1033 | #endif |
999 | #if EV_USE_EPOLL |
1034 | #if EV_USE_EPOLL |
1000 | if (backend == EVBACKEND_EPOLL ) epoll_fork (EV_A); |
1035 | if (backend == EVBACKEND_EPOLL ) epoll_fork (EV_A); |
|
|
1036 | #endif |
|
|
1037 | #if EV_USE_INOTIFY |
|
|
1038 | infy_fork (EV_A); |
1001 | #endif |
1039 | #endif |
1002 | |
1040 | |
1003 | if (ev_is_active (&sigev)) |
1041 | if (ev_is_active (&sigev)) |
1004 | { |
1042 | { |
1005 | /* default loop */ |
1043 | /* default loop */ |
… | |
… | |
1265 | ev_tstamp odiff = rtmn_diff; |
1303 | ev_tstamp odiff = rtmn_diff; |
1266 | |
1304 | |
1267 | /* loop a few times, before making important decisions. |
1305 | /* loop a few times, before making important decisions. |
1268 | * on the choice of "4": one iteration isn't enough, |
1306 | * on the choice of "4": one iteration isn't enough, |
1269 | * in case we get preempted during the calls to |
1307 | * in case we get preempted during the calls to |
1270 | * ev_time and get_clock. a second call is almost guarenteed |
1308 | * ev_time and get_clock. a second call is almost guaranteed |
1271 | * to succeed in that case, though. and looping a few more times |
1309 | * to succeed in that case, though. and looping a few more times |
1272 | * doesn't hurt either as we only do this on time-jumps or |
1310 | * doesn't hurt either as we only do this on time-jumps or |
1273 | * in the unlikely event of getting preempted here. |
1311 | * in the unlikely event of having been preempted here. |
1274 | */ |
1312 | */ |
1275 | for (i = 4; --i; ) |
1313 | for (i = 4; --i; ) |
1276 | { |
1314 | { |
1277 | rtmn_diff = ev_rt_now - mn_now; |
1315 | rtmn_diff = ev_rt_now - mn_now; |
1278 | |
1316 | |
… | |
… | |
1300 | { |
1338 | { |
1301 | #if EV_PERIODIC_ENABLE |
1339 | #if EV_PERIODIC_ENABLE |
1302 | periodics_reschedule (EV_A); |
1340 | periodics_reschedule (EV_A); |
1303 | #endif |
1341 | #endif |
1304 | |
1342 | |
1305 | /* adjust timers. this is easy, as the offset is the same for all */ |
1343 | /* adjust timers. this is easy, as the offset is the same for all of them */ |
1306 | for (i = 0; i < timercnt; ++i) |
1344 | for (i = 0; i < timercnt; ++i) |
1307 | ((WT)timers [i])->at += ev_rt_now - mn_now; |
1345 | ((WT)timers [i])->at += ev_rt_now - mn_now; |
1308 | } |
1346 | } |
1309 | |
1347 | |
1310 | mn_now = ev_rt_now; |
1348 | mn_now = ev_rt_now; |
… | |
… | |
1330 | { |
1368 | { |
1331 | loop_done = flags & (EVLOOP_ONESHOT | EVLOOP_NONBLOCK) |
1369 | loop_done = flags & (EVLOOP_ONESHOT | EVLOOP_NONBLOCK) |
1332 | ? EVUNLOOP_ONE |
1370 | ? EVUNLOOP_ONE |
1333 | : EVUNLOOP_CANCEL; |
1371 | : EVUNLOOP_CANCEL; |
1334 | |
1372 | |
1335 | while (activecnt) |
1373 | call_pending (EV_A); /* in case we recurse, ensure ordering stays nice and clean */ |
|
|
1374 | |
|
|
1375 | do |
1336 | { |
1376 | { |
1337 | /* we might have forked, so reify kernel state if necessary */ |
1377 | #ifndef _WIN32 |
|
|
1378 | if (expect_false (curpid)) /* penalise the forking check even more */ |
|
|
1379 | if (expect_false (getpid () != curpid)) |
|
|
1380 | { |
|
|
1381 | curpid = getpid (); |
|
|
1382 | postfork = 1; |
|
|
1383 | } |
|
|
1384 | #endif |
|
|
1385 | |
1338 | #if EV_FORK_ENABLE |
1386 | #if EV_FORK_ENABLE |
|
|
1387 | /* we might have forked, so queue fork handlers */ |
1339 | if (expect_false (postfork)) |
1388 | if (expect_false (postfork)) |
1340 | if (forkcnt) |
1389 | if (forkcnt) |
1341 | { |
1390 | { |
1342 | queue_events (EV_A_ (W *)forks, forkcnt, EV_FORK); |
1391 | queue_events (EV_A_ (W *)forks, forkcnt, EV_FORK); |
1343 | call_pending (EV_A); |
1392 | call_pending (EV_A); |
1344 | } |
1393 | } |
1345 | #endif |
1394 | #endif |
1346 | |
1395 | |
1347 | /* queue check watchers (and execute them) */ |
1396 | /* queue check watchers (and execute them) */ |
1348 | if (expect_false (preparecnt)) |
1397 | if (expect_false (preparecnt)) |
1349 | { |
1398 | { |
1350 | queue_events (EV_A_ (W *)prepares, preparecnt, EV_PREPARE); |
1399 | queue_events (EV_A_ (W *)prepares, preparecnt, EV_PREPARE); |
1351 | call_pending (EV_A); |
1400 | call_pending (EV_A); |
1352 | } |
1401 | } |
1353 | |
1402 | |
|
|
1403 | if (expect_false (!activecnt)) |
|
|
1404 | break; |
|
|
1405 | |
1354 | /* we might have forked, so reify kernel state if necessary */ |
1406 | /* we might have forked, so reify kernel state if necessary */ |
1355 | if (expect_false (postfork)) |
1407 | if (expect_false (postfork)) |
1356 | loop_fork (EV_A); |
1408 | loop_fork (EV_A); |
1357 | |
1409 | |
1358 | /* update fd-related kernel structures */ |
1410 | /* update fd-related kernel structures */ |
1359 | fd_reify (EV_A); |
1411 | fd_reify (EV_A); |
1360 | |
1412 | |
1361 | /* calculate blocking time */ |
1413 | /* calculate blocking time */ |
1362 | { |
1414 | { |
1363 | double block; |
1415 | ev_tstamp block; |
1364 | |
1416 | |
1365 | if (flags & EVLOOP_NONBLOCK || idlecnt) |
1417 | if (expect_false (flags & EVLOOP_NONBLOCK || idlecnt || !activecnt)) |
1366 | block = 0.; /* do not block at all */ |
1418 | block = 0.; /* do not block at all */ |
1367 | else |
1419 | else |
1368 | { |
1420 | { |
1369 | /* update time to cancel out callback processing overhead */ |
1421 | /* update time to cancel out callback processing overhead */ |
1370 | #if EV_USE_MONOTONIC |
1422 | #if EV_USE_MONOTONIC |
… | |
… | |
1394 | #endif |
1446 | #endif |
1395 | |
1447 | |
1396 | if (expect_false (block < 0.)) block = 0.; |
1448 | if (expect_false (block < 0.)) block = 0.; |
1397 | } |
1449 | } |
1398 | |
1450 | |
|
|
1451 | ++loop_count; |
1399 | backend_poll (EV_A_ block); |
1452 | backend_poll (EV_A_ block); |
1400 | } |
1453 | } |
1401 | |
1454 | |
1402 | /* update ev_rt_now, do magic */ |
1455 | /* update ev_rt_now, do magic */ |
1403 | time_update (EV_A); |
1456 | time_update (EV_A); |
… | |
… | |
1416 | if (expect_false (checkcnt)) |
1469 | if (expect_false (checkcnt)) |
1417 | queue_events (EV_A_ (W *)checks, checkcnt, EV_CHECK); |
1470 | queue_events (EV_A_ (W *)checks, checkcnt, EV_CHECK); |
1418 | |
1471 | |
1419 | call_pending (EV_A); |
1472 | call_pending (EV_A); |
1420 | |
1473 | |
1421 | if (expect_false (loop_done)) |
|
|
1422 | break; |
|
|
1423 | } |
1474 | } |
|
|
1475 | while (expect_true (activecnt && !loop_done)); |
1424 | |
1476 | |
1425 | if (loop_done == EVUNLOOP_ONE) |
1477 | if (loop_done == EVUNLOOP_ONE) |
1426 | loop_done = EVUNLOOP_CANCEL; |
1478 | loop_done = EVUNLOOP_CANCEL; |
1427 | } |
1479 | } |
1428 | |
1480 | |
… | |
… | |
1713 | # endif |
1765 | # endif |
1714 | |
1766 | |
1715 | #define DEF_STAT_INTERVAL 5.0074891 |
1767 | #define DEF_STAT_INTERVAL 5.0074891 |
1716 | #define MIN_STAT_INTERVAL 0.1074891 |
1768 | #define MIN_STAT_INTERVAL 0.1074891 |
1717 | |
1769 | |
1718 | void noinline stat_timer_cb (EV_P_ ev_timer *w_, int revents); |
1770 | static void noinline stat_timer_cb (EV_P_ ev_timer *w_, int revents); |
1719 | |
1771 | |
1720 | #if EV_USE_INOTIFY |
1772 | #if EV_USE_INOTIFY |
1721 | # define EV_INOTIFY_BUFSIZE 8192 |
1773 | # define EV_INOTIFY_BUFSIZE 8192 |
1722 | |
1774 | |
1723 | static void noinline |
1775 | static void noinline |
… | |
… | |
1831 | ev_set_priority (&fs_w, EV_MAXPRI); |
1883 | ev_set_priority (&fs_w, EV_MAXPRI); |
1832 | ev_io_start (EV_A_ &fs_w); |
1884 | ev_io_start (EV_A_ &fs_w); |
1833 | } |
1885 | } |
1834 | } |
1886 | } |
1835 | |
1887 | |
|
|
1888 | void inline_size |
|
|
1889 | infy_fork (EV_P) |
|
|
1890 | { |
|
|
1891 | int slot; |
|
|
1892 | |
|
|
1893 | if (fs_fd < 0) |
|
|
1894 | return; |
|
|
1895 | |
|
|
1896 | close (fs_fd); |
|
|
1897 | fs_fd = inotify_init (); |
|
|
1898 | |
|
|
1899 | for (slot = 0; slot < EV_INOTIFY_HASHSIZE; ++slot) |
|
|
1900 | { |
|
|
1901 | WL w_ = fs_hash [slot].head; |
|
|
1902 | fs_hash [slot].head = 0; |
|
|
1903 | |
|
|
1904 | while (w_) |
|
|
1905 | { |
|
|
1906 | ev_stat *w = (ev_stat *)w_; |
|
|
1907 | w_ = w_->next; /* lets us add this watcher */ |
|
|
1908 | |
|
|
1909 | w->wd = -1; |
|
|
1910 | |
|
|
1911 | if (fs_fd >= 0) |
|
|
1912 | infy_add (EV_A_ w); /* re-add, no matter what */ |
|
|
1913 | else |
|
|
1914 | ev_timer_start (EV_A_ &w->timer); |
|
|
1915 | } |
|
|
1916 | |
|
|
1917 | } |
|
|
1918 | } |
|
|
1919 | |
1836 | #endif |
1920 | #endif |
1837 | |
1921 | |
1838 | void |
1922 | void |
1839 | ev_stat_stat (EV_P_ ev_stat *w) |
1923 | ev_stat_stat (EV_P_ ev_stat *w) |
1840 | { |
1924 | { |
… | |
… | |
1842 | w->attr.st_nlink = 0; |
1926 | w->attr.st_nlink = 0; |
1843 | else if (!w->attr.st_nlink) |
1927 | else if (!w->attr.st_nlink) |
1844 | w->attr.st_nlink = 1; |
1928 | w->attr.st_nlink = 1; |
1845 | } |
1929 | } |
1846 | |
1930 | |
1847 | void noinline |
1931 | static void noinline |
1848 | stat_timer_cb (EV_P_ ev_timer *w_, int revents) |
1932 | stat_timer_cb (EV_P_ ev_timer *w_, int revents) |
1849 | { |
1933 | { |
1850 | ev_stat *w = (ev_stat *)(((char *)w_) - offsetof (ev_stat, timer)); |
1934 | ev_stat *w = (ev_stat *)(((char *)w_) - offsetof (ev_stat, timer)); |
1851 | |
1935 | |
1852 | /* we copy this here each the time so that */ |
1936 | /* we copy this here each the time so that */ |
1853 | /* prev has the old value when the callback gets invoked */ |
1937 | /* prev has the old value when the callback gets invoked */ |
1854 | w->prev = w->attr; |
1938 | w->prev = w->attr; |
1855 | ev_stat_stat (EV_A_ w); |
1939 | ev_stat_stat (EV_A_ w); |
1856 | |
1940 | |
1857 | if (memcmp (&w->prev, &w->attr, sizeof (ev_statdata))) |
1941 | /* memcmp doesn't work on netbsd, they.... do stuff to their struct stat */ |
|
|
1942 | if ( |
|
|
1943 | w->prev.st_dev != w->attr.st_dev |
|
|
1944 | || w->prev.st_ino != w->attr.st_ino |
|
|
1945 | || w->prev.st_mode != w->attr.st_mode |
|
|
1946 | || w->prev.st_nlink != w->attr.st_nlink |
|
|
1947 | || w->prev.st_uid != w->attr.st_uid |
|
|
1948 | || w->prev.st_gid != w->attr.st_gid |
|
|
1949 | || w->prev.st_rdev != w->attr.st_rdev |
|
|
1950 | || w->prev.st_size != w->attr.st_size |
|
|
1951 | || w->prev.st_atime != w->attr.st_atime |
|
|
1952 | || w->prev.st_mtime != w->attr.st_mtime |
|
|
1953 | || w->prev.st_ctime != w->attr.st_ctime |
1858 | { |
1954 | ) { |
1859 | #if EV_USE_INOTIFY |
1955 | #if EV_USE_INOTIFY |
1860 | infy_del (EV_A_ w); |
1956 | infy_del (EV_A_ w); |
1861 | infy_add (EV_A_ w); |
1957 | infy_add (EV_A_ w); |
1862 | ev_stat_stat (EV_A_ w); /* avoid race... */ |
1958 | ev_stat_stat (EV_A_ w); /* avoid race... */ |
1863 | #endif |
1959 | #endif |