… | |
… | |
281 | perror (msg); |
281 | perror (msg); |
282 | abort (); |
282 | abort (); |
283 | } |
283 | } |
284 | } |
284 | } |
285 | |
285 | |
/* the memory allocator used by all of libev; a null pointer
 * means "fall back to the system realloc" (see ev_realloc) */
static void *(*alloc)(void *ptr, long size);
287 | |
287 | |
288 | void |
288 | void |
289 | ev_set_allocator (void *(*cb)(void *ptr, size_t size)) |
289 | ev_set_allocator (void *(*cb)(void *ptr, long size)) |
290 | { |
290 | { |
291 | alloc = cb; |
291 | alloc = cb; |
292 | } |
292 | } |
293 | |
293 | |
294 | inline_speed void * |
294 | inline_speed void * |
295 | ev_realloc (void *ptr, size_t size) |
295 | ev_realloc (void *ptr, long size) |
296 | { |
296 | { |
297 | ptr = alloc (ptr, size); |
297 | ptr = alloc ? alloc (ptr, size) : realloc (ptr, size); |
298 | |
298 | |
299 | if (!ptr && size) |
299 | if (!ptr && size) |
300 | { |
300 | { |
301 | fprintf (stderr, "libev: cannot allocate %ld bytes, aborting.", (long)size); |
301 | fprintf (stderr, "libev: cannot allocate %ld bytes, aborting.", size); |
302 | abort (); |
302 | abort (); |
303 | } |
303 | } |
304 | |
304 | |
305 | return ptr; |
305 | return ptr; |
306 | } |
306 | } |
… | |
… | |
324 | { |
324 | { |
325 | W w; |
325 | W w; |
326 | int events; |
326 | int events; |
327 | } ANPENDING; |
327 | } ANPENDING; |
328 | |
328 | |
|
|
#if EV_USE_INOTIFY
/* one hash slot for ev_stat watchers: the chain of watchers whose
 * inotify watch descriptors hash to this slot (only needed when
 * inotify support is compiled in) */
typedef struct
{
  WL head;
} ANFS;
#endif
335 | |
335 | |
336 | #if EV_MULTIPLICITY |
336 | #if EV_MULTIPLICITY |
337 | |
337 | |
338 | struct ev_loop |
338 | struct ev_loop |
339 | { |
339 | { |
… | |
… | |
589 | static void noinline |
589 | static void noinline |
590 | fd_rearm_all (EV_P) |
590 | fd_rearm_all (EV_P) |
591 | { |
591 | { |
592 | int fd; |
592 | int fd; |
593 | |
593 | |
594 | /* this should be highly optimised to not do anything but set a flag */ |
|
|
595 | for (fd = 0; fd < anfdmax; ++fd) |
594 | for (fd = 0; fd < anfdmax; ++fd) |
596 | if (anfds [fd].events) |
595 | if (anfds [fd].events) |
597 | { |
596 | { |
598 | anfds [fd].events = 0; |
597 | anfds [fd].events = 0; |
599 | fd_change (EV_A_ fd); |
598 | fd_change (EV_A_ fd); |
… | |
… | |
985 | array_free (check, EMPTY0); |
984 | array_free (check, EMPTY0); |
986 | |
985 | |
987 | backend = 0; |
986 | backend = 0; |
988 | } |
987 | } |
989 | |
988 | |
|
|
989 | void inline_size infy_fork (EV_P); |
|
|
990 | |
990 | void inline_size |
991 | void inline_size |
991 | loop_fork (EV_P) |
992 | loop_fork (EV_P) |
992 | { |
993 | { |
993 | #if EV_USE_PORT |
994 | #if EV_USE_PORT |
994 | if (backend == EVBACKEND_PORT ) port_fork (EV_A); |
995 | if (backend == EVBACKEND_PORT ) port_fork (EV_A); |
… | |
… | |
996 | #if EV_USE_KQUEUE |
997 | #if EV_USE_KQUEUE |
997 | if (backend == EVBACKEND_KQUEUE) kqueue_fork (EV_A); |
998 | if (backend == EVBACKEND_KQUEUE) kqueue_fork (EV_A); |
998 | #endif |
999 | #endif |
999 | #if EV_USE_EPOLL |
1000 | #if EV_USE_EPOLL |
1000 | if (backend == EVBACKEND_EPOLL ) epoll_fork (EV_A); |
1001 | if (backend == EVBACKEND_EPOLL ) epoll_fork (EV_A); |
|
|
1002 | #endif |
|
|
1003 | #if EV_USE_INOTIFY |
|
|
1004 | infy_fork (EV_A); |
1001 | #endif |
1005 | #endif |
1002 | |
1006 | |
1003 | if (ev_is_active (&sigev)) |
1007 | if (ev_is_active (&sigev)) |
1004 | { |
1008 | { |
1005 | /* default loop */ |
1009 | /* default loop */ |
… | |
… | |
1265 | ev_tstamp odiff = rtmn_diff; |
1269 | ev_tstamp odiff = rtmn_diff; |
1266 | |
1270 | |
1267 | /* loop a few times, before making important decisions. |
1271 | /* loop a few times, before making important decisions. |
1268 | * on the choice of "4": one iteration isn't enough, |
1272 | * on the choice of "4": one iteration isn't enough, |
1269 | * in case we get preempted during the calls to |
1273 | * in case we get preempted during the calls to |
1270 | * ev_time and get_clock. a second call is almost guaranteed |
1274 | * ev_time and get_clock. a second call is almost guaranteed |
1271 | * to succeed in that case, though. and looping a few more times |
1275 | * to succeed in that case, though. and looping a few more times |
1272 | * doesn't hurt either as we only do this on time-jumps or |
1276 | * doesn't hurt either as we only do this on time-jumps or |
1273 | * in the unlikely event of getting preempted here. |
1277 | * in the unlikely event of having been preempted here. |
1274 | */ |
1278 | */ |
1275 | for (i = 4; --i; ) |
1279 | for (i = 4; --i; ) |
1276 | { |
1280 | { |
1277 | rtmn_diff = ev_rt_now - mn_now; |
1281 | rtmn_diff = ev_rt_now - mn_now; |
1278 | |
1282 | |
… | |
… | |
1300 | { |
1304 | { |
1301 | #if EV_PERIODIC_ENABLE |
1305 | #if EV_PERIODIC_ENABLE |
1302 | periodics_reschedule (EV_A); |
1306 | periodics_reschedule (EV_A); |
1303 | #endif |
1307 | #endif |
1304 | |
1308 | |
1305 | /* adjust timers. this is easy, as the offset is the same for all */ |
1309 | /* adjust timers. this is easy, as the offset is the same for all of them */ |
1306 | for (i = 0; i < timercnt; ++i) |
1310 | for (i = 0; i < timercnt; ++i) |
1307 | ((WT)timers [i])->at += ev_rt_now - mn_now; |
1311 | ((WT)timers [i])->at += ev_rt_now - mn_now; |
1308 | } |
1312 | } |
1309 | |
1313 | |
1310 | mn_now = ev_rt_now; |
1314 | mn_now = ev_rt_now; |
… | |
… | |
1332 | ? EVUNLOOP_ONE |
1336 | ? EVUNLOOP_ONE |
1333 | : EVUNLOOP_CANCEL; |
1337 | : EVUNLOOP_CANCEL; |
1334 | |
1338 | |
1335 | while (activecnt) |
1339 | while (activecnt) |
1336 | { |
1340 | { |
1337 | /* we might have forked, so reify kernel state if necessary */ |
|
|
1338 | #if EV_FORK_ENABLE |
1341 | #if EV_FORK_ENABLE |
|
|
1342 | /* we might have forked, so queue fork handlers */ |
1339 | if (expect_false (postfork)) |
1343 | if (expect_false (postfork)) |
1340 | if (forkcnt) |
1344 | if (forkcnt) |
1341 | { |
1345 | { |
1342 | queue_events (EV_A_ (W *)forks, forkcnt, EV_FORK); |
1346 | queue_events (EV_A_ (W *)forks, forkcnt, EV_FORK); |
1343 | call_pending (EV_A); |
1347 | call_pending (EV_A); |
1344 | } |
1348 | } |
1345 | #endif |
1349 | #endif |
1346 | |
1350 | |
1347 | /* queue check watchers (and execute them) */ |
1351 | /* queue check watchers (and execute them) */ |
1348 | if (expect_false (preparecnt)) |
1352 | if (expect_false (preparecnt)) |
1349 | { |
1353 | { |
1350 | queue_events (EV_A_ (W *)prepares, preparecnt, EV_PREPARE); |
1354 | queue_events (EV_A_ (W *)prepares, preparecnt, EV_PREPARE); |
… | |
… | |
1358 | /* update fd-related kernel structures */ |
1362 | /* update fd-related kernel structures */ |
1359 | fd_reify (EV_A); |
1363 | fd_reify (EV_A); |
1360 | |
1364 | |
1361 | /* calculate blocking time */ |
1365 | /* calculate blocking time */ |
1362 | { |
1366 | { |
1363 | double block; |
1367 | ev_tstamp block; |
1364 | |
1368 | |
1365 | if (flags & EVLOOP_NONBLOCK || idlecnt) |
1369 | if (flags & EVLOOP_NONBLOCK || idlecnt) |
1366 | block = 0.; /* do not block at all */ |
1370 | block = 0.; /* do not block at all */ |
1367 | else |
1371 | else |
1368 | { |
1372 | { |
… | |
… | |
1713 | # endif |
1717 | # endif |
1714 | |
1718 | |
1715 | #define DEF_STAT_INTERVAL 5.0074891 |
1719 | #define DEF_STAT_INTERVAL 5.0074891 |
1716 | #define MIN_STAT_INTERVAL 0.1074891 |
1720 | #define MIN_STAT_INTERVAL 0.1074891 |
1717 | |
1721 | |
1718 | void noinline stat_timer_cb (EV_P_ ev_timer *w_, int revents); |
1722 | static void noinline stat_timer_cb (EV_P_ ev_timer *w_, int revents); |
1719 | |
1723 | |
1720 | #if EV_USE_INOTIFY |
1724 | #if EV_USE_INOTIFY |
1721 | # define EV_INOTIFY_BUFSIZE ((PATH_MAX + sizeof (struct inotify_event)) + 2048) |
1725 | # define EV_INOTIFY_BUFSIZE 8192 |
1722 | |
1726 | |
1723 | static void noinline |
1727 | static void noinline |
1724 | infy_add (EV_P_ ev_stat *w) |
1728 | infy_add (EV_P_ ev_stat *w) |
1725 | { |
1729 | { |
1726 | w->wd = inotify_add_watch (fs_fd, w->path, IN_ATTRIB | IN_DELETE_SELF | IN_MOVE_SELF | IN_MODIFY | IN_DONT_FOLLOW | IN_MASK_ADD); |
1730 | w->wd = inotify_add_watch (fs_fd, w->path, IN_ATTRIB | IN_DELETE_SELF | IN_MOVE_SELF | IN_MODIFY | IN_DONT_FOLLOW | IN_MASK_ADD); |
… | |
… | |
1728 | if (w->wd < 0) |
1732 | if (w->wd < 0) |
1729 | { |
1733 | { |
1730 | ev_timer_start (EV_A_ &w->timer); /* this is not race-free, so we still need to recheck periodically */ |
1734 | ev_timer_start (EV_A_ &w->timer); /* this is not race-free, so we still need to recheck periodically */ |
1731 | |
1735 | |
1732 | /* monitor some parent directory for speedup hints */ |
1736 | /* monitor some parent directory for speedup hints */ |
1733 | if (errno == ENOENT || errno == EACCES) |
1737 | if ((errno == ENOENT || errno == EACCES) && strlen (w->path) < 4096) |
1734 | { |
1738 | { |
1735 | char path [PATH_MAX]; |
1739 | char path [4096]; |
1736 | strcpy (path, w->path); |
1740 | strcpy (path, w->path); |
1737 | |
1741 | |
1738 | do |
1742 | do |
1739 | { |
1743 | { |
1740 | int mask = IN_MASK_ADD | IN_DELETE_SELF | IN_MOVE_SELF |
1744 | int mask = IN_MASK_ADD | IN_DELETE_SELF | IN_MOVE_SELF |
… | |
… | |
1744 | |
1748 | |
1745 | if (!pend) |
1749 | if (!pend) |
1746 | break; /* whoops, no '/', complain to your admin */ |
1750 | break; /* whoops, no '/', complain to your admin */ |
1747 | |
1751 | |
1748 | *pend = 0; |
1752 | *pend = 0; |
1749 | w->wd = inotify_add_watch (fs_fd, path, IN_DELETE_SELF | IN_CREATE | IN_MOVED_TO | IN_MASK_ADD); |
1753 | w->wd = inotify_add_watch (fs_fd, path, mask); |
1750 | } |
1754 | } |
1751 | while (w->wd < 0 && (errno == ENOENT || errno == EACCES)); |
1755 | while (w->wd < 0 && (errno == ENOENT || errno == EACCES)); |
1752 | } |
1756 | } |
1753 | } |
1757 | } |
1754 | else |
1758 | else |
… | |
… | |
1759 | } |
1763 | } |
1760 | |
1764 | |
1761 | static void noinline |
1765 | static void noinline |
1762 | infy_del (EV_P_ ev_stat *w) |
1766 | infy_del (EV_P_ ev_stat *w) |
1763 | { |
1767 | { |
1764 | WL w_; |
|
|
1765 | int slot; |
1768 | int slot; |
1766 | int wd = w->wd; |
1769 | int wd = w->wd; |
1767 | |
1770 | |
1768 | if (wd < 0) |
1771 | if (wd < 0) |
1769 | return; |
1772 | return; |
… | |
… | |
1798 | { |
1801 | { |
1799 | w->wd = -1; |
1802 | w->wd = -1; |
1800 | infy_add (EV_A_ w); /* re-add, no matter what */ |
1803 | infy_add (EV_A_ w); /* re-add, no matter what */ |
1801 | } |
1804 | } |
1802 | |
1805 | |
1803 | stat_timer_cb (EV_P_ &w->timer, 0); |
1806 | stat_timer_cb (EV_A_ &w->timer, 0); |
1804 | } |
1807 | } |
1805 | } |
1808 | } |
1806 | } |
1809 | } |
1807 | } |
1810 | } |
1808 | |
1811 | |
… | |
… | |
1832 | ev_set_priority (&fs_w, EV_MAXPRI); |
1835 | ev_set_priority (&fs_w, EV_MAXPRI); |
1833 | ev_io_start (EV_A_ &fs_w); |
1836 | ev_io_start (EV_A_ &fs_w); |
1834 | } |
1837 | } |
1835 | } |
1838 | } |
1836 | |
1839 | |
|
|
1840 | void inline_size |
|
|
1841 | infy_fork (EV_P) |
|
|
1842 | { |
|
|
1843 | int slot; |
|
|
1844 | |
|
|
1845 | if (fs_fd < 0) |
|
|
1846 | return; |
|
|
1847 | |
|
|
1848 | close (fs_fd); |
|
|
1849 | fs_fd = inotify_init (); |
|
|
1850 | |
|
|
1851 | for (slot = 0; slot < EV_INOTIFY_HASHSIZE; ++slot) |
|
|
1852 | { |
|
|
1853 | WL w_ = fs_hash [slot].head; |
|
|
1854 | fs_hash [slot].head = 0; |
|
|
1855 | |
|
|
1856 | while (w_) |
|
|
1857 | { |
|
|
1858 | ev_stat *w = (ev_stat *)w_; |
|
|
1859 | w_ = w_->next; /* lets us add this watcher */ |
|
|
1860 | |
|
|
1861 | w->wd = -1; |
|
|
1862 | |
|
|
1863 | if (fs_fd >= 0) |
|
|
1864 | infy_add (EV_A_ w); /* re-add, no matter what */ |
|
|
1865 | else |
|
|
1866 | ev_timer_start (EV_A_ &w->timer); |
|
|
1867 | } |
|
|
1868 | |
|
|
1869 | } |
|
|
1870 | } |
|
|
1871 | |
1837 | #endif |
1872 | #endif |
1838 | |
1873 | |
1839 | void |
1874 | void |
1840 | ev_stat_stat (EV_P_ ev_stat *w) |
1875 | ev_stat_stat (EV_P_ ev_stat *w) |
1841 | { |
1876 | { |
… | |
… | |
1843 | w->attr.st_nlink = 0; |
1878 | w->attr.st_nlink = 0; |
1844 | else if (!w->attr.st_nlink) |
1879 | else if (!w->attr.st_nlink) |
1845 | w->attr.st_nlink = 1; |
1880 | w->attr.st_nlink = 1; |
1846 | } |
1881 | } |
1847 | |
1882 | |
1848 | void noinline |
1883 | static void noinline |
1849 | stat_timer_cb (EV_P_ ev_timer *w_, int revents) |
1884 | stat_timer_cb (EV_P_ ev_timer *w_, int revents) |
1850 | { |
1885 | { |
1851 | ev_stat *w = (ev_stat *)(((char *)w_) - offsetof (ev_stat, timer)); |
1886 | ev_stat *w = (ev_stat *)(((char *)w_) - offsetof (ev_stat, timer)); |
1852 | |
1887 | |
1853 | /* we copy this here each time so that */ |
1888 | /* we copy this here each time so that */ |
1854 | /* prev has the old value when the callback gets invoked */ |
1889 | /* prev has the old value when the callback gets invoked */ |
1855 | w->prev = w->attr; |
1890 | w->prev = w->attr; |
1856 | ev_stat_stat (EV_A_ w); |
1891 | ev_stat_stat (EV_A_ w); |
1857 | |
1892 | |
1858 | if (memcmp (&w->prev, &w->attr, sizeof (ev_statdata))) |
1893 | /* memcmp doesn't work on netbsd, they.... do stuff to their struct stat */ |
|
|
1894 | if ( |
|
|
1895 | w->prev.st_dev != w->attr.st_dev |
|
|
1896 | || w->prev.st_ino != w->attr.st_ino |
|
|
1897 | || w->prev.st_mode != w->attr.st_mode |
|
|
1898 | || w->prev.st_nlink != w->attr.st_nlink |
|
|
1899 | || w->prev.st_uid != w->attr.st_uid |
|
|
1900 | || w->prev.st_gid != w->attr.st_gid |
|
|
1901 | || w->prev.st_rdev != w->attr.st_rdev |
|
|
1902 | || w->prev.st_size != w->attr.st_size |
|
|
1903 | || w->prev.st_atime != w->attr.st_atime |
|
|
1904 | || w->prev.st_mtime != w->attr.st_mtime |
|
|
1905 | || w->prev.st_ctime != w->attr.st_ctime |
1859 | { |
1906 | ) { |
1860 | #if EV_USE_INOTIFY |
1907 | #if EV_USE_INOTIFY |
1861 | infy_del (EV_A_ w); |
1908 | infy_del (EV_A_ w); |
1862 | infy_add (EV_A_ w); |
1909 | infy_add (EV_A_ w); |
1863 | ev_stat_stat (EV_A_ w); /* avoid race... */ |
1910 | ev_stat_stat (EV_A_ w); /* avoid race... */ |
1864 | #endif |
1911 | #endif |