/cvs/libev/ev.c

Comparing libev/ev.c (file contents):
Revision 1.231 by root, Mon May 5 20:47:33 2008 UTC vs.
Revision 1.247 by root, Wed May 21 21:22:10 2008 UTC

235# else 235# else
236# define EV_USE_EVENTFD 0 236# define EV_USE_EVENTFD 0
237# endif 237# endif
238#endif 238#endif
239 239
240#ifndef EV_USE_4HEAP
241# define EV_USE_4HEAP !EV_MINIMAL
242#endif
243
244#ifndef EV_HEAP_CACHE_AT
245# define EV_HEAP_CACHE_AT !EV_MINIMAL
246#endif
247
240/* this block fixes any misconfiguration where we know we run into trouble otherwise */ 248/* this block fixes any misconfiguration where we know we run into trouble otherwise */
241 249
242#ifndef CLOCK_MONOTONIC 250#ifndef CLOCK_MONOTONIC
243# undef EV_USE_MONOTONIC 251# undef EV_USE_MONOTONIC
244# define EV_USE_MONOTONIC 0 252# define EV_USE_MONOTONIC 0
422 W w; 430 W w;
423 int events; 431 int events;
424} ANPENDING; 432} ANPENDING;
425 433
426#if EV_USE_INOTIFY 434#if EV_USE_INOTIFY
435/* hash table entry per inotify-id */
427typedef struct 436typedef struct
428{ 437{
429 WL head; 438 WL head;
430} ANFS; 439} ANFS;
440#endif
441
442/* Heap Entry */
443#if EV_HEAP_CACHE_AT
444 typedef struct {
445 ev_tstamp at;
446 WT w;
447 } ANHE;
448
449 #define ANHE_w(he) (he).w /* access watcher, read-write */
450 #define ANHE_at(he) (he).at /* access cached at, read-only */
451 #define ANHE_at_set(he) (he).at = (he).w->at /* update at from watcher */
452#else
453 typedef WT ANHE;
454
455 #define ANHE_w(he) (he)
456 #define ANHE_at(he) (he)->at
457 #define ANHE_at_set(he)
431#endif 458#endif
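
The cached-at variant trades one extra ev_tstamp per heap slot for comparisons that never have to dereference the watcher. Below is a minimal, self-contained sketch of the two layouts (an editorial addition, not part of the diff; the watcher type is a simplified stand-in):

#include <stdio.h>

typedef double ev_tstamp;
typedef struct { ev_tstamp at; } watcher;                   /* stand-in for the real watcher  */

typedef struct { ev_tstamp at; watcher *w; } cached_entry;  /* EV_HEAP_CACHE_AT layout        */
typedef watcher *plain_entry;                               /* plain layout: just the pointer */

int main (void)
{
  watcher w = { 42.5 };
  cached_entry ce;
  plain_entry pe = &w;      /* the plain entry is the watcher pointer itself */

  ce.w  = &w;
  ce.at = ce.w->at;         /* the ANHE_at_set analogue: refresh the cached timestamp */

  printf ("cached %f, plain %f\n", ce.at, pe->at);
  return 0;
}
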
432 459
433#if EV_MULTIPLICITY 460#if EV_MULTIPLICITY
434 461
435 struct ev_loop 462 struct ev_loop
520 } 547 }
521} 548}
522 549
523/*****************************************************************************/ 550/*****************************************************************************/
524 551
552#define MALLOC_ROUND 4096 /* prefer to allocate in chunks of this size, must be 2**n and >> 4 longs */
553
525int inline_size 554int inline_size
526array_nextsize (int elem, int cur, int cnt) 555array_nextsize (int elem, int cur, int cnt)
527{ 556{
528 int ncur = cur + 1; 557 int ncur = cur + 1;
529 558
530 do 559 do
531 ncur <<= 1; 560 ncur <<= 1;
532 while (cnt > ncur); 561 while (cnt > ncur);
533 562
534 /* if size > 4096, round to 4096 - 4 * longs to accommodate malloc overhead */ 563 /* if size is large, round to MALLOC_ROUND - 4 * longs to accommodate malloc overhead */
535 if (elem * ncur > 4096) 564 if (elem * ncur > MALLOC_ROUND - sizeof (void *) * 4)
536 { 565 {
537 ncur *= elem; 566 ncur *= elem;
538 ncur = (ncur + elem + 4095 + sizeof (void *) * 4) & ~4095; 567 ncur = (ncur + elem + (MALLOC_ROUND - 1) + sizeof (void *) * 4) & ~(MALLOC_ROUND - 1);
539 ncur = ncur - sizeof (void *) * 4; 568 ncur = ncur - sizeof (void *) * 4;
540 ncur /= elem; 569 ncur /= elem;
541 } 570 }
542 571
543 return ncur; 572 return ncur;
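
The rounding policy in array_nextsize is easiest to verify in isolation. The following standalone sketch (an editorial addition, not part of the diff) reproduces the same arithmetic so sample values can be checked; the 4 * sizeof (void *) allowance approximates malloc bookkeeping overhead, as the comment above describes:

#include <stdio.h>

#define MALLOC_ROUND 4096

static int
nextsize (int elem, int cur, int cnt)
{
  int ncur = cur + 1;

  do
    ncur <<= 1;
  while (cnt > ncur);

  /* large requests: round the byte size up to a MALLOC_ROUND boundary, then
   * give back 4 pointer-sized words as an allowance for malloc bookkeeping */
  if (elem * ncur > MALLOC_ROUND - sizeof (void *) * 4)
    {
      ncur *= elem;
      ncur = (ncur + elem + (MALLOC_ROUND - 1) + sizeof (void *) * 4) & ~(MALLOC_ROUND - 1);
      ncur = ncur - sizeof (void *) * 4;
      ncur /= elem;
    }

  return ncur;
}

int main (void)
{
  /* growing a 16-byte-element array that currently holds 1024 entries to fit 1025 */
  printf ("%d\n", nextsize (16, 1024, 1025));
  return 0;
}
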
757 } 786 }
758} 787}
759 788
760/*****************************************************************************/ 789/*****************************************************************************/
761 790
791/*
792 * the heap functions want a real array index. array index 0 is guaranteed to not
793 * be in-use at any time. the first heap entry is at array [HEAP0]. DHEAP gives
794 * the branching factor of the d-tree.
795 */
796
797/*
798 * at the moment we allow libev the luxury of two heaps,
799 * a small-code-size 2-heap one and a ~1.5kb larger 4-heap
800 * which is more cache-efficient.
801 * the difference is about 5% with 50000+ watchers.
802 */
803#if EV_USE_4HEAP
804
805#define DHEAP 4
806#define HEAP0 (DHEAP - 1) /* index of first element in heap */
807#define HPARENT(k) ((((k) - HEAP0 - 1) / DHEAP) + HEAP0)
808
762/* towards the root */ 809/* towards the root */
763void inline_speed 810void inline_speed
764upheap (WT *heap, int k) 811upheap (ANHE *heap, int k)
765{ 812{
766 WT w = heap [k]; 813 ANHE he = heap [k];
767 814
768 for (;;) 815 for (;;)
769 { 816 {
770 int p = k >> 1; 817 int p = HPARENT (k);
771 818
772 /* maybe we could use a dummy element at heap [0]? */ 819 if (p == k || ANHE_at (heap [p]) <= ANHE_at (he))
773 if (!p || heap [p]->at <= w->at)
774 break; 820 break;
775 821
776 heap [k] = heap [p]; 822 heap [k] = heap [p];
777 ev_active (heap [k]) = k; 823 ev_active (ANHE_w (heap [k])) = k;
778 k = p; 824 k = p;
779 } 825 }
780 826
781 heap [k] = w; 827 heap [k] = he;
782 ev_active (heap [k]) = k; 828 ev_active (ANHE_w (he)) = k;
783} 829}
784 830
785/* away from the root */ 831/* away from the root */
786void inline_speed 832void inline_speed
787downheap (WT *heap, int N, int k) 833downheap (ANHE *heap, int N, int k)
788{ 834{
789 WT w = heap [k]; 835 ANHE he = heap [k];
836 ANHE *E = heap + N + HEAP0;
837
838 for (;;)
839 {
840 ev_tstamp minat;
841 ANHE *minpos;
842 ANHE *pos = heap + DHEAP * (k - HEAP0) + HEAP0;
843
844 // find minimum child
845 if (expect_true (pos + DHEAP - 1 < E))
846 {
847 /* fast path */ (minpos = pos + 0), (minat = ANHE_at (*minpos));
848 if ( ANHE_at (pos [1]) < minat) (minpos = pos + 1), (minat = ANHE_at (*minpos));
849 if ( ANHE_at (pos [2]) < minat) (minpos = pos + 2), (minat = ANHE_at (*minpos));
850 if ( ANHE_at (pos [3]) < minat) (minpos = pos + 3), (minat = ANHE_at (*minpos));
851 }
852 else if (pos < E)
853 {
854 /* slow path */ (minpos = pos + 0), (minat = ANHE_at (*minpos));
855 if (pos + 1 < E && ANHE_at (pos [1]) < minat) (minpos = pos + 1), (minat = ANHE_at (*minpos));
856 if (pos + 2 < E && ANHE_at (pos [2]) < minat) (minpos = pos + 2), (minat = ANHE_at (*minpos));
857 if (pos + 3 < E && ANHE_at (pos [3]) < minat) (minpos = pos + 3), (minat = ANHE_at (*minpos));
858 }
859 else
860 break;
861
862 if (ANHE_at (he) <= minat)
863 break;
864
865 heap [k] = *minpos;
866 ev_active (ANHE_w (*minpos)) = k;
867
868 k = minpos - heap;
869 }
870
871 heap [k] = he;
872 ev_active (ANHE_w (he)) = k;
873}
874
875#else // 4HEAP
876
877#define HEAP0 1
878#define HPARENT(k) ((k) >> 1)
879
880/* towards the root */
881void inline_speed
882upheap (ANHE *heap, int k)
883{
884 ANHE he = heap [k];
885
886 for (;;)
887 {
888 int p = HPARENT (k);
889
890 /* maybe we could use a dummy element at heap [0]? */
891 if (!p || ANHE_at (heap [p]) <= ANHE_at (he))
892 break;
893
894 heap [k] = heap [p];
895 ev_active (ANHE_w (heap [k])) = k;
896 k = p;
897 }
898
899 heap [k] = he;
900 ev_active (ANHE_w (heap [k])) = k;
901}
902
903/* away from the root */
904void inline_speed
905downheap (ANHE *heap, int N, int k)
906{
907 ANHE he = heap [k];
790 908
791 for (;;) 909 for (;;)
792 { 910 {
793 int c = k << 1; 911 int c = k << 1;
794 912
795 if (c > N) 913 if (c > N)
796 break; 914 break;
797 915
798 c += c < N && heap [c]->at > heap [c + 1]->at 916 c += c + 1 < N && ANHE_at (heap [c]) > ANHE_at (heap [c + 1])
799 ? 1 : 0; 917 ? 1 : 0;
800 918
801 if (w->at <= heap [c]->at) 919 if (ANHE_at (he) <= ANHE_at (heap [c]))
802 break; 920 break;
803 921
804 heap [k] = heap [c]; 922 heap [k] = heap [c];
805 ev_active (heap [k]) = k; 923 ev_active (ANHE_w (heap [k])) = k;
806 924
807 k = c; 925 k = c;
808 } 926 }
809 927
810 heap [k] = w; 928 heap [k] = he;
811 ev_active (heap [k]) = k; 929 ev_active (ANHE_w (he)) = k;
812} 930}
931#endif
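
The HEAP0/HPARENT conventions above can be sanity-checked on their own. This editorial sketch (not part of the diff) prints the parent of the 4-heap root, which is the root itself and is what upheap's p == k termination test relies on, and derives each slot's children by inverting HPARENT; in the 2-heap case the same role is played by HEAP0 = 1 and HPARENT(k) = k >> 1:

#include <stdio.h>

#define DHEAP 4
#define HEAP0 (DHEAP - 1)
#define HPARENT(k) ((((k) - HEAP0 - 1) / DHEAP) + HEAP0)

int main (void)
{
  int k, c;

  /* the root is its own parent */
  printf ("HPARENT (HEAP0) = %d\n", HPARENT (HEAP0));

  /* list the children of the first few slots by inverting HPARENT */
  for (k = HEAP0; k < HEAP0 + 3; ++k)
    {
      printf ("children of %d:", k);

      for (c = HEAP0 + 1; c < HEAP0 + 32; ++c)
        if (HPARENT (c) == k)
          printf (" %d", c);

      printf ("\n");
    }

  return 0;
}
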
813 932
814void inline_size 933void inline_size
815adjustheap (WT *heap, int N, int k) 934adjustheap (ANHE *heap, int N, int k)
816{ 935{
936 if (k > HEAP0 && ANHE_at (heap [HPARENT (k)]) >= ANHE_at (heap [k]))
817 upheap (heap, k); 937 upheap (heap, k);
938 else
818 downheap (heap, N, k); 939 downheap (heap, N, k);
819} 940}
820 941
821/*****************************************************************************/ 942/*****************************************************************************/
822 943
823typedef struct 944typedef struct
912pipecb (EV_P_ ev_io *iow, int revents) 1033pipecb (EV_P_ ev_io *iow, int revents)
913{ 1034{
914#if EV_USE_EVENTFD 1035#if EV_USE_EVENTFD
915 if (evfd >= 0) 1036 if (evfd >= 0)
916 { 1037 {
917 uint64_t counter = 1; 1038 uint64_t counter;
918 read (evfd, &counter, sizeof (uint64_t)); 1039 read (evfd, &counter, sizeof (uint64_t));
919 } 1040 }
920 else 1041 else
921#endif 1042#endif
922 { 1043 {
1368void 1489void
1369ev_loop_fork (EV_P) 1490ev_loop_fork (EV_P)
1370{ 1491{
1371 postfork = 1; /* must be in line with ev_default_fork */ 1492 postfork = 1; /* must be in line with ev_default_fork */
1372} 1493}
1373
1374#endif 1494#endif
1375 1495
1376#if EV_MULTIPLICITY 1496#if EV_MULTIPLICITY
1377struct ev_loop * 1497struct ev_loop *
1378ev_default_loop_init (unsigned int flags) 1498ev_default_loop_init (unsigned int flags)
1459 EV_CB_INVOKE (p->w, p->events); 1579 EV_CB_INVOKE (p->w, p->events);
1460 } 1580 }
1461 } 1581 }
1462} 1582}
1463 1583
1464void inline_size
1465timers_reify (EV_P)
1466{
1467 while (timercnt && ev_at (timers [1]) <= mn_now)
1468 {
1469 ev_timer *w = (ev_timer *)timers [1];
1470
1471 /*assert (("inactive timer on timer heap detected", ev_is_active (w)));*/
1472
1473 /* first reschedule or stop timer */
1474 if (w->repeat)
1475 {
1476 assert (("negative ev_timer repeat value found while processing timers", w->repeat > 0.));
1477
1478 ev_at (w) += w->repeat;
1479 if (ev_at (w) < mn_now)
1480 ev_at (w) = mn_now;
1481
1482 downheap (timers, timercnt, 1);
1483 }
1484 else
1485 ev_timer_stop (EV_A_ w); /* nonrepeating: stop timer */
1486
1487 ev_feed_event (EV_A_ (W)w, EV_TIMEOUT);
1488 }
1489}
1490
1491#if EV_PERIODIC_ENABLE
1492void inline_size
1493periodics_reify (EV_P)
1494{
1495 while (periodiccnt && ev_at (periodics [1]) <= ev_rt_now)
1496 {
1497 ev_periodic *w = (ev_periodic *)periodics [1];
1498
1499 /*assert (("inactive timer on periodic heap detected", ev_is_active (w)));*/
1500
1501 /* first reschedule or stop timer */
1502 if (w->reschedule_cb)
1503 {
1504 ev_at (w) = w->reschedule_cb (w, ev_rt_now + TIME_EPSILON);
1505 assert (("ev_periodic reschedule callback returned time in the past", ev_at (w) > ev_rt_now));
1506 downheap (periodics, periodiccnt, 1);
1507 }
1508 else if (w->interval)
1509 {
1510 ev_at (w) = w->offset + ceil ((ev_rt_now - w->offset) / w->interval) * w->interval;
1511 if (ev_at (w) - ev_rt_now <= TIME_EPSILON) ev_at (w) += w->interval;
1512 assert (("ev_periodic timeout in the past detected while processing timers, negative interval?", ev_at (w) > ev_rt_now));
1513 downheap (periodics, periodiccnt, 1);
1514 }
1515 else
1516 ev_periodic_stop (EV_A_ w); /* nonrepeating: stop timer */
1517
1518 ev_feed_event (EV_A_ (W)w, EV_PERIODIC);
1519 }
1520}
1521
1522static void noinline
1523periodics_reschedule (EV_P)
1524{
1525 int i;
1526
1527 /* adjust periodics after time jump */
1528 for (i = 1; i <= periodiccnt; ++i)
1529 {
1530 ev_periodic *w = (ev_periodic *)periodics [i];
1531
1532 if (w->reschedule_cb)
1533 ev_at (w) = w->reschedule_cb (w, ev_rt_now);
1534 else if (w->interval)
1535 ev_at (w) = w->offset + ceil ((ev_rt_now - w->offset) / w->interval) * w->interval;
1536 }
1537
1538 /* now rebuild the heap */
1539 for (i = periodiccnt >> 1; i--; )
1540 downheap (periodics, periodiccnt, i);
1541}
1542#endif
1543
1544#if EV_IDLE_ENABLE 1584#if EV_IDLE_ENABLE
1545void inline_size 1585void inline_size
1546idle_reify (EV_P) 1586idle_reify (EV_P)
1547{ 1587{
1548 if (expect_false (idleall)) 1588 if (expect_false (idleall))
1559 queue_events (EV_A_ (W *)idles [pri], idlecnt [pri], EV_IDLE); 1599 queue_events (EV_A_ (W *)idles [pri], idlecnt [pri], EV_IDLE);
1560 break; 1600 break;
1561 } 1601 }
1562 } 1602 }
1563 } 1603 }
1604}
1605#endif
1606
1607void inline_size
1608timers_reify (EV_P)
1609{
1610 while (timercnt && ANHE_at (timers [HEAP0]) < mn_now)
1611 {
1612 ev_timer *w = (ev_timer *)ANHE_w (timers [HEAP0]);
1613
1614 /*assert (("inactive timer on timer heap detected", ev_is_active (w)));*/
1615
1616 /* first reschedule or stop timer */
1617 if (w->repeat)
1618 {
1619 ev_at (w) += w->repeat;
1620 if (ev_at (w) < mn_now)
1621 ev_at (w) = mn_now;
1622
1623 assert (("negative ev_timer repeat value found while processing timers", w->repeat > 0.));
1624
1625 ANHE_at_set (timers [HEAP0]);
1626 downheap (timers, timercnt, HEAP0);
1627 }
1628 else
1629 ev_timer_stop (EV_A_ w); /* nonrepeating: stop timer */
1630
1631 ev_feed_event (EV_A_ (W)w, EV_TIMEOUT);
1632 }
1633}
1634
1635#if EV_PERIODIC_ENABLE
1636void inline_size
1637periodics_reify (EV_P)
1638{
1639 while (periodiccnt && ANHE_at (periodics [HEAP0]) < ev_rt_now)
1640 {
1641 ev_periodic *w = (ev_periodic *)ANHE_w (periodics [HEAP0]);
1642
1643 /*assert (("inactive timer on periodic heap detected", ev_is_active (w)));*/
1644
1645 /* first reschedule or stop timer */
1646 if (w->reschedule_cb)
1647 {
1648 ev_at (w) = w->reschedule_cb (w, ev_rt_now);
1649
1650 assert (("ev_periodic reschedule callback returned time in the past", ev_at (w) >= ev_rt_now));
1651
1652 ANHE_at_set (periodics [HEAP0]);
1653 downheap (periodics, periodiccnt, HEAP0);
1654 }
1655 else if (w->interval)
1656 {
1657 ev_at (w) = w->offset + ceil ((ev_rt_now - w->offset) / w->interval) * w->interval;
1658 /* if next trigger time is not sufficiently in the future, put it there */
1659 /* this might happen because of floating point inexactness */
1660 if (ev_at (w) - ev_rt_now < TIME_EPSILON)
1661 {
1662 ev_at (w) += w->interval;
1663
1664 /* if interval is unreasonably low we might still have a time in the past */
1665 /* so correct this. this will make the periodic very inexact, but the user */
1666 /* has effectively asked to get triggered more often than possible */
1667 if (ev_at (w) < ev_rt_now)
1668 ev_at (w) = ev_rt_now;
1669 }
1670
1671 ANHE_at_set (periodics [HEAP0]);
1672 downheap (periodics, periodiccnt, HEAP0);
1673 }
1674 else
1675 ev_periodic_stop (EV_A_ w); /* nonrepeating: stop timer */
1676
1677 ev_feed_event (EV_A_ (W)w, EV_PERIODIC);
1678 }
1679}
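
The interval branch above snaps the next trigger onto the offset + N * interval grid and nudges it forward when floating-point rounding leaves it too close to now. Here is a standalone sketch of that formula (an editorial addition, not part of the diff; the TIME_EPSILON value is a placeholder, not necessarily the one defined earlier in ev.c, and the clamp-to-now fallback is omitted):

#include <math.h>
#include <stdio.h>

#define TIME_EPSILON 1e-4 /* placeholder value, for illustration only */

static double
next_trigger (double now, double offset, double interval)
{
  /* snap onto the offset + N * interval grid, never before "now" */
  double at = offset + ceil ((now - offset) / interval) * interval;

  /* too close to (or before) now because of rounding: skip one interval ahead */
  if (at - now < TIME_EPSILON)
    at += interval;

  return at;
}

int main (void)
{
  /* a periodic with offset 0. and a 60 s interval */
  printf ("%f\n", next_trigger (120.0, 0., 60.)); /* exactly on the grid -> 180 */
  printf ("%f\n", next_trigger (121.0, 0., 60.)); /* -> 180                     */
  return 0;
}
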
1680
1681static void noinline
1682periodics_reschedule (EV_P)
1683{
1684 int i;
1685
1686 /* adjust periodics after time jump */
1687 for (i = HEAP0; i < periodiccnt + HEAP0; ++i)
1688 {
1689 ev_periodic *w = (ev_periodic *)ANHE_w (periodics [i]);
1690
1691 if (w->reschedule_cb)
1692 ev_at (w) = w->reschedule_cb (w, ev_rt_now);
1693 else if (w->interval)
1694 ev_at (w) = w->offset + ceil ((ev_rt_now - w->offset) / w->interval) * w->interval;
1695
1696 ANHE_at_set (periodics [i]);
1697 }
1698
1699 /* we don't use Floyd's algorithm, upheap is simpler and more cache-efficient */
1700 /* also, this is easy and correct for both 2-heaps and 4-heaps */
1701 for (i = 0; i < periodiccnt; ++i)
1702 upheap (periodics, i + HEAP0);
1564} 1703}
1565#endif 1704#endif
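
periodics_reschedule rebuilds the heap by walking the array front-to-back and calling upheap on every slot, rather than using Floyd's bottom-up construction. An editorial sketch (not part of the diff) of that rebuild strategy, shown on a plain int min-2-heap with the conventional 1-based layout:

#include <stdio.h>

#define N 7

static void
upheap_int (int *heap, int k)
{
  int v = heap [k];

  while (k > 1 && heap [k >> 1] > v)
    {
      heap [k] = heap [k >> 1]; /* pull the larger parent down */
      k >>= 1;
    }

  heap [k] = v;
}

int main (void)
{
  /* slot 0 unused, keys deliberately out of heap order */
  int heap [N + 1] = { 0, 9, 3, 8, 1, 7, 2, 5 };
  int i;

  for (i = 1; i <= N; ++i)
    upheap_int (heap, i);     /* rebuild front-to-back */

  for (i = 1; i <= N; ++i)
    printf ("%d ", heap [i]); /* heap [1] is now the minimum */

  printf ("\n");
  return 0;
}
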
1566 1705
1567void inline_speed 1706void inline_speed
1568time_update (EV_P_ ev_tstamp max_block) 1707time_update (EV_P_ ev_tstamp max_block)
1597 */ 1736 */
1598 for (i = 4; --i; ) 1737 for (i = 4; --i; )
1599 { 1738 {
1600 rtmn_diff = ev_rt_now - mn_now; 1739 rtmn_diff = ev_rt_now - mn_now;
1601 1740
1602 if (fabs (odiff - rtmn_diff) < MIN_TIMEJUMP) 1741 if (expect_true (fabs (odiff - rtmn_diff) < MIN_TIMEJUMP))
1603 return; /* all is well */ 1742 return; /* all is well */
1604 1743
1605 ev_rt_now = ev_time (); 1744 ev_rt_now = ev_time ();
1606 mn_now = get_clock (); 1745 mn_now = get_clock ();
1607 now_floor = mn_now; 1746 now_floor = mn_now;
1622 { 1761 {
1623#if EV_PERIODIC_ENABLE 1762#if EV_PERIODIC_ENABLE
1624 periodics_reschedule (EV_A); 1763 periodics_reschedule (EV_A);
1625#endif 1764#endif
1626 /* adjust timers. this is easy, as the offset is the same for all of them */ 1765 /* adjust timers. this is easy, as the offset is the same for all of them */
1627 for (i = 1; i <= timercnt; ++i) 1766 for (i = 0; i < timercnt; ++i)
1628 ev_at (timers [i]) += ev_rt_now - mn_now; 1767 {
1768 ANHE *he = timers + i + HEAP0;
1769 ANHE_w (*he)->at += ev_rt_now - mn_now;
1770 ANHE_at_set (*he);
1771 }
1629 } 1772 }
1630 1773
1631 mn_now = ev_rt_now; 1774 mn_now = ev_rt_now;
1632 } 1775 }
1633} 1776}
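
The time-jump branch above shifts every timer by the same delta before rebasing mn_now, so the time remaining until each expiry is unchanged. A small editorial sketch (not part of the diff) of that arithmetic, using made-up clock values:

#include <stdio.h>

int main (void)
{
  double mn_now    = 1000.0;  /* old "now" on the timer clock                */
  double ev_rt_now = 5000.0;  /* realtime clock after a (simulated) jump     */
  double deadline  = 1007.5;  /* a timer due 7.5 s from now                  */

  deadline += ev_rt_now - mn_now;  /* the per-timer adjustment in time_update */
  mn_now    = ev_rt_now;           /* rebase                                  */

  printf ("remaining: %f\n", deadline - mn_now); /* still 7.5 */
  return 0;
}
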
1703 1846
1704 waittime = MAX_BLOCKTIME; 1847 waittime = MAX_BLOCKTIME;
1705 1848
1706 if (timercnt) 1849 if (timercnt)
1707 { 1850 {
1708 ev_tstamp to = ev_at (timers [1]) - mn_now + backend_fudge; 1851 ev_tstamp to = ANHE_at (timers [HEAP0]) - mn_now + backend_fudge;
1709 if (waittime > to) waittime = to; 1852 if (waittime > to) waittime = to;
1710 } 1853 }
1711 1854
1712#if EV_PERIODIC_ENABLE 1855#if EV_PERIODIC_ENABLE
1713 if (periodiccnt) 1856 if (periodiccnt)
1714 { 1857 {
1715 ev_tstamp to = ev_at (periodics [1]) - ev_rt_now + backend_fudge; 1858 ev_tstamp to = ANHE_at (periodics [HEAP0]) - ev_rt_now + backend_fudge;
1716 if (waittime > to) waittime = to; 1859 if (waittime > to) waittime = to;
1717 } 1860 }
1718#endif 1861#endif
1719 1862
1720 if (expect_false (waittime < timeout_blocktime)) 1863 if (expect_false (waittime < timeout_blocktime))
1872{ 2015{
1873 clear_pending (EV_A_ (W)w); 2016 clear_pending (EV_A_ (W)w);
1874 if (expect_false (!ev_is_active (w))) 2017 if (expect_false (!ev_is_active (w)))
1875 return; 2018 return;
1876 2019
1877 assert (("ev_io_start called with illegal fd (must stay constant after start!)", w->fd >= 0 && w->fd < anfdmax)); 2020 assert (("ev_io_stop called with illegal fd (must stay constant after start!)", w->fd >= 0 && w->fd < anfdmax));
1878 2021
1879 wlist_del (&anfds[w->fd].head, (WL)w); 2022 wlist_del (&anfds[w->fd].head, (WL)w);
1880 ev_stop (EV_A_ (W)w); 2023 ev_stop (EV_A_ (W)w);
1881 2024
1882 fd_change (EV_A_ w->fd, 1); 2025 fd_change (EV_A_ w->fd, 1);
1890 2033
1891 ev_at (w) += mn_now; 2034 ev_at (w) += mn_now;
1892 2035
1893 assert (("ev_timer_start called with negative timer repeat value", w->repeat >= 0.)); 2036 assert (("ev_timer_start called with negative timer repeat value", w->repeat >= 0.));
1894 2037
1895 ev_start (EV_A_ (W)w, ++timercnt); 2038 ev_start (EV_A_ (W)w, ++timercnt + HEAP0 - 1);
1896 array_needsize (WT, timers, timermax, timercnt + 1, EMPTY2); 2039 array_needsize (ANHE, timers, timermax, ev_active (w) + 1, EMPTY2);
1897 timers [timercnt] = (WT)w; 2040 ANHE_w (timers [ev_active (w)]) = (WT)w;
2041 ANHE_at_set (timers [ev_active (w)]);
1898 upheap (timers, timercnt); 2042 upheap (timers, ev_active (w));
1899 2043
1900 /*assert (("internal timer heap corruption", timers [ev_active (w)] == w));*/ 2044 /*assert (("internal timer heap corruption", timers [ev_active (w)] == (WT)w));*/
1901} 2045}
1902 2046
1903void noinline 2047void noinline
1904ev_timer_stop (EV_P_ ev_timer *w) 2048ev_timer_stop (EV_P_ ev_timer *w)
1905{ 2049{
1908 return; 2052 return;
1909 2053
1910 { 2054 {
1911 int active = ev_active (w); 2055 int active = ev_active (w);
1912 2056
1913 assert (("internal timer heap corruption", timers [active] == (WT)w)); 2057 assert (("internal timer heap corruption", ANHE_w (timers [active]) == (WT)w));
1914 2058
1915 if (expect_true (active < timercnt)) 2059 if (expect_true (active < timercnt + HEAP0 - 1))
1916 { 2060 {
1917 timers [active] = timers [timercnt]; 2061 timers [active] = timers [timercnt + HEAP0 - 1];
1918 adjustheap (timers, timercnt, active); 2062 adjustheap (timers, timercnt, active);
1919 } 2063 }
1920 2064
1921 --timercnt; 2065 --timercnt;
1922 } 2066 }
1932 if (ev_is_active (w)) 2076 if (ev_is_active (w))
1933 { 2077 {
1934 if (w->repeat) 2078 if (w->repeat)
1935 { 2079 {
1936 ev_at (w) = mn_now + w->repeat; 2080 ev_at (w) = mn_now + w->repeat;
2081 ANHE_at_set (timers [ev_active (w)]);
1937 adjustheap (timers, timercnt, ev_active (w)); 2082 adjustheap (timers, timercnt, ev_active (w));
1938 } 2083 }
1939 else 2084 else
1940 ev_timer_stop (EV_A_ w); 2085 ev_timer_stop (EV_A_ w);
1941 } 2086 }
1962 ev_at (w) = w->offset + ceil ((ev_rt_now - w->offset) / w->interval) * w->interval; 2107 ev_at (w) = w->offset + ceil ((ev_rt_now - w->offset) / w->interval) * w->interval;
1963 } 2108 }
1964 else 2109 else
1965 ev_at (w) = w->offset; 2110 ev_at (w) = w->offset;
1966 2111
1967 ev_start (EV_A_ (W)w, ++periodiccnt); 2112 ev_start (EV_A_ (W)w, ++periodiccnt + HEAP0 - 1);
1968 array_needsize (WT, periodics, periodicmax, periodiccnt + 1, EMPTY2); 2113 array_needsize (ANHE, periodics, periodicmax, ev_active (w) + 1, EMPTY2);
1969 periodics [periodiccnt] = (WT)w; 2114 ANHE_w (periodics [ev_active (w)]) = (WT)w;
1970 upheap (periodics, periodiccnt); 2115 ANHE_at_set (periodics [ev_active (w)]);
2116 upheap (periodics, ev_active (w));
1971 2117
1972 /*assert (("internal periodic heap corruption", periodics [ev_active (w)] == w));*/ 2118 /*assert (("internal periodic heap corruption", ANHE_w (periodics [ev_active (w)]) == (WT)w));*/
1973} 2119}
1974 2120
1975void noinline 2121void noinline
1976ev_periodic_stop (EV_P_ ev_periodic *w) 2122ev_periodic_stop (EV_P_ ev_periodic *w)
1977{ 2123{
1980 return; 2126 return;
1981 2127
1982 { 2128 {
1983 int active = ev_active (w); 2129 int active = ev_active (w);
1984 2130
1985 assert (("internal periodic heap corruption", periodics [active] == (WT)w)); 2131 assert (("internal periodic heap corruption", ANHE_w (periodics [active]) == (WT)w));
1986 2132
1987 if (expect_true (active < periodiccnt)) 2133 if (expect_true (active < periodiccnt + HEAP0 - 1))
1988 { 2134 {
1989 periodics [active] = periodics [periodiccnt]; 2135 periodics [active] = periodics [periodiccnt + HEAP0 - 1];
1990 adjustheap (periodics, periodiccnt, active); 2136 adjustheap (periodics, periodiccnt, active);
1991 } 2137 }
1992 2138
1993 --periodiccnt; 2139 --periodiccnt;
1994 } 2140 }
2114 if (w->wd < 0) 2260 if (w->wd < 0)
2115 { 2261 {
2116 ev_timer_start (EV_A_ &w->timer); /* this is not race-free, so we still need to recheck periodically */ 2262 ev_timer_start (EV_A_ &w->timer); /* this is not race-free, so we still need to recheck periodically */
2117 2263
2118 /* monitor some parent directory for speedup hints */ 2264 /* monitor some parent directory for speedup hints */
2265 /* note that exceeding the hardcoded limit is not a correctness issue, */
2266 /* but an efficiency issue only */
2119 if ((errno == ENOENT || errno == EACCES) && strlen (w->path) < 4096) 2267 if ((errno == ENOENT || errno == EACCES) && strlen (w->path) < 4096)
2120 { 2268 {
2121 char path [4096]; 2269 char path [4096];
2122 strcpy (path, w->path); 2270 strcpy (path, w->path);
2123 2271
