/cvs/libev/ev.c

Comparing libev/ev.c (file contents):
Revision 1.228 by root, Fri May 2 08:07:37 2008 UTC vs.
Revision 1.246 by root, Wed May 21 12:51:38 2008 UTC

@@ -235 +235 @@
 # else
 # define EV_USE_EVENTFD 0
 # endif
 #endif

+#ifndef EV_USE_4HEAP
+# define EV_USE_4HEAP !EV_MINIMAL
+#endif
+
+#ifndef EV_HEAP_CACHE_AT
+# define EV_HEAP_CACHE_AT !EV_MINIMAL
+#endif
+
 /* this block fixes any misconfiguration where we know we run into trouble otherwise */

 #ifndef CLOCK_MONOTONIC
 # undef EV_USE_MONOTONIC
 # define EV_USE_MONOTONIC 0
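Both new knobs default to !EV_MINIMAL, but the #ifndef guards mean they can be overridden at compile time. A minimal sketch of doing so, assuming the usual libev embedding setup where a wrapper file defines the EV_* symbols and then includes ev.c (the file name and the chosen values are illustrative only):

    /* myev.c - hypothetical embedding wrapper, not part of this diff */
    #define EV_USE_4HEAP     1  /* opt into the 4-ary timer/periodic heap */
    #define EV_HEAP_CACHE_AT 1  /* cache each watcher's timestamp in its heap entry */

    #include "ev.c"

Given the #ifndef guards, passing -DEV_USE_4HEAP=1 -DEV_HEAP_CACHE_AT=1 on the compiler command line should work equally well.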
@@ -325 +333 @@

 typedef ev_watcher *W;
 typedef ev_watcher_list *WL;
 typedef ev_watcher_time *WT;

+#define ev_active(w) ((W)(w))->active
 #define ev_at(w) ((WT)(w))->at

 #if EV_USE_MONOTONIC
 /* sig_atomic_t is used to avoid per-thread variables or locking but still */
 /* giving it a reasonably high chance of working on typical architectures */
@@ -421 +430 @@
   W w;
   int events;
 } ANPENDING;

 #if EV_USE_INOTIFY
+/* hash table entry per inotify-id */
 typedef struct
 {
   WL head;
 } ANFS;
+#endif
+
+/* Heap Entry */
+#if EV_HEAP_CACHE_AT
+  typedef struct {
+    ev_tstamp at;
+    WT w;
+  } ANHE;
+
+  #define ANHE_w(he)      (he).w                 /* access watcher, read-write */
+  #define ANHE_at(he)     (he).at                /* access cached at, read-only */
+  #define ANHE_at_set(he) (he).at = (he).w->at   /* update at from watcher */
+#else
+  typedef WT ANHE;
+
+  #define ANHE_w(he)      (he)
+  #define ANHE_at(he)     (he)->at
+  #define ANHE_at_set(he)
 #endif

 #if EV_MULTIPLICITY

  struct ev_loop
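The point of the cached variant is that a heap comparison only needs the timestamp, so storing it next to the watcher pointer keeps comparisons inside the heap array instead of chasing a pointer per element. A standalone sketch of the two layouts (the tiny typedefs and the driver are invented for illustration; only the idea mirrors the ANHE macros above):

    /* standalone illustration, not part of ev.c */
    #include <stdio.h>

    typedef double ev_tstamp;
    typedef struct { ev_tstamp at; } ev_watcher_time;
    typedef ev_watcher_time *WT;

    /* cached variant (EV_HEAP_CACHE_AT): one extra ev_tstamp per heap slot,
       but comparisons read it without touching the watcher's memory */
    typedef struct { ev_tstamp at; WT w; } ANHE_cached;
    #define CACHED_at(he) ((he).at)

    /* plain variant: the heap slot is just the watcher pointer,
       so every comparison dereferences it */
    typedef WT ANHE_plain;
    #define PLAIN_at(he) ((he)->at)

    int main (void)
    {
      ev_watcher_time w = { 42. };
      ANHE_cached hc = { w.at, &w };
      ANHE_plain  hp = &w;

      printf ("cached slot: %zu bytes, at=%g; plain slot: %zu bytes, at=%g\n",
              sizeof hc, CACHED_at (hc), sizeof hp, PLAIN_at (hp));
      return 0;
    }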
@@ -519 +547 @@
   }
 }

 /*****************************************************************************/

+#define MALLOC_ROUND 4096 /* prefer to allocate in chunks of this size, must be 2**n and >> 4 longs */
+
 int inline_size
 array_nextsize (int elem, int cur, int cnt)
 {
   int ncur = cur + 1;

   do
     ncur <<= 1;
   while (cnt > ncur);

-  /* if size > 4096, round to 4096 - 4 * longs to accommodate malloc overhead */
-  if (elem * ncur > 4096)
+  /* if size is large, round to MALLOC_ROUND - 4 * longs to accommodate malloc overhead */
+  if (elem * ncur > MALLOC_ROUND - sizeof (void *) * 4)
     {
       ncur *= elem;
-      ncur = (ncur + elem + 4095 + sizeof (void *) * 4) & ~4095;
+      ncur = (ncur + elem + (MALLOC_ROUND - 1) + sizeof (void *) * 4) & ~(MALLOC_ROUND - 1);
       ncur = ncur - sizeof (void *) * 4;
       ncur /= elem;
     }

   return ncur;
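The growth policy above can be tried in isolation: double the element count until the request fits, and once the allocation grows beyond a chunk, round the byte size up to a multiple of MALLOC_ROUND minus four pointers of assumed allocator overhead. A standalone sketch copying the new revision's logic, with an illustrative driver (the example element type and counts are arbitrary):

    /* standalone sketch of the growth policy, not part of ev.c */
    #include <stdio.h>

    #define MALLOC_ROUND 4096

    static int
    array_nextsize (int elem, int cur, int cnt)
    {
      int ncur = cur + 1;

      do
        ncur <<= 1;
      while (cnt > ncur);

      /* round large requests so elem * ncur plus assumed malloc overhead
         fills whole MALLOC_ROUND-sized chunks */
      if (elem * ncur > MALLOC_ROUND - sizeof (void *) * 4)
        {
          ncur *= elem;
          ncur = (ncur + elem + (MALLOC_ROUND - 1) + sizeof (void *) * 4) & ~(MALLOC_ROUND - 1);
          ncur = ncur - sizeof (void *) * 4;
          ncur /= elem;
        }

      return ncur;
    }

    int main (void)
    {
      /* grow an array of pointers to hold at least 3, then at least 1000 elements */
      printf ("%d\n", array_nextsize (sizeof (void *), 0, 3));
      printf ("%d\n", array_nextsize (sizeof (void *), 0, 1000));
      return 0;
    }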
@@ -756 +786 @@
   }
 }

 /*****************************************************************************/

+/*
+ * the heap functions want a real array index. array index 0 is guaranteed to not
+ * be in-use at any time. the first heap entry is at array [HEAP0]. DHEAP gives
+ * the branching factor of the d-tree.
+ */
+
+/*
+ * at the moment we allow libev the luxury of two heaps,
+ * a small-code-size 2-heap one and a ~1.5kb larger 4-heap
+ * which is more cache-efficient.
+ * the difference is about 5% with 50000+ watchers.
+ */
+#if EV_USE_4HEAP
+
+#define DHEAP 4
+#define HEAP0 (DHEAP - 1) /* index of first element in heap */
+
 /* towards the root */
 void inline_speed
-upheap (WT *heap, int k)
+upheap (ANHE *heap, int k)
 {
-  WT w = heap [k];
+  ANHE he = heap [k];

   for (;;)
     {
-      int p = k >> 1;
+      int p = ((k - HEAP0 - 1) / DHEAP) + HEAP0;

-      /* maybe we could use a dummy element at heap [0]? */
-      if (!p || heap [p]->at <= w->at)
+      if (p == k || ANHE_at (heap [p]) <= ANHE_at (he))
        break;

      heap [k] = heap [p];
-      ((W)heap [k])->active = k;
+      ev_active (ANHE_w (heap [k])) = k;
      k = p;
    }

-  heap [k] = w;
-  ((W)heap [k])->active = k;
+  ev_active (ANHE_w (he)) = k;
+  heap [k] = he;
 }

 /* away from the root */
 void inline_speed
-downheap (WT *heap, int N, int k)
+downheap (ANHE *heap, int N, int k)
 {
-  WT w = heap [k];
+  ANHE he = heap [k];
+  ANHE *E = heap + N + HEAP0;
+
+  for (;;)
+    {
+      ev_tstamp minat;
+      ANHE *minpos;
+      ANHE *pos = heap + DHEAP * (k - HEAP0) + HEAP0;
+
+      // find minimum child
+      if (expect_true (pos + DHEAP - 1 < E))
+        {
+          /* fast path */                               (minpos = pos + 0), (minat = ANHE_at (*minpos));
+          if (               ANHE_at (pos [1]) < minat) (minpos = pos + 1), (minat = ANHE_at (*minpos));
+          if (               ANHE_at (pos [2]) < minat) (minpos = pos + 2), (minat = ANHE_at (*minpos));
+          if (               ANHE_at (pos [3]) < minat) (minpos = pos + 3), (minat = ANHE_at (*minpos));
+        }
+      else if (pos < E)
+        {
+          /* slow path */                               (minpos = pos + 0), (minat = ANHE_at (*minpos));
+          if (pos + 1 < E && ANHE_at (pos [1]) < minat) (minpos = pos + 1), (minat = ANHE_at (*minpos));
+          if (pos + 2 < E && ANHE_at (pos [2]) < minat) (minpos = pos + 2), (minat = ANHE_at (*minpos));
+          if (pos + 3 < E && ANHE_at (pos [3]) < minat) (minpos = pos + 3), (minat = ANHE_at (*minpos));
+        }
+      else
+        break;
+
+      if (ANHE_at (he) <= minat)
+        break;
+
+      ev_active (ANHE_w (*minpos)) = k;
+      heap [k] = *minpos;
+
+      k = minpos - heap;
+    }
+
+  ev_active (ANHE_w (he)) = k;
+  heap [k] = he;
+}
+
+#else // 4HEAP
+
+#define HEAP0 1
+
+/* towards the root */
+void inline_speed
+upheap (ANHE *heap, int k)
+{
+  ANHE he = heap [k];
+
+  for (;;)
+    {
+      int p = k >> 1;
+
+      /* maybe we could use a dummy element at heap [0]? */
+      if (!p || ANHE_at (heap [p]) <= ANHE_at (he))
+        break;
+
+      heap [k] = heap [p];
+      ev_active (ANHE_w (heap [k])) = k;
+      k = p;
+    }
+
+  heap [k] = he;
+  ev_active (ANHE_w (heap [k])) = k;
+}
+
+/* away from the root */
+void inline_speed
+downheap (ANHE *heap, int N, int k)
+{
+  ANHE he = heap [k];

   for (;;)
     {
       int c = k << 1;

       if (c > N)
        break;

-      c += c < N && heap [c]->at > heap [c + 1]->at
+      c += c + 1 < N && ANHE_at (heap [c]) > ANHE_at (heap [c + 1])
          ? 1 : 0;

-      if (w->at <= heap [c]->at)
+      if (ANHE_at (he) <= ANHE_at (heap [c]))
        break;

      heap [k] = heap [c];
-      ((W)heap [k])->active = k;
+      ev_active (ANHE_w (heap [k])) = k;

      k = c;
    }

-  heap [k] = w;
-  ((W)heap [k])->active = k;
+  heap [k] = he;
+  ev_active (ANHE_w (he)) = k;
 }
+#endif

 void inline_size
-adjustheap (WT *heap, int N, int k)
+adjustheap (ANHE *heap, int N, int k)
 {
   upheap (heap, k);
   downheap (heap, N, k);
 }

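The index arithmetic above follows from packing DHEAP children per node, with the root parked at HEAP0 so that array index 0 stays unused. As a simpler illustration of the same idea (a plain zero-based 4-ary heap, not ev.c's exact offset layout), the parent/child relations look like this:

    /* illustrative sketch: zero-based 4-ary heap indexing, not ev.c's layout */
    #include <assert.h>
    #include <stdio.h>

    #define D 4                                        /* branching factor, like DHEAP */

    static int parent (int i) { return (i - 1) / D; }  /* parent of node i (i > 0) */
    static int child0 (int i) { return D * i + 1; }    /* first of D adjacent children */

    int main (void)
    {
      /* every node lies inside its parent's block of D consecutive children */
      for (int i = 1; i < 1000; ++i)
        assert (parent (i) < i && child0 (parent (i)) <= i && i < child0 (parent (i)) + D);

      printf ("children of node 5: [%d..%d]\n", child0 (5), child0 (5) + D - 1);
      return 0;
    }

The cache benefit mentioned in the comment block comes from all four children of a node occupying adjacent array slots, so a comparison round typically touches a single cache line, especially with EV_HEAP_CACHE_AT storing the timestamps inline.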
@@ -911 +1029 @@
 pipecb (EV_P_ ev_io *iow, int revents)
 {
 #if EV_USE_EVENTFD
   if (evfd >= 0)
     {
-      uint64_t counter = 1;
+      uint64_t counter;
       read (evfd, &counter, sizeof (uint64_t));
     }
   else
 #endif
     {
@@ -1367 +1485 @@
 void
 ev_loop_fork (EV_P)
 {
   postfork = 1; /* must be in line with ev_default_fork */
 }
-
 #endif

 #if EV_MULTIPLICITY
 struct ev_loop *
 ev_default_loop_init (unsigned int flags)
@@ -1458 +1575 @@
           EV_CB_INVOKE (p->w, p->events);
         }
     }
 }

-void inline_size
-timers_reify (EV_P)
-{
-  while (timercnt && ev_at (timers [1]) <= mn_now)
-    {
-      ev_timer *w = (ev_timer *)timers [1];
-
-      /*assert (("inactive timer on timer heap detected", ev_is_active (w)));*/
-
-      /* first reschedule or stop timer */
-      if (w->repeat)
-        {
-          assert (("negative ev_timer repeat value found while processing timers", w->repeat > 0.));
-
-          ev_at (w) += w->repeat;
-          if (ev_at (w) < mn_now)
-            ev_at (w) = mn_now;
-
-          downheap (timers, timercnt, 1);
-        }
-      else
-        ev_timer_stop (EV_A_ w); /* nonrepeating: stop timer */
-
-      ev_feed_event (EV_A_ (W)w, EV_TIMEOUT);
-    }
-}
-
-#if EV_PERIODIC_ENABLE
-void inline_size
-periodics_reify (EV_P)
-{
-  while (periodiccnt && ev_at (periodics [1]) <= ev_rt_now)
-    {
-      ev_periodic *w = (ev_periodic *)periodics [1];
-
-      /*assert (("inactive timer on periodic heap detected", ev_is_active (w)));*/
-
-      /* first reschedule or stop timer */
-      if (w->reschedule_cb)
-        {
-          ev_at (w) = w->reschedule_cb (w, ev_rt_now + TIME_EPSILON);
-          assert (("ev_periodic reschedule callback returned time in the past", ev_at (w) > ev_rt_now));
-          downheap (periodics, periodiccnt, 1);
-        }
-      else if (w->interval)
-        {
-          ev_at (w) = w->offset + ceil ((ev_rt_now - w->offset) / w->interval) * w->interval;
-          if (ev_at (w) - ev_rt_now <= TIME_EPSILON) ev_at (w) += w->interval;
-          assert (("ev_periodic timeout in the past detected while processing timers, negative interval?", ev_at (w) > ev_rt_now));
-          downheap (periodics, periodiccnt, 1);
-        }
-      else
-        ev_periodic_stop (EV_A_ w); /* nonrepeating: stop timer */
-
-      ev_feed_event (EV_A_ (W)w, EV_PERIODIC);
-    }
-}
-
-static void noinline
-periodics_reschedule (EV_P)
-{
-  int i;
-
-  /* adjust periodics after time jump */
-  for (i = 0; i < periodiccnt; ++i)
-    {
-      ev_periodic *w = (ev_periodic *)periodics [i];
-
-      if (w->reschedule_cb)
-        ev_at (w) = w->reschedule_cb (w, ev_rt_now);
-      else if (w->interval)
-        ev_at (w) = w->offset + ceil ((ev_rt_now - w->offset) / w->interval) * w->interval;
-    }
-
-  /* now rebuild the heap */
-  for (i = periodiccnt >> 1; i--; )
-    downheap (periodics, periodiccnt, i);
-}
-#endif
-
 #if EV_IDLE_ENABLE
 void inline_size
 idle_reify (EV_P)
 {
   if (expect_false (idleall))
@@ -1558 +1595 @@
               queue_events (EV_A_ (W *)idles [pri], idlecnt [pri], EV_IDLE);
               break;
             }
         }
     }
+}
+#endif
+
+void inline_size
+timers_reify (EV_P)
+{
+  while (timercnt && ANHE_at (timers [HEAP0]) < mn_now)
+    {
+      ev_timer *w = (ev_timer *)ANHE_w (timers [HEAP0]);
+
+      /*assert (("inactive timer on timer heap detected", ev_is_active (w)));*/
+
+      /* first reschedule or stop timer */
+      if (w->repeat)
+        {
+          ev_at (w) += w->repeat;
+          if (ev_at (w) < mn_now)
+            ev_at (w) = mn_now;
+
+          assert (("negative ev_timer repeat value found while processing timers", w->repeat > 0.));
+
+          ANHE_at_set (timers [HEAP0]);
+          downheap (timers, timercnt, HEAP0);
+        }
+      else
+        ev_timer_stop (EV_A_ w); /* nonrepeating: stop timer */
+
+      ev_feed_event (EV_A_ (W)w, EV_TIMEOUT);
+    }
+}
+
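For context, the repeat branch above is what a plain repeating timer exercises. A usage sketch against the public libev 3.x API (not part of this diff; ev_loop () was later renamed ev_run):

    /* usage sketch: a timer that fires after 2s and then every 2s */
    #include <stdio.h>
    #include <ev.h>

    static void
    tick_cb (EV_P_ ev_timer *w, int revents)
    {
      /* timers_reify reschedules us via ev_at (w) += w->repeat */
      printf ("tick\n");
    }

    int main (void)
    {
      struct ev_loop *loop = ev_default_loop (0);
      ev_timer tick;

      ev_timer_init (&tick, tick_cb, 2., 2.); /* after = 2s, repeat = 2s */
      ev_timer_start (loop, &tick);

      ev_loop (loop, 0);
      return 0;
    }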
+#if EV_PERIODIC_ENABLE
+void inline_size
+periodics_reify (EV_P)
+{
+  while (periodiccnt && ANHE_at (periodics [HEAP0]) < ev_rt_now)
+    {
+      ev_periodic *w = (ev_periodic *)ANHE_w (periodics [HEAP0]);
+
+      /*assert (("inactive timer on periodic heap detected", ev_is_active (w)));*/
+
+      /* first reschedule or stop timer */
+      if (w->reschedule_cb)
+        {
+          ev_at (w) = w->reschedule_cb (w, ev_rt_now);
+
+          assert (("ev_periodic reschedule callback returned time in the past", ev_at (w) >= ev_rt_now));
+
+          ANHE_at_set (periodics [HEAP0]);
+          downheap (periodics, periodiccnt, HEAP0);
+        }
+      else if (w->interval)
+        {
+          ev_at (w) = w->offset + ceil ((ev_rt_now - w->offset) / w->interval) * w->interval;
+          /* if next trigger time is not sufficiently in the future, put it there */
+          /* this might happen because of floating point inexactness */
+          if (ev_at (w) - ev_rt_now < TIME_EPSILON)
+            {
+              ev_at (w) += w->interval;
+
+              /* if interval is unreasonably low we might still have a time in the past */
+              /* so correct this. this will make the periodic very inexact, but the user */
+              /* has effectively asked to get triggered more often than possible */
+              if (ev_at (w) < ev_rt_now)
+                ev_at (w) = ev_rt_now;
+            }
+
+          ANHE_at_set (periodics [HEAP0]);
+          downheap (periodics, periodiccnt, HEAP0);
+        }
+      else
+        ev_periodic_stop (EV_A_ w); /* nonrepeating: stop timer */
+
+      ev_feed_event (EV_A_ (W)w, EV_PERIODIC);
+    }
+}
+
+static void noinline
+periodics_reschedule (EV_P)
+{
+  int i;
+
+  /* adjust periodics after time jump */
+  for (i = HEAP0; i < periodiccnt + HEAP0; ++i)
+    {
+      ev_periodic *w = (ev_periodic *)ANHE_w (periodics [i]);
+
+      if (w->reschedule_cb)
+        ev_at (w) = w->reschedule_cb (w, ev_rt_now);
+      else if (w->interval)
+        ev_at (w) = w->offset + ceil ((ev_rt_now - w->offset) / w->interval) * w->interval;
+
+      ANHE_at_set (periodics [i]);
+    }
+
+  /* we don't use floyd's algorithm, upheap is simpler and is more cache-efficient */
+  /* also, this is easy and correct for both 2-heaps and 4-heaps */
+  for (i = 0; i < periodiccnt; ++i)
+    upheap (periodics, i + HEAP0);
 }
 #endif
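The interval branch above is the one a calendar-style periodic hits: with offset 0 and interval 3600 the watcher fires at every full hour of wall-clock time, however the clock jumps. A usage sketch against the public ev_periodic API (not part of this diff):

    /* usage sketch: fire at every full hour of wall-clock time */
    #include <stdio.h>
    #include <ev.h>

    static void
    hourly_cb (EV_P_ ev_periodic *w, int revents)
    {
      printf ("the hour struck at %f\n", ev_now (EV_A));
    }

    int main (void)
    {
      struct ev_loop *loop = ev_default_loop (0);
      ev_periodic hourly;

      ev_periodic_init (&hourly, hourly_cb, 0., 3600., 0); /* no reschedule_cb */
      ev_periodic_start (loop, &hourly);

      ev_loop (loop, 0);
      return 0;
    }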

 void inline_speed
 time_update (EV_P_ ev_tstamp max_block)
@@ -1596 +1732 @@
    */
   for (i = 4; --i; )
     {
       rtmn_diff = ev_rt_now - mn_now;

-      if (fabs (odiff - rtmn_diff) < MIN_TIMEJUMP)
+      if (expect_true (fabs (odiff - rtmn_diff) < MIN_TIMEJUMP))
        return; /* all is well */

      ev_rt_now = ev_time ();
      mn_now = get_clock ();
      now_floor = mn_now;
@@ -1621 +1757 @@
        {
 #if EV_PERIODIC_ENABLE
          periodics_reschedule (EV_A);
 #endif
          /* adjust timers. this is easy, as the offset is the same for all of them */
-          for (i = 1; i <= timercnt; ++i)
-            ev_at (timers [i]) += ev_rt_now - mn_now;
+          for (i = 0; i < timercnt; ++i)
+            {
+              ANHE *he = timers + i + HEAP0;
+              ANHE_w (*he)->at += ev_rt_now - mn_now;
+              ANHE_at_set (*he);
+            }
        }

      mn_now = ev_rt_now;
    }
 }
@@ -1702 +1842 @@

       waittime = MAX_BLOCKTIME;

       if (timercnt)
         {
-          ev_tstamp to = ev_at (timers [1]) - mn_now + backend_fudge;
+          ev_tstamp to = ANHE_at (timers [HEAP0]) - mn_now + backend_fudge;
           if (waittime > to) waittime = to;
         }

 #if EV_PERIODIC_ENABLE
       if (periodiccnt)
         {
-          ev_tstamp to = ev_at (periodics [1]) - ev_rt_now + backend_fudge;
+          ev_tstamp to = ANHE_at (periodics [HEAP0]) - ev_rt_now + backend_fudge;
           if (waittime > to) waittime = to;
         }
 #endif

       if (expect_false (waittime < timeout_blocktime))
@@ -1871 +2011 @@
 {
   clear_pending (EV_A_ (W)w);
   if (expect_false (!ev_is_active (w)))
     return;

-  assert (("ev_io_start called with illegal fd (must stay constant after start!)", w->fd >= 0 && w->fd < anfdmax));
+  assert (("ev_io_stop called with illegal fd (must stay constant after start!)", w->fd >= 0 && w->fd < anfdmax));

   wlist_del (&anfds[w->fd].head, (WL)w);
   ev_stop (EV_A_ (W)w);

   fd_change (EV_A_ w->fd, 1);
@@ -1889 +2029 @@

   ev_at (w) += mn_now;

   assert (("ev_timer_start called with negative timer repeat value", w->repeat >= 0.));

-  ev_start (EV_A_ (W)w, ++timercnt);
-  array_needsize (WT, timers, timermax, timercnt + 1, EMPTY2);
-  timers [timercnt] = (WT)w;
-  upheap (timers, timercnt);
+  ev_start (EV_A_ (W)w, ++timercnt + HEAP0 - 1);
+  array_needsize (ANHE, timers, timermax, ev_active (w) + 1, EMPTY2);
+  ANHE_w (timers [ev_active (w)]) = (WT)w;
+  ANHE_at_set (timers [ev_active (w)]);
+  upheap (timers, ev_active (w));

-  /*assert (("internal timer heap corruption", timers [((W)w)->active] == w));*/
+  /*assert (("internal timer heap corruption", timers [ev_active (w)] == (WT)w));*/
 }

 void noinline
 ev_timer_stop (EV_P_ ev_timer *w)
 {
   clear_pending (EV_A_ (W)w);
   if (expect_false (!ev_is_active (w)))
     return;

-  assert (("internal timer heap corruption", timers [((W)w)->active] == (WT)w));
-
   {
-    int active = ((W)w)->active;
+    int active = ev_active (w);

-    if (expect_true (active < timercnt))
+    assert (("internal timer heap corruption", ANHE_w (timers [active]) == (WT)w));
+
+    if (expect_true (active < timercnt + HEAP0 - 1))
       {
-        timers [active] = timers [timercnt];
+        timers [active] = timers [timercnt + HEAP0 - 1];
         adjustheap (timers, timercnt, active);
       }

     --timercnt;
   }
@@ -1931 +2072 @@
   if (ev_is_active (w))
     {
       if (w->repeat)
         {
           ev_at (w) = mn_now + w->repeat;
+          ANHE_at_set (timers [ev_active (w)]);
-          adjustheap (timers, timercnt, ((W)w)->active);
+          adjustheap (timers, timercnt, ev_active (w));
         }
       else
         ev_timer_stop (EV_A_ w);
     }
   else if (w->repeat)
     {
-      w->at = w->repeat;
+      ev_at (w) = w->repeat;
       ev_timer_start (EV_A_ w);
     }
 }

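ev_timer_again above either re-arms an active repeating timer in place or starts an inactive one from w->repeat, which is what makes it the cheap way to implement inactivity timeouts. A usage sketch with hypothetical callback names (public API, not part of this diff): reset a 10-second timeout whenever stdin shows activity.

    /* usage sketch: inactivity timeout via ev_timer_again */
    #include <stdio.h>
    #include <unistd.h>
    #include <ev.h>

    static ev_timer timeout_watcher;

    static void
    timeout_cb (EV_P_ ev_timer *w, int revents)
    {
      printf ("no activity for 10 seconds\n");
      ev_unloop (EV_A_ EVUNLOOP_ONE);
    }

    static void
    stdin_cb (EV_P_ ev_io *w, int revents)
    {
      char buf [1024];
      read (w->fd, buf, sizeof buf);   /* consume the activity */

      timeout_watcher.repeat = 10.;    /* re-arm: ev_at (w) = mn_now + repeat */
      ev_timer_again (EV_A_ &timeout_watcher);
    }

    int main (void)
    {
      struct ev_loop *loop = ev_default_loop (0);
      ev_io stdin_watcher;

      ev_io_init (&stdin_watcher, stdin_cb, 0, EV_READ);
      ev_io_start (loop, &stdin_watcher);

      ev_timer_init (&timeout_watcher, timeout_cb, 0., 10.);
      ev_timer_again (loop, &timeout_watcher); /* starts the repeating timer */

      ev_loop (loop, 0);
      return 0;
    }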
 #if EV_PERIODIC_ENABLE
@@ -1961 +2103 @@
       ev_at (w) = w->offset + ceil ((ev_rt_now - w->offset) / w->interval) * w->interval;
     }
   else
     ev_at (w) = w->offset;

-  ev_start (EV_A_ (W)w, ++periodiccnt);
-  array_needsize (WT, periodics, periodicmax, periodiccnt + 1, EMPTY2);
-  periodics [periodiccnt] = (WT)w;
-  upheap (periodics, periodiccnt);
+  ev_start (EV_A_ (W)w, ++periodiccnt + HEAP0 - 1);
+  array_needsize (ANHE, periodics, periodicmax, ev_active (w) + 1, EMPTY2);
+  ANHE_w (periodics [ev_active (w)]) = (WT)w;
+  ANHE_at_set (periodics [ev_active (w)]);
+  upheap (periodics, ev_active (w));

-  /*assert (("internal periodic heap corruption", periodics [((W)w)->active - 1] == w));*/
+  /*assert (("internal periodic heap corruption", ANHE_w (periodics [ev_active (w)]) == (WT)w));*/
 }

 void noinline
 ev_periodic_stop (EV_P_ ev_periodic *w)
 {
   clear_pending (EV_A_ (W)w);
   if (expect_false (!ev_is_active (w)))
     return;

-  assert (("internal periodic heap corruption", periodics [((W)w)->active] == (WT)w));
-
   {
-    int active = ((W)w)->active;
+    int active = ev_active (w);

-    if (expect_true (active < periodiccnt))
+    assert (("internal periodic heap corruption", ANHE_w (periodics [active]) == (WT)w));
+
+    if (expect_true (active < periodiccnt + HEAP0 - 1))
       {
-        periodics [active] = periodics [periodiccnt];
+        periodics [active] = periodics [periodiccnt + HEAP0 - 1];
         adjustheap (periodics, periodiccnt, active);
       }

     --periodiccnt;
   }
@@ -2113 +2256 @@
   if (w->wd < 0)
     {
       ev_timer_start (EV_A_ &w->timer); /* this is not race-free, so we still need to recheck periodically */

       /* monitor some parent directory for speedup hints */
+      /* note that exceeding the hardcoded limit is not a correctness issue, */
+      /* but an efficiency issue only */
       if ((errno == ENOENT || errno == EACCES) && strlen (w->path) < 4096)
         {
           char path [4096];
           strcpy (path, w->path);

@@ -2367 +2512 @@
   clear_pending (EV_A_ (W)w);
   if (expect_false (!ev_is_active (w)))
     return;

   {
-    int active = ((W)w)->active;
+    int active = ev_active (w);

     idles [ABSPRI (w)][active - 1] = idles [ABSPRI (w)][--idlecnt [ABSPRI (w)]];
-    ((W)idles [ABSPRI (w)][active - 1])->active = active;
+    ev_active (idles [ABSPRI (w)][active - 1]) = active;

     ev_stop (EV_A_ (W)w);
     --idleall;
   }
 }
@@ -2397 +2542 @@
   clear_pending (EV_A_ (W)w);
   if (expect_false (!ev_is_active (w)))
     return;

   {
-    int active = ((W)w)->active;
+    int active = ev_active (w);
+
     prepares [active - 1] = prepares [--preparecnt];
-    ((W)prepares [active - 1])->active = active;
+    ev_active (prepares [active - 1]) = active;
   }

   ev_stop (EV_A_ (W)w);
 }

@@ -2424 +2570 @@
   clear_pending (EV_A_ (W)w);
   if (expect_false (!ev_is_active (w)))
     return;

   {
-    int active = ((W)w)->active;
+    int active = ev_active (w);
+
     checks [active - 1] = checks [--checkcnt];
-    ((W)checks [active - 1])->active = active;
+    ev_active (checks [active - 1]) = active;
   }

   ev_stop (EV_A_ (W)w);
 }

@@ -2532 +2679 @@
   clear_pending (EV_A_ (W)w);
   if (expect_false (!ev_is_active (w)))
     return;

   {
-    int active = ((W)w)->active;
+    int active = ev_active (w);
+
     forks [active - 1] = forks [--forkcnt];
-    ((W)forks [active - 1])->active = active;
+    ev_active (forks [active - 1]) = active;
   }

   ev_stop (EV_A_ (W)w);
 }
 #endif
@@ -2563 +2711 @@
   clear_pending (EV_A_ (W)w);
   if (expect_false (!ev_is_active (w)))
     return;

   {
-    int active = ((W)w)->active;
+    int active = ev_active (w);
+
     asyncs [active - 1] = asyncs [--asynccnt];
-    ((W)asyncs [active - 1])->active = active;
+    ev_active (asyncs [active - 1]) = active;
   }

   ev_stop (EV_A_ (W)w);
 }


Diff Legend

- Removed lines
+ Added lines
  (unchanged context lines carry a leading space; changed lines appear as a removed/added pair)