/cvs/libev/ev.c

Comparing libev/ev.c (file contents):
Revision 1.223 by root, Sun Apr 6 14:34:50 2008 UTC vs.
Revision 1.235 by root, Wed May 7 14:45:17 2008 UTC

325 325
326typedef ev_watcher *W; 326typedef ev_watcher *W;
327typedef ev_watcher_list *WL; 327typedef ev_watcher_list *WL;
328typedef ev_watcher_time *WT; 328typedef ev_watcher_time *WT;
329 329
330#define ev_active(w) ((W)(w))->active
331#define ev_at(w) ((WT)(w))->at
332
330#if EV_USE_MONOTONIC 333#if EV_USE_MONOTONIC
331/* sig_atomic_t is used to avoid per-thread variables or locking but still */ 334/* sig_atomic_t is used to avoid per-thread variables or locking but still */
332/* giving it a reasonably high chance of working on typical architectures */ 335/* giving it a reasonably high chance of working on typical architectures */
333static EV_ATOMIC_T have_monotonic; /* did clock_gettime (CLOCK_MONOTONIC) work? */ 336static EV_ATOMIC_T have_monotonic; /* did clock_gettime (CLOCK_MONOTONIC) work? */
334#endif 337#endif
360 perror (msg); 363 perror (msg);
361 abort (); 364 abort ();
362 } 365 }
363} 366}
364 367
368static void *
369ev_realloc_emul (void *ptr, long size)
370{
371 /* some systems, notably openbsd and darwin, fail to properly
372 * implement realloc (x, 0) (as required by both ansi c-89 and
373 * the single unix specification), so work around them here.
374 */
375
376 if (size)
377 return realloc (ptr, size);
378
379 free (ptr);
380 return 0;
381}
382
365static void *(*alloc)(void *ptr, long size); 383static void *(*alloc)(void *ptr, long size) = ev_realloc_emul;
366 384
367void 385void
368ev_set_allocator (void *(*cb)(void *ptr, long size)) 386ev_set_allocator (void *(*cb)(void *ptr, long size))
369{ 387{
370 alloc = cb; 388 alloc = cb;
371} 389}
372 390
373inline_speed void * 391inline_speed void *
374ev_realloc (void *ptr, long size) 392ev_realloc (void *ptr, long size)
375{ 393{
376 ptr = alloc ? alloc (ptr, size) : realloc (ptr, size); 394 ptr = alloc (ptr, size);
377 395
378 if (!ptr && size) 396 if (!ptr && size)
379 { 397 {
380 fprintf (stderr, "libev: cannot allocate %ld bytes, aborting.", size); 398 fprintf (stderr, "libev: cannot allocate %ld bytes, aborting.", size);
381 abort (); 399 abort ();
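
The new default allocator ev_realloc_emul folds free into realloc semantics: size 0 releases the pointer, anything else resizes it, so ev_realloc above no longer has to check whether a user allocator was installed. Any replacement passed to ev_set_allocator must honour the same contract. Below is a minimal sketch of such a replacement that retries on memory shortage instead of letting libev abort; the name persistent_realloc and the retry interval are illustrative, only ev_set_allocator and its (void *ptr, long size) signature come from the code above.

#include <ev.h>
#include <stdlib.h>
#include <unistd.h>

static void *
persistent_realloc (void *ptr, long size)
{
  if (!size)
    {
      free (ptr);   /* mirror ev_realloc_emul: size 0 means free */
      return 0;
    }

  for (;;)
    {
      void *newptr = realloc (ptr, size);

      if (newptr)
        return newptr;

      sleep (60);   /* out of memory: wait and retry instead of aborting */
    }
}

int
main (void)
{
  ev_set_allocator (persistent_realloc);  /* install before any watcher is started */
  return 0;
}
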
502 } 520 }
503} 521}
504 522
505/*****************************************************************************/ 523/*****************************************************************************/
506 524
525#define MALLOC_ROUND 4096 /* prefer to allocate in chunks of this size, must be 2**n and >> 4 longs */
526
507int inline_size 527int inline_size
508array_nextsize (int elem, int cur, int cnt) 528array_nextsize (int elem, int cur, int cnt)
509{ 529{
510 int ncur = cur + 1; 530 int ncur = cur + 1;
511 531
512 do 532 do
513 ncur <<= 1; 533 ncur <<= 1;
514 while (cnt > ncur); 534 while (cnt > ncur);
515 535
516 /* if size > 4096, round to 4096 - 4 * longs to accommodate malloc overhead */ 536 /* if size is large, round to MALLOC_ROUND - 4 * longs to accommodate malloc overhead */
517 if (elem * ncur > 4096) 537 if (elem * ncur > MALLOC_ROUND - sizeof (void *) * 4)
518 { 538 {
519 ncur *= elem; 539 ncur *= elem;
520 ncur = (ncur + elem + 4095 + sizeof (void *) * 4) & ~4095; 540 ncur = (ncur + elem + (MALLOC_ROUND - 1) + sizeof (void *) * 4) & ~(MALLOC_ROUND - 1);
521 ncur = ncur - sizeof (void *) * 4; 541 ncur = ncur - sizeof (void *) * 4;
522 ncur /= elem; 542 ncur /= elem;
523 } 543 }
524 544
525 return ncur; 545 return ncur;
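
The rounding in the new array_nextsize is easier to follow with concrete numbers. The sketch below replays its arithmetic once the doubled array would exceed MALLOC_ROUND minus four pointers of assumed malloc bookkeeping; the element size and count are made up for illustration.

#include <stdio.h>

#define MALLOC_ROUND 4096   /* same constant as above */

int
main (void)
{
  int elem = 16, ncur = 512;   /* illustrative: 16-byte elements, 512 of them */

  /* the same steps as the new array_nextsize body */
  ncur *= elem;                                                  /* bytes needed          */
  ncur  = (ncur + elem + (MALLOC_ROUND - 1) + sizeof (void *) * 4)
          & ~(MALLOC_ROUND - 1);                                 /* round up to 4k chunk  */
  ncur -= sizeof (void *) * 4;                                   /* leave room for malloc */
  ncur /= elem;                                                  /* back to element count */

  printf ("%d elements = %d bytes, just below a multiple of %d\n",
          ncur, ncur * elem, MALLOC_ROUND);
  return 0;
}

With 8-byte pointers this prints 766 elements = 12256 bytes, i.e. three 4096-byte chunks minus 32 bytes of headroom for the allocator's own bookkeeping.
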
739 } 759 }
740} 760}
741 761
742/*****************************************************************************/ 762/*****************************************************************************/
743 763
764/*
765 * at the moment we allow libev the luxury of two heaps,
766 * a small-code-size 2-heap one and a ~1.5kb larger 4-heap
767 * which is more cache-efficient.
768 * the difference is about 5% with 50000+ watchers.
769 */
770#define USE_4HEAP !EV_MINIMAL
771#if USE_4HEAP
772
773#define HEAP0 3 /* index of first element in heap */
774
775/* towards the root */
744void inline_speed 776void inline_speed
745upheap (WT *heap, int k) 777upheap (WT *heap, int k)
746{ 778{
747 WT w = heap [k]; 779 WT w = heap [k];
748 780
749 while (k) 781 for (;;)
750 { 782 {
751 int p = (k - 1) >> 1; 783 int p = ((k - HEAP0 - 1) / 4) + HEAP0;
752 784
753 if (heap [p]->at <= w->at) 785 if (p >= HEAP0 || heap [p]->at <= w->at)
754 break; 786 break;
755 787
756 heap [k] = heap [p]; 788 heap [k] = heap [p];
757 ((W)heap [k])->active = k + 1; 789 ev_active (heap [k]) = k;
758 k = p; 790 k = p;
759 } 791 }
760 792
761 heap [k] = w; 793 heap [k] = w;
762 ((W)heap [k])->active = k + 1; 794 ev_active (heap [k]) = k;
763} 795}
764 796
797/* away from the root */
765void inline_speed 798void inline_speed
766downheap (WT *heap, int N, int k) 799downheap (WT *heap, int N, int k)
767{ 800{
768 WT w = heap [k]; 801 WT w = heap [k];
802 WT *E = heap + N + HEAP0;
769 803
770 for (;;) 804 for (;;)
771 { 805 {
806 ev_tstamp minat;
807 WT *minpos;
808 WT *pos = heap + 4 * (k - HEAP0) + HEAP0;
809
810 /* find minimum child */
811 if (expect_true (pos + 3 < E))
812 {
813 (minpos = pos + 0), (minat = (*minpos)->at);
814 if (pos [1]->at < minat) (minpos = pos + 1), (minat = (*minpos)->at);
815 if (pos [2]->at < minat) (minpos = pos + 2), (minat = (*minpos)->at);
816 if (pos [3]->at < minat) (minpos = pos + 3), (minat = (*minpos)->at);
817 }
818 else
819 {
820 if (pos >= E)
821 break;
822
823 (minpos = pos + 0), (minat = (*minpos)->at);
824 if (pos + 1 < E && pos [1]->at < minat) (minpos = pos + 1), (minat = (*minpos)->at);
825 if (pos + 2 < E && pos [2]->at < minat) (minpos = pos + 2), (minat = (*minpos)->at);
826 if (pos + 3 < E && pos [3]->at < minat) (minpos = pos + 3), (minat = (*minpos)->at);
827 }
828
829 if (w->at <= minat)
830 break;
831
832 ev_active (*minpos) = k;
833 heap [k] = *minpos;
834
835 k = minpos - heap;
836 }
837
838 heap [k] = w;
839 ev_active (heap [k]) = k;
840}
841
842#else // 4HEAP
843
844#define HEAP0 1
845
846/* towards the root */
847void inline_speed
848upheap (WT *heap, int k)
849{
850 WT w = heap [k];
851
852 for (;;)
853 {
854 int p = k >> 1;
855
856 /* maybe we could use a dummy element at heap [0]? */
857 if (!p || heap [p]->at <= w->at)
858 break;
859
860 heap [k] = heap [p];
861 ev_active (heap [k]) = k;
862 k = p;
863 }
864
865 heap [k] = w;
866 ev_active (heap [k]) = k;
867}
868
869/* away from the root */
870void inline_speed
871downheap (WT *heap, int N, int k)
872{
873 WT w = heap [k];
874
875 for (;;)
876 {
772 int c = (k << 1) + 1; 877 int c = k << 1;
773 878
774 if (c >= N) 879 if (c > N)
775 break; 880 break;
776 881
777 c += c + 1 < N && heap [c]->at > heap [c + 1]->at 882 c += c + 1 < N && heap [c]->at > heap [c + 1]->at
778 ? 1 : 0; 883 ? 1 : 0;
779 884
780 if (w->at <= heap [c]->at) 885 if (w->at <= heap [c]->at)
781 break; 886 break;
782 887
783 heap [k] = heap [c]; 888 heap [k] = heap [c];
784 ((W)heap [k])->active = k + 1; 889 ((W)heap [k])->active = k;
785 890
786 k = c; 891 k = c;
787 } 892 }
788 893
789 heap [k] = w; 894 heap [k] = w;
790 ((W)heap [k])->active = k + 1; 895 ev_active (heap [k]) = k;
791} 896}
897#endif
792 898
793void inline_size 899void inline_size
794adjustheap (WT *heap, int N, int k) 900adjustheap (WT *heap, int N, int k)
795{ 901{
796 upheap (heap, k); 902 upheap (heap, k);
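
Note how both heap variants now store the heap index itself in the watcher (ev_active (heap [k]) = k) rather than index + 1, and both leave the slots below HEAP0 unused: the 2-heap in the #else branch roots the heap at index 1 so that parent/child arithmetic reduces to shifts, which is what the !p test in its upheap relies on. A standalone sketch of that 1-based index arithmetic, with node numbers chosen only for illustration:

#include <stdio.h>

int
main (void)
{
  int k;

  /* 1-based binary heap: heap [0] is unused, the root sits at index 1,
     the parent of k is k >> 1, its children are 2k and 2k + 1 */
  for (k = 1; k <= 7; ++k)
    printf ("node %d: parent %d, children %d and %d\n",
            k, k >> 1, k << 1, (k << 1) + 1);

  /* the parent of the root computes to 0, hence upheap's "if (!p) break" */
  return 0;
}

The 4-heap variant follows the same convention with HEAP0 at 3, packing four children per node so each downheap step touches fewer cache lines, which is the roughly 5% difference the comment above mentions for 50000+ watchers.
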
891pipecb (EV_P_ ev_io *iow, int revents) 997pipecb (EV_P_ ev_io *iow, int revents)
892{ 998{
893#if EV_USE_EVENTFD 999#if EV_USE_EVENTFD
894 if (evfd >= 0) 1000 if (evfd >= 0)
895 { 1001 {
896 uint64_t counter = 1; 1002 uint64_t counter;
897 read (evfd, &counter, sizeof (uint64_t)); 1003 read (evfd, &counter, sizeof (uint64_t));
898 } 1004 }
899 else 1005 else
900#endif 1006#endif
901 { 1007 {
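
pipecb now drains the wakeup eventfd by reading one uint64_t: in the kernel's default (non-semaphore) mode that read returns the accumulated counter and resets it to zero, so any number of wakeups collapses into a single read. A Linux-only sketch of that behaviour, independent of libev (error handling omitted, values illustrative):

#include <stdint.h>
#include <stdio.h>
#include <unistd.h>
#include <sys/eventfd.h>

int
main (void)
{
  int evfd = eventfd (0, 0);                    /* counter starts at 0          */
  uint64_t one = 1, counter;

  write (evfd, &one, sizeof (uint64_t));        /* two wakeups ...              */
  write (evfd, &one, sizeof (uint64_t));
  read  (evfd, &counter, sizeof (uint64_t));    /* ... drained by a single read */

  printf ("coalesced wakeups: %llu\n", (unsigned long long)counter);
  close (evfd);
  return 0;
}
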
1170 if (!(flags & EVFLAG_NOENV) 1276 if (!(flags & EVFLAG_NOENV)
1171 && !enable_secure () 1277 && !enable_secure ()
1172 && getenv ("LIBEV_FLAGS")) 1278 && getenv ("LIBEV_FLAGS"))
1173 flags = atoi (getenv ("LIBEV_FLAGS")); 1279 flags = atoi (getenv ("LIBEV_FLAGS"));
1174 1280
1175 if (!(flags & 0x0000ffffUL)) 1281 if (!(flags & 0x0000ffffU))
1176 flags |= ev_recommended_backends (); 1282 flags |= ev_recommended_backends ();
1177 1283
1178#if EV_USE_PORT 1284#if EV_USE_PORT
1179 if (!backend && (flags & EVBACKEND_PORT )) backend = port_init (EV_A_ flags); 1285 if (!backend && (flags & EVBACKEND_PORT )) backend = port_init (EV_A_ flags);
1180#endif 1286#endif
1268#endif 1374#endif
1269 1375
1270 backend = 0; 1376 backend = 0;
1271} 1377}
1272 1378
1379#if EV_USE_INOTIFY
1273void inline_size infy_fork (EV_P); 1380void inline_size infy_fork (EV_P);
1381#endif
1274 1382
1275void inline_size 1383void inline_size
1276loop_fork (EV_P) 1384loop_fork (EV_P)
1277{ 1385{
1278#if EV_USE_PORT 1386#if EV_USE_PORT
1345void 1453void
1346ev_loop_fork (EV_P) 1454ev_loop_fork (EV_P)
1347{ 1455{
1348 postfork = 1; /* must be in line with ev_default_fork */ 1456 postfork = 1; /* must be in line with ev_default_fork */
1349} 1457}
1350
1351#endif 1458#endif
1352 1459
1353#if EV_MULTIPLICITY 1460#if EV_MULTIPLICITY
1354struct ev_loop * 1461struct ev_loop *
1355ev_default_loop_init (unsigned int flags) 1462ev_default_loop_init (unsigned int flags)
1436 EV_CB_INVOKE (p->w, p->events); 1543 EV_CB_INVOKE (p->w, p->events);
1437 } 1544 }
1438 } 1545 }
1439} 1546}
1440 1547
1441void inline_size
1442timers_reify (EV_P)
1443{
1444 while (timercnt && ((WT)timers [0])->at <= mn_now)
1445 {
1446 ev_timer *w = (ev_timer *)timers [0];
1447
1448 /*assert (("inactive timer on timer heap detected", ev_is_active (w)));*/
1449
1450 /* first reschedule or stop timer */
1451 if (w->repeat)
1452 {
1453 assert (("negative ev_timer repeat value found while processing timers", w->repeat > 0.));
1454
1455 ((WT)w)->at += w->repeat;
1456 if (((WT)w)->at < mn_now)
1457 ((WT)w)->at = mn_now;
1458
1459 downheap (timers, timercnt, 0);
1460 }
1461 else
1462 ev_timer_stop (EV_A_ w); /* nonrepeating: stop timer */
1463
1464 ev_feed_event (EV_A_ (W)w, EV_TIMEOUT);
1465 }
1466}
1467
1468#if EV_PERIODIC_ENABLE
1469void inline_size
1470periodics_reify (EV_P)
1471{
1472 while (periodiccnt && ((WT)periodics [0])->at <= ev_rt_now)
1473 {
1474 ev_periodic *w = (ev_periodic *)periodics [0];
1475
1476 /*assert (("inactive timer on periodic heap detected", ev_is_active (w)));*/
1477
1478 /* first reschedule or stop timer */
1479 if (w->reschedule_cb)
1480 {
1481 ((WT)w)->at = w->reschedule_cb (w, ev_rt_now + TIME_EPSILON);
1482 assert (("ev_periodic reschedule callback returned time in the past", ((WT)w)->at > ev_rt_now));
1483 downheap (periodics, periodiccnt, 0);
1484 }
1485 else if (w->interval)
1486 {
1487 ((WT)w)->at = w->offset + ceil ((ev_rt_now - w->offset) / w->interval) * w->interval;
1488 if (((WT)w)->at - ev_rt_now <= TIME_EPSILON) ((WT)w)->at += w->interval;
1489 assert (("ev_periodic timeout in the past detected while processing timers, negative interval?", ((WT)w)->at > ev_rt_now));
1490 downheap (periodics, periodiccnt, 0);
1491 }
1492 else
1493 ev_periodic_stop (EV_A_ w); /* nonrepeating: stop timer */
1494
1495 ev_feed_event (EV_A_ (W)w, EV_PERIODIC);
1496 }
1497}
1498
1499static void noinline
1500periodics_reschedule (EV_P)
1501{
1502 int i;
1503
1504 /* adjust periodics after time jump */
1505 for (i = 0; i < periodiccnt; ++i)
1506 {
1507 ev_periodic *w = (ev_periodic *)periodics [i];
1508
1509 if (w->reschedule_cb)
1510 ((WT)w)->at = w->reschedule_cb (w, ev_rt_now);
1511 else if (w->interval)
1512 ((WT)w)->at = w->offset + ceil ((ev_rt_now - w->offset) / w->interval) * w->interval;
1513 }
1514
1515 /* now rebuild the heap */
1516 for (i = periodiccnt >> 1; i--; )
1517 downheap (periodics, periodiccnt, i);
1518}
1519#endif
1520
1521#if EV_IDLE_ENABLE 1548#if EV_IDLE_ENABLE
1522void inline_size 1549void inline_size
1523idle_reify (EV_P) 1550idle_reify (EV_P)
1524{ 1551{
1525 if (expect_false (idleall)) 1552 if (expect_false (idleall))
1536 queue_events (EV_A_ (W *)idles [pri], idlecnt [pri], EV_IDLE); 1563 queue_events (EV_A_ (W *)idles [pri], idlecnt [pri], EV_IDLE);
1537 break; 1564 break;
1538 } 1565 }
1539 } 1566 }
1540 } 1567 }
1568}
1569#endif
1570
1571void inline_size
1572timers_reify (EV_P)
1573{
1574 while (timercnt && ev_at (timers [HEAP0]) <= mn_now)
1575 {
1576 ev_timer *w = (ev_timer *)timers [HEAP0];
1577
1578 /*assert (("inactive timer on timer heap detected", ev_is_active (w)));*/
1579
1580 /* first reschedule or stop timer */
1581 if (w->repeat)
1582 {
1583 assert (("negative ev_timer repeat value found while processing timers", w->repeat > 0.));
1584
1585 ev_at (w) += w->repeat;
1586 if (ev_at (w) < mn_now)
1587 ev_at (w) = mn_now;
1588
1589 downheap (timers, timercnt, HEAP0);
1590 }
1591 else
1592 ev_timer_stop (EV_A_ w); /* nonrepeating: stop timer */
1593
1594 ev_feed_event (EV_A_ (W)w, EV_TIMEOUT);
1595 }
1596}
1597
1598#if EV_PERIODIC_ENABLE
1599void inline_size
1600periodics_reify (EV_P)
1601{
1602 while (periodiccnt && ev_at (periodics [HEAP0]) <= ev_rt_now)
1603 {
1604 ev_periodic *w = (ev_periodic *)periodics [HEAP0];
1605
1606 /*assert (("inactive timer on periodic heap detected", ev_is_active (w)));*/
1607
1608 /* first reschedule or stop timer */
1609 if (w->reschedule_cb)
1610 {
1611 ev_at (w) = w->reschedule_cb (w, ev_rt_now + TIME_EPSILON);
1612 assert (("ev_periodic reschedule callback returned time in the past", ev_at (w) > ev_rt_now));
1613 downheap (periodics, periodiccnt, 1);
1614 }
1615 else if (w->interval)
1616 {
1617 ev_at (w) = w->offset + ceil ((ev_rt_now - w->offset) / w->interval) * w->interval;
1618 if (ev_at (w) - ev_rt_now <= TIME_EPSILON) ev_at (w) += w->interval;
1619 assert (("ev_periodic timeout in the past detected while processing timers, negative interval?", ev_at (w) > ev_rt_now));
1620 downheap (periodics, periodiccnt, HEAP0);
1621 }
1622 else
1623 ev_periodic_stop (EV_A_ w); /* nonrepeating: stop timer */
1624
1625 ev_feed_event (EV_A_ (W)w, EV_PERIODIC);
1626 }
1627}
1628
1629static void noinline
1630periodics_reschedule (EV_P)
1631{
1632 int i;
1633
1634 /* adjust periodics after time jump */
1635 for (i = 1; i <= periodiccnt; ++i)
1636 {
1637 ev_periodic *w = (ev_periodic *)periodics [i];
1638
1639 if (w->reschedule_cb)
1640 ev_at (w) = w->reschedule_cb (w, ev_rt_now);
1641 else if (w->interval)
1642 ev_at (w) = w->offset + ceil ((ev_rt_now - w->offset) / w->interval) * w->interval;
1643 }
1644
1645 /* now rebuild the heap */
1646 for (i = periodiccnt >> 1; --i; )
1647 downheap (periodics, periodiccnt, i + HEAP0);
1541} 1648}
1542#endif 1649#endif
1543 1650
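The rescheduling formula shared by periodics_reify and periodics_reschedule, at = offset + ceil ((now - offset) / interval) * interval, snaps the next trigger to the next interval boundary counted from offset. A small numeric sketch with made-up values (offset 0, a one-hour interval, "now" at 5000 seconds):

#include <math.h>
#include <stdio.h>

int
main (void)
{
  double offset = 0., interval = 3600., ev_rt_now = 5000.;   /* illustrative values */

  double at = offset + ceil ((ev_rt_now - offset) / interval) * interval;

  printf ("next trigger at %.0f, the next multiple of %.0f after now\n",
          at, interval);                                     /* prints 7200         */
  return 0;
}
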
1544void inline_speed 1651void inline_speed
1545time_update (EV_P_ ev_tstamp max_block) 1652time_update (EV_P_ ev_tstamp max_block)
1574 */ 1681 */
1575 for (i = 4; --i; ) 1682 for (i = 4; --i; )
1576 { 1683 {
1577 rtmn_diff = ev_rt_now - mn_now; 1684 rtmn_diff = ev_rt_now - mn_now;
1578 1685
1579 if (fabs (odiff - rtmn_diff) < MIN_TIMEJUMP) 1686 if (expect_true (fabs (odiff - rtmn_diff) < MIN_TIMEJUMP))
1580 return; /* all is well */ 1687 return; /* all is well */
1581 1688
1582 ev_rt_now = ev_time (); 1689 ev_rt_now = ev_time ();
1583 mn_now = get_clock (); 1690 mn_now = get_clock ();
1584 now_floor = mn_now; 1691 now_floor = mn_now;
1599 { 1706 {
1600#if EV_PERIODIC_ENABLE 1707#if EV_PERIODIC_ENABLE
1601 periodics_reschedule (EV_A); 1708 periodics_reschedule (EV_A);
1602#endif 1709#endif
1603 /* adjust timers. this is easy, as the offset is the same for all of them */ 1710 /* adjust timers. this is easy, as the offset is the same for all of them */
1604 for (i = 0; i < timercnt; ++i) 1711 for (i = 1; i <= timercnt; ++i)
1605 ((WT)timers [i])->at += ev_rt_now - mn_now; 1712 ev_at (timers [i]) += ev_rt_now - mn_now;
1606 } 1713 }
1607 1714
1608 mn_now = ev_rt_now; 1715 mn_now = ev_rt_now;
1609 } 1716 }
1610} 1717}
1680 1787
1681 waittime = MAX_BLOCKTIME; 1788 waittime = MAX_BLOCKTIME;
1682 1789
1683 if (timercnt) 1790 if (timercnt)
1684 { 1791 {
1685 ev_tstamp to = ((WT)timers [0])->at - mn_now + backend_fudge; 1792 ev_tstamp to = ev_at (timers [HEAP0]) - mn_now + backend_fudge;
1686 if (waittime > to) waittime = to; 1793 if (waittime > to) waittime = to;
1687 } 1794 }
1688 1795
1689#if EV_PERIODIC_ENABLE 1796#if EV_PERIODIC_ENABLE
1690 if (periodiccnt) 1797 if (periodiccnt)
1691 { 1798 {
1692 ev_tstamp to = ((WT)periodics [0])->at - ev_rt_now + backend_fudge; 1799 ev_tstamp to = ev_at (periodics [HEAP0]) - ev_rt_now + backend_fudge;
1693 if (waittime > to) waittime = to; 1800 if (waittime > to) waittime = to;
1694 } 1801 }
1695#endif 1802#endif
1696 1803
1697 if (expect_false (waittime < timeout_blocktime)) 1804 if (expect_false (waittime < timeout_blocktime))
1863ev_timer_start (EV_P_ ev_timer *w) 1970ev_timer_start (EV_P_ ev_timer *w)
1864{ 1971{
1865 if (expect_false (ev_is_active (w))) 1972 if (expect_false (ev_is_active (w)))
1866 return; 1973 return;
1867 1974
1868 ((WT)w)->at += mn_now; 1975 ev_at (w) += mn_now;
1869 1976
1870 assert (("ev_timer_start called with negative timer repeat value", w->repeat >= 0.)); 1977 assert (("ev_timer_start called with negative timer repeat value", w->repeat >= 0.));
1871 1978
1872 ev_start (EV_A_ (W)w, ++timercnt); 1979 ev_start (EV_A_ (W)w, ++timercnt + HEAP0 - 1);
1873 array_needsize (WT, timers, timermax, timercnt, EMPTY2); 1980 array_needsize (WT, timers, timermax, timercnt + HEAP0, EMPTY2);
1874 timers [timercnt - 1] = (WT)w; 1981 timers [ev_active (w)] = (WT)w;
1875 upheap (timers, timercnt - 1); 1982 upheap (timers, ev_active (w));
1876 1983
1877 /*assert (("internal timer heap corruption", timers [((W)w)->active - 1] == w));*/ 1984 /*assert (("internal timer heap corruption", timers [ev_active (w)] == w));*/
1878} 1985}
1879 1986
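ev_timer_start now computes the active value as ++timercnt + HEAP0 - 1, so active doubles as the heap index and the first timer lands exactly on the root slot HEAP0; ev_timer_stop below compensates the same way, comparing active against timercnt + HEAP0 - 1 and moving the last element into the freed slot before shrinking the heap. A quick sketch of where the first few timers end up under the 4-heap layout (HEAP0 taken as 3, as defined above; the loop count is arbitrary):

#include <stdio.h>

#define HEAP0 3   /* value used by the 4-heap variant above */

int
main (void)
{
  int timercnt = 0, i;

  for (i = 1; i <= 4; ++i)
    printf ("timer %d: heap/active index %d\n", i, ++timercnt + HEAP0 - 1);

  return 0;
}

This prints indices 3 through 6, with nothing stored in the slots below HEAP0.
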
1880void noinline 1987void noinline
1881ev_timer_stop (EV_P_ ev_timer *w) 1988ev_timer_stop (EV_P_ ev_timer *w)
1882{ 1989{
1883 clear_pending (EV_A_ (W)w); 1990 clear_pending (EV_A_ (W)w);
1884 if (expect_false (!ev_is_active (w))) 1991 if (expect_false (!ev_is_active (w)))
1885 return; 1992 return;
1886 1993
1887 assert (("internal timer heap corruption", timers [((W)w)->active - 1] == (WT)w));
1888
1889 { 1994 {
1890 int active = ((W)w)->active; 1995 int active = ev_active (w);
1891 1996
1997 assert (("internal timer heap corruption", timers [active] == (WT)w));
1998
1892 if (expect_true (--active < --timercnt)) 1999 if (expect_true (active < timercnt + HEAP0 - 1))
1893 { 2000 {
1894 timers [active] = timers [timercnt]; 2001 timers [active] = timers [timercnt + HEAP0 - 1];
1895 adjustheap (timers, timercnt, active); 2002 adjustheap (timers, timercnt, active);
1896 } 2003 }
2004
2005 --timercnt;
1897 } 2006 }
1898 2007
1899 ((WT)w)->at -= mn_now; 2008 ev_at (w) -= mn_now;
1900 2009
1901 ev_stop (EV_A_ (W)w); 2010 ev_stop (EV_A_ (W)w);
1902} 2011}
1903 2012
1904void noinline 2013void noinline
1906{ 2015{
1907 if (ev_is_active (w)) 2016 if (ev_is_active (w))
1908 { 2017 {
1909 if (w->repeat) 2018 if (w->repeat)
1910 { 2019 {
1911 ((WT)w)->at = mn_now + w->repeat; 2020 ev_at (w) = mn_now + w->repeat;
1912 adjustheap (timers, timercnt, ((W)w)->active - 1); 2021 adjustheap (timers, timercnt, ev_active (w));
1913 } 2022 }
1914 else 2023 else
1915 ev_timer_stop (EV_A_ w); 2024 ev_timer_stop (EV_A_ w);
1916 } 2025 }
1917 else if (w->repeat) 2026 else if (w->repeat)
1918 { 2027 {
1919 w->at = w->repeat; 2028 ev_at (w) = w->repeat;
1920 ev_timer_start (EV_A_ w); 2029 ev_timer_start (EV_A_ w);
1921 } 2030 }
1922} 2031}
1923 2032
1924#if EV_PERIODIC_ENABLE 2033#if EV_PERIODIC_ENABLE
1927{ 2036{
1928 if (expect_false (ev_is_active (w))) 2037 if (expect_false (ev_is_active (w)))
1929 return; 2038 return;
1930 2039
1931 if (w->reschedule_cb) 2040 if (w->reschedule_cb)
1932 ((WT)w)->at = w->reschedule_cb (w, ev_rt_now); 2041 ev_at (w) = w->reschedule_cb (w, ev_rt_now);
1933 else if (w->interval) 2042 else if (w->interval)
1934 { 2043 {
1935 assert (("ev_periodic_start called with negative interval value", w->interval >= 0.)); 2044 assert (("ev_periodic_start called with negative interval value", w->interval >= 0.));
1936 /* this formula differs from the one in periodic_reify because we do not always round up */ 2045 /* this formula differs from the one in periodic_reify because we do not always round up */
1937 ((WT)w)->at = w->offset + ceil ((ev_rt_now - w->offset) / w->interval) * w->interval; 2046 ev_at (w) = w->offset + ceil ((ev_rt_now - w->offset) / w->interval) * w->interval;
1938 } 2047 }
1939 else 2048 else
1940 ((WT)w)->at = w->offset; 2049 ev_at (w) = w->offset;
1941 2050
1942 ev_start (EV_A_ (W)w, ++periodiccnt); 2051 ev_start (EV_A_ (W)w, ++periodiccnt + HEAP0 - 1);
1943 array_needsize (WT, periodics, periodicmax, periodiccnt, EMPTY2); 2052 array_needsize (WT, periodics, periodicmax, periodiccnt + HEAP0, EMPTY2);
1944 periodics [periodiccnt - 1] = (WT)w; 2053 periodics [ev_active (w)] = (WT)w;
1945 upheap (periodics, periodiccnt - 1); 2054 upheap (periodics, ev_active (w));
1946 2055
1947 /*assert (("internal periodic heap corruption", periodics [((W)w)->active - 1] == w));*/ 2056 /*assert (("internal periodic heap corruption", periodics [ev_active (w)] == w));*/
1948} 2057}
1949 2058
1950void noinline 2059void noinline
1951ev_periodic_stop (EV_P_ ev_periodic *w) 2060ev_periodic_stop (EV_P_ ev_periodic *w)
1952{ 2061{
1953 clear_pending (EV_A_ (W)w); 2062 clear_pending (EV_A_ (W)w);
1954 if (expect_false (!ev_is_active (w))) 2063 if (expect_false (!ev_is_active (w)))
1955 return; 2064 return;
1956 2065
1957 assert (("internal periodic heap corruption", periodics [((W)w)->active - 1] == (WT)w));
1958
1959 { 2066 {
1960 int active = ((W)w)->active; 2067 int active = ev_active (w);
1961 2068
2069 assert (("internal periodic heap corruption", periodics [active] == (WT)w));
2070
1962 if (expect_true (--active < --periodiccnt)) 2071 if (expect_true (active < periodiccnt + HEAP0 - 1))
1963 { 2072 {
1964 periodics [active] = periodics [periodiccnt]; 2073 periodics [active] = periodics [periodiccnt + HEAP0 - 1];
1965 adjustheap (periodics, periodiccnt, active); 2074 adjustheap (periodics, periodiccnt, active);
1966 } 2075 }
2076
2077 --periodiccnt;
1967 } 2078 }
1968 2079
1969 ev_stop (EV_A_ (W)w); 2080 ev_stop (EV_A_ (W)w);
1970} 2081}
1971 2082
2087 if (w->wd < 0) 2198 if (w->wd < 0)
2088 { 2199 {
2089 ev_timer_start (EV_A_ &w->timer); /* this is not race-free, so we still need to recheck periodically */ 2200 ev_timer_start (EV_A_ &w->timer); /* this is not race-free, so we still need to recheck periodically */
2090 2201
2091 /* monitor some parent directory for speedup hints */ 2202 /* monitor some parent directory for speedup hints */
2203 /* note that exceeding the hardcoded limit is not a correctness issue, */
2204 /* but an efficiency issue only */
2092 if ((errno == ENOENT || errno == EACCES) && strlen (w->path) < 4096) 2205 if ((errno == ENOENT || errno == EACCES) && strlen (w->path) < 4096)
2093 { 2206 {
2094 char path [4096]; 2207 char path [4096];
2095 strcpy (path, w->path); 2208 strcpy (path, w->path);
2096 2209
2341 clear_pending (EV_A_ (W)w); 2454 clear_pending (EV_A_ (W)w);
2342 if (expect_false (!ev_is_active (w))) 2455 if (expect_false (!ev_is_active (w)))
2343 return; 2456 return;
2344 2457
2345 { 2458 {
2346 int active = ((W)w)->active; 2459 int active = ev_active (w);
2347 2460
2348 idles [ABSPRI (w)][active - 1] = idles [ABSPRI (w)][--idlecnt [ABSPRI (w)]]; 2461 idles [ABSPRI (w)][active - 1] = idles [ABSPRI (w)][--idlecnt [ABSPRI (w)]];
2349 ((W)idles [ABSPRI (w)][active - 1])->active = active; 2462 ev_active (idles [ABSPRI (w)][active - 1]) = active;
2350 2463
2351 ev_stop (EV_A_ (W)w); 2464 ev_stop (EV_A_ (W)w);
2352 --idleall; 2465 --idleall;
2353 } 2466 }
2354} 2467}
2371 clear_pending (EV_A_ (W)w); 2484 clear_pending (EV_A_ (W)w);
2372 if (expect_false (!ev_is_active (w))) 2485 if (expect_false (!ev_is_active (w)))
2373 return; 2486 return;
2374 2487
2375 { 2488 {
2376 int active = ((W)w)->active; 2489 int active = ev_active (w);
2490
2377 prepares [active - 1] = prepares [--preparecnt]; 2491 prepares [active - 1] = prepares [--preparecnt];
2378 ((W)prepares [active - 1])->active = active; 2492 ev_active (prepares [active - 1]) = active;
2379 } 2493 }
2380 2494
2381 ev_stop (EV_A_ (W)w); 2495 ev_stop (EV_A_ (W)w);
2382} 2496}
2383 2497
2398 clear_pending (EV_A_ (W)w); 2512 clear_pending (EV_A_ (W)w);
2399 if (expect_false (!ev_is_active (w))) 2513 if (expect_false (!ev_is_active (w)))
2400 return; 2514 return;
2401 2515
2402 { 2516 {
2403 int active = ((W)w)->active; 2517 int active = ev_active (w);
2518
2404 checks [active - 1] = checks [--checkcnt]; 2519 checks [active - 1] = checks [--checkcnt];
2405 ((W)checks [active - 1])->active = active; 2520 ev_active (checks [active - 1]) = active;
2406 } 2521 }
2407 2522
2408 ev_stop (EV_A_ (W)w); 2523 ev_stop (EV_A_ (W)w);
2409} 2524}
2410 2525
2506 clear_pending (EV_A_ (W)w); 2621 clear_pending (EV_A_ (W)w);
2507 if (expect_false (!ev_is_active (w))) 2622 if (expect_false (!ev_is_active (w)))
2508 return; 2623 return;
2509 2624
2510 { 2625 {
2511 int active = ((W)w)->active; 2626 int active = ev_active (w);
2627
2512 forks [active - 1] = forks [--forkcnt]; 2628 forks [active - 1] = forks [--forkcnt];
2513 ((W)forks [active - 1])->active = active; 2629 ev_active (forks [active - 1]) = active;
2514 } 2630 }
2515 2631
2516 ev_stop (EV_A_ (W)w); 2632 ev_stop (EV_A_ (W)w);
2517} 2633}
2518#endif 2634#endif
2537 clear_pending (EV_A_ (W)w); 2653 clear_pending (EV_A_ (W)w);
2538 if (expect_false (!ev_is_active (w))) 2654 if (expect_false (!ev_is_active (w)))
2539 return; 2655 return;
2540 2656
2541 { 2657 {
2542 int active = ((W)w)->active; 2658 int active = ev_active (w);
2659
2543 asyncs [active - 1] = asyncs [--asynccnt]; 2660 asyncs [active - 1] = asyncs [--asynccnt];
2544 ((W)asyncs [active - 1])->active = active; 2661 ev_active (asyncs [active - 1]) = active;
2545 } 2662 }
2546 2663
2547 ev_stop (EV_A_ (W)w); 2664 ev_stop (EV_A_ (W)w);
2548} 2665}
2549 2666
