ViewVC Help
View File | Revision Log | Show Annotations | Download File
/cvs/libev/ev.c
(Generate patch)

Comparing libev/ev.c (file contents):
Revision 1.501 by root, Mon Jul 1 21:47:42 2019 UTC vs.
Revision 1.521 by root, Sat Dec 28 07:47:35 2019 UTC

117# define EV_USE_EPOLL 0 117# define EV_USE_EPOLL 0
118# endif 118# endif
119 119
120# if HAVE_LINUX_AIO_ABI_H 120# if HAVE_LINUX_AIO_ABI_H
121# ifndef EV_USE_LINUXAIO 121# ifndef EV_USE_LINUXAIO
122# define EV_USE_LINUXAIO EV_FEATURE_BACKENDS 122# define EV_USE_LINUXAIO 0 /* was: EV_FEATURE_BACKENDS, always off by default */
123# endif 123# endif
124# else 124# else
125# undef EV_USE_LINUXAIO 125# undef EV_USE_LINUXAIO
126# define EV_USE_LINUXAIO 0 126# define EV_USE_LINUXAIO 0
127# endif 127# endif
128 128
129# if HAVE_LINUX_FS_H && HAVE_SYS_TIMERFD_H && HAVE_KERNEL_RWF_T
130# ifndef EV_USE_IOURING
131# define EV_USE_IOURING EV_FEATURE_BACKENDS
132# endif
133# else
134# undef EV_USE_IOURING
135# define EV_USE_IOURING 0
136# endif
137
129# if HAVE_KQUEUE && HAVE_SYS_EVENT_H 138# if HAVE_KQUEUE && HAVE_SYS_EVENT_H
130# ifndef EV_USE_KQUEUE 139# ifndef EV_USE_KQUEUE
131# define EV_USE_KQUEUE EV_FEATURE_BACKENDS 140# define EV_USE_KQUEUE EV_FEATURE_BACKENDS
132# endif 141# endif
133# else 142# else
168# endif 177# endif
169# else 178# else
170# undef EV_USE_EVENTFD 179# undef EV_USE_EVENTFD
171# define EV_USE_EVENTFD 0 180# define EV_USE_EVENTFD 0
172# endif 181# endif
173 182
183# if HAVE_SYS_TIMERFD_H
184# ifndef EV_USE_TIMERFD
185# define EV_USE_TIMERFD EV_FEATURE_OS
186# endif
187# else
188# undef EV_USE_TIMERFD
189# define EV_USE_TIMERFD 0
190# endif
191
174#endif 192#endif
175 193
176/* OS X, in its infinite idiocy, actually HARDCODES 194/* OS X, in its infinite idiocy, actually HARDCODES
177 * a limit of 1024 into their select. Where people have brains, 195 * a limit of 1024 into their select. Where people have brains,
178 * OS X engineers apparently have a vacuum. Or maybe they were 196 * OS X engineers apparently have a vacuum. Or maybe they were
326# define EV_USE_PORT 0 344# define EV_USE_PORT 0
327#endif 345#endif
328 346
329#ifndef EV_USE_LINUXAIO 347#ifndef EV_USE_LINUXAIO
330# if __linux /* libev currently assumes linux/aio_abi.h is always available on linux */ 348# if __linux /* libev currently assumes linux/aio_abi.h is always available on linux */
331# define EV_USE_LINUXAIO 1 349# define EV_USE_LINUXAIO 0 /* was: 1, always off by default */
332# else 350# else
333# define EV_USE_LINUXAIO 0 351# define EV_USE_LINUXAIO 0
334# endif 352# endif
335#endif 353#endif
336 354
337#ifndef EV_USE_IOURING 355#ifndef EV_USE_IOURING
338# if __linux 356# if __linux /* later checks might disable again */
339# define EV_USE_IOURING 0 357# define EV_USE_IOURING 1
340# else 358# else
341# define EV_USE_IOURING 0 359# define EV_USE_IOURING 0
342# endif 360# endif
343#endif 361#endif
344 362
369#ifndef EV_USE_SIGNALFD 387#ifndef EV_USE_SIGNALFD
370# if __linux && (__GLIBC__ > 2 || (__GLIBC__ == 2 && __GLIBC_MINOR__ >= 7)) 388# if __linux && (__GLIBC__ > 2 || (__GLIBC__ == 2 && __GLIBC_MINOR__ >= 7))
371# define EV_USE_SIGNALFD EV_FEATURE_OS 389# define EV_USE_SIGNALFD EV_FEATURE_OS
372# else 390# else
373# define EV_USE_SIGNALFD 0 391# define EV_USE_SIGNALFD 0
392# endif
393#endif
394
395#ifndef EV_USE_TIMERFD
396# if __linux && (__GLIBC__ > 2 || (__GLIBC__ == 2 && __GLIBC_MINOR__ >= 8))
397# define EV_USE_TIMERFD EV_FEATURE_OS
398# else
399# define EV_USE_TIMERFD 0
374# endif 400# endif
375#endif 401#endif
376 402
377#if 0 /* debugging */ 403#if 0 /* debugging */
378# define EV_VERIFY 3 404# define EV_VERIFY 3
438#if !EV_STAT_ENABLE 464#if !EV_STAT_ENABLE
439# undef EV_USE_INOTIFY 465# undef EV_USE_INOTIFY
440# define EV_USE_INOTIFY 0 466# define EV_USE_INOTIFY 0
441#endif 467#endif
442 468
469#if __linux && EV_USE_IOURING
470# include <linux/version.h>
471# if LINUX_VERSION_CODE < KERNEL_VERSION(4,14,0)
472# undef EV_USE_IOURING
473# define EV_USE_IOURING 0
474# endif
475#endif
476
443#if !EV_USE_NANOSLEEP 477#if !EV_USE_NANOSLEEP
444/* hp-ux has it in sys/time.h, which we unconditionally include above */ 478/* hp-ux has it in sys/time.h, which we unconditionally include above */
445# if !defined _WIN32 && !defined __hpux 479# if !defined _WIN32 && !defined __hpux
446# include <sys/select.h> 480# include <sys/select.h>
447# endif 481# endif
448#endif 482#endif
449 483
450#if EV_USE_LINUXAIO 484#if EV_USE_LINUXAIO
451# include <sys/syscall.h> 485# include <sys/syscall.h>
452# if !SYS_io_getevents || !EV_USE_EPOLL /* ev_linxaio uses ev_poll.c:ev_epoll_create */ 486# if SYS_io_getevents && EV_USE_EPOLL /* linuxaio backend requires epoll backend */
487# define EV_NEED_SYSCALL 1
488# else
453# undef EV_USE_LINUXAIO 489# undef EV_USE_LINUXAIO
454# define EV_USE_LINUXAIO 0 490# define EV_USE_LINUXAIO 0
455# else
456# define EV_NEED_SYSCALL 1
457# endif 491# endif
458#endif 492#endif
459 493
460#if EV_USE_IOURING 494#if EV_USE_IOURING
461# include <sys/syscall.h> 495# include <sys/syscall.h>
462# if !__alpha && !SYS_io_uring_setup 496# if !SYS_io_uring_setup && __linux && !__alpha
463# define SYS_io_uring_setup 425 497# define SYS_io_uring_setup 425
464# define SYS_io_uring_enter 426 498# define SYS_io_uring_enter 426
465# define SYS_io_uring_wregister 427 499# define SYS_io_uring_wregister 427
466# endif 500# endif
467# if SYS_io_uring_setup 501# if SYS_io_uring_setup && EV_USE_EPOLL /* iouring backend requires epoll backend */
468# define EV_NEED_SYSCALL 1 502# define EV_NEED_SYSCALL 1
469# else 503# else
470# undef EV_USE_IOURING 504# undef EV_USE_IOURING
471# define EV_USE_IOURING 0 505# define EV_USE_IOURING 0
472# endif 506# endif
481# define EV_USE_INOTIFY 0 515# define EV_USE_INOTIFY 0
482# endif 516# endif
483#endif 517#endif
484 518
485#if EV_USE_EVENTFD 519#if EV_USE_EVENTFD
486/* our minimum requirement is glibc 2.7 which has the stub, but not the header */ 520/* our minimum requirement is glibc 2.7 which has the stub, but not the full header */
487# include <stdint.h> 521# include <stdint.h>
488# ifndef EFD_NONBLOCK 522# ifndef EFD_NONBLOCK
489# define EFD_NONBLOCK O_NONBLOCK 523# define EFD_NONBLOCK O_NONBLOCK
490# endif 524# endif
491# ifndef EFD_CLOEXEC 525# ifndef EFD_CLOEXEC
497# endif 531# endif
498EV_CPP(extern "C") int (eventfd) (unsigned int initval, int flags); 532EV_CPP(extern "C") int (eventfd) (unsigned int initval, int flags);
499#endif 533#endif
500 534
501#if EV_USE_SIGNALFD 535#if EV_USE_SIGNALFD
502/* our minimum requirement is glibc 2.7 which has the stub, but not the header */ 536/* our minimum requirement is glibc 2.7 which has the stub, but not the full header */
503# include <stdint.h> 537# include <stdint.h>
504# ifndef SFD_NONBLOCK 538# ifndef SFD_NONBLOCK
505# define SFD_NONBLOCK O_NONBLOCK 539# define SFD_NONBLOCK O_NONBLOCK
506# endif 540# endif
507# ifndef SFD_CLOEXEC 541# ifndef SFD_CLOEXEC
509# define SFD_CLOEXEC O_CLOEXEC 543# define SFD_CLOEXEC O_CLOEXEC
510# else 544# else
511# define SFD_CLOEXEC 02000000 545# define SFD_CLOEXEC 02000000
512# endif 546# endif
513# endif 547# endif
514EV_CPP (extern "C") int signalfd (int fd, const sigset_t *mask, int flags); 548EV_CPP (extern "C") int (signalfd) (int fd, const sigset_t *mask, int flags);
515 549
516struct signalfd_siginfo 550struct signalfd_siginfo
517{ 551{
518 uint32_t ssi_signo; 552 uint32_t ssi_signo;
519 char pad[128 - sizeof (uint32_t)]; 553 char pad[128 - sizeof (uint32_t)];
520}; 554};
521#endif 555#endif
522 556
523/*****************************************************************************/ 557/* for timerfd, libev core requires TFD_TIMER_CANCEL_ON_SET &c */
524 558#if EV_USE_TIMERFD
525#if EV_NEED_SYSCALL 559# include <sys/timerfd.h>
526 560/* timerfd is only used for periodics */
527#include <sys/syscall.h> 561# if !(defined (TFD_TIMER_CANCEL_ON_SET) && defined (TFD_CLOEXEC) && defined (TFD_NONBLOCK)) || !EV_PERIODIC_ENABLE
528 562# undef EV_USE_TIMERFD
529/* 563# define EV_USE_TIMERFD 0
530 * define some syscall wrappers for common architectures
531 * this is mostly for nice looks during debugging, not performance.
532 * our syscalls return < 0, not == -1, on error. which is good
533 * enough for linux aio.
534 * TODO: arm is also common nowadays, maybe even mips and x86
535 * TODO: after implementing this, it suddenly looks like overkill, but its hard to remove...
536 */
537#if __GNUC__ && __linux && ECB_AMD64 && !defined __OPTIMIZE_SIZE__
538 /* the costly errno access probably kills this for size optimisation */
539
540 #define ev_syscall(nr,narg,arg1,arg2,arg3,arg4,arg5) \
541 ({ \
542 long res; \
543 register unsigned long r5 __asm__ ("r8" ); \
544 register unsigned long r4 __asm__ ("r10"); \
545 register unsigned long r3 __asm__ ("rdx"); \
546 register unsigned long r2 __asm__ ("rsi"); \
547 register unsigned long r1 __asm__ ("rdi"); \
548 if (narg >= 5) r5 = (unsigned long)(arg5); \
549 if (narg >= 4) r4 = (unsigned long)(arg4); \
550 if (narg >= 3) r3 = (unsigned long)(arg3); \
551 if (narg >= 2) r2 = (unsigned long)(arg2); \
552 if (narg >= 1) r1 = (unsigned long)(arg1); \
553 __asm__ __volatile__ ( \
554 "syscall\n\t" \
555 : "=a" (res) \
556 : "0" (nr), "r" (r1), "r" (r2), "r" (r3), "r" (r4), "r" (r5) \
557 : "cc", "r11", "cx", "memory"); \
558 errno = -res; \
559 res; \
560 })
561
562#endif 564# endif
563
564#ifdef ev_syscall
565 #define ev_syscall0(nr) ev_syscall (nr, 0, 0, 0, 0, 0, 0
566 #define ev_syscall1(nr,arg1) ev_syscall (nr, 1, arg1, 0, 0, 0, 0)
567 #define ev_syscall2(nr,arg1,arg2) ev_syscall (nr, 2, arg1, arg2, 0, 0, 0)
568 #define ev_syscall3(nr,arg1,arg2,arg3) ev_syscall (nr, 3, arg1, arg2, arg3, 0, 0)
569 #define ev_syscall4(nr,arg1,arg2,arg3,arg4) ev_syscall (nr, 3, arg1, arg2, arg3, arg4, 0)
570 #define ev_syscall5(nr,arg1,arg2,arg3,arg4,arg5) ev_syscall (nr, 5, arg1, arg2, arg3, arg4, arg5)
571#else
572 #define ev_syscall0(nr) syscall (nr)
573 #define ev_syscall1(nr,arg1) syscall (nr, arg1)
574 #define ev_syscall2(nr,arg1,arg2) syscall (nr, arg1, arg2)
575 #define ev_syscall3(nr,arg1,arg2,arg3) syscall (nr, arg1, arg2, arg3)
576 #define ev_syscall4(nr,arg1,arg2,arg3,arg4) syscall (nr, arg1, arg2, arg3, arg4)
577 #define ev_syscall5(nr,arg1,arg2,arg3,arg4,arg5) syscall (nr, arg1, arg2, arg3, arg4, arg5)
578#endif
579
580#endif 565#endif
581 566
582/*****************************************************************************/ 567/*****************************************************************************/
583 568
584#if EV_VERIFY >= 3 569#if EV_VERIFY >= 3
592 * This value is good at least till the year 4000. 577 * This value is good at least till the year 4000.
593 */ 578 */
594#define MIN_INTERVAL 0.0001220703125 /* 1/2**13, good till 4000 */ 579#define MIN_INTERVAL 0.0001220703125 /* 1/2**13, good till 4000 */
595/*#define MIN_INTERVAL 0.00000095367431640625 /* 1/2**20, good till 2200 */ 580/*#define MIN_INTERVAL 0.00000095367431640625 /* 1/2**20, good till 2200 */
596 581
597#define MIN_TIMEJUMP 1. /* minimum timejump that gets detected (if monotonic clock available) */ 582#define MIN_TIMEJUMP 1. /* minimum timejump that gets detected (if monotonic clock available) */
598#define MAX_BLOCKTIME 59.743 /* never wait longer than this time (to detect time jumps) */ 583#define MAX_BLOCKTIME 59.743 /* never wait longer than this time (to detect time jumps) */
599 584
585/* find a portable timestamp that is "always" in the future but fits into time_t.
586 * this is quite hard, and we are mostly guessing - we handle 32 bit signed/unsigned time_t,
587 * and sizes larger than 32 bit, and maybe the unlikely floating point time_t */
588#define EV_TSTAMP_HUGE \
589 (sizeof (time_t) >= 8 ? 10000000000000. \
590 : 0 < (time_t)4294967295 ? 4294967295. \
591 : 2147483647.) \
592
593#ifndef EV_TS_CONST
594# define EV_TS_CONST(nv) nv
595# define EV_TS_TO_MSEC(a) a * 1e3 + 0.9999
596# define EV_TS_FROM_USEC(us) us * 1e-6
600#define EV_TV_SET(tv,t) do { tv.tv_sec = (long)t; tv.tv_usec = (long)((t - tv.tv_sec) * 1e6); } while (0) 597# define EV_TV_SET(tv,t) do { tv.tv_sec = (long)t; tv.tv_usec = (long)((t - tv.tv_sec) * 1e6); } while (0)
601#define EV_TS_SET(ts,t) do { ts.tv_sec = (long)t; ts.tv_nsec = (long)((t - ts.tv_sec) * 1e9); } while (0) 598# define EV_TS_SET(ts,t) do { ts.tv_sec = (long)t; ts.tv_nsec = (long)((t - ts.tv_sec) * 1e9); } while (0)
599# define EV_TV_GET(tv) ((tv).tv_sec + (tv).tv_usec * 1e-6)
600# define EV_TS_GET(ts) ((ts).tv_sec + (ts).tv_nsec * 1e-9)
601#endif
602 602
603/* the following is ecb.h embedded into libev - use update_ev_c to update from an external copy */ 603/* the following is ecb.h embedded into libev - use update_ev_c to update from an external copy */
604/* ECB.H BEGIN */ 604/* ECB.H BEGIN */
605/* 605/*
606 * libecb - http://software.schmorp.de/pkg/libecb 606 * libecb - http://software.schmorp.de/pkg/libecb
1643# define inline_speed ecb_inline 1643# define inline_speed ecb_inline
1644#else 1644#else
1645# define inline_speed ecb_noinline static 1645# define inline_speed ecb_noinline static
1646#endif 1646#endif
1647 1647
1648/*****************************************************************************/
1649/* raw syscall wrappers */
1650
1651#if EV_NEED_SYSCALL
1652
1653#include <sys/syscall.h>
1654
1655/*
1656 * define some syscall wrappers for common architectures
1657 * this is mostly for nice looks during debugging, not performance.
1658 * our syscalls return < 0, not == -1, on error. which is good
1659 * enough for linux aio.
1660 * TODO: arm is also common nowadays, maybe even mips and x86
1661 * TODO: after implementing this, it suddenly looks like overkill, but its hard to remove...
1662 */
1663#if __GNUC__ && __linux && ECB_AMD64 && !EV_FEATURE_CODE
1664 /* the costly errno access probably kills this for size optimisation */
1665
1666 #define ev_syscall(nr,narg,arg1,arg2,arg3,arg4,arg5,arg6) \
1667 ({ \
1668 long res; \
1669 register unsigned long r6 __asm__ ("r9" ); \
1670 register unsigned long r5 __asm__ ("r8" ); \
1671 register unsigned long r4 __asm__ ("r10"); \
1672 register unsigned long r3 __asm__ ("rdx"); \
1673 register unsigned long r2 __asm__ ("rsi"); \
1674 register unsigned long r1 __asm__ ("rdi"); \
1675 if (narg >= 6) r6 = (unsigned long)(arg6); \
1676 if (narg >= 5) r5 = (unsigned long)(arg5); \
1677 if (narg >= 4) r4 = (unsigned long)(arg4); \
1678 if (narg >= 3) r3 = (unsigned long)(arg3); \
1679 if (narg >= 2) r2 = (unsigned long)(arg2); \
1680 if (narg >= 1) r1 = (unsigned long)(arg1); \
1681 __asm__ __volatile__ ( \
1682 "syscall\n\t" \
1683 : "=a" (res) \
1684 : "0" (nr), "r" (r1), "r" (r2), "r" (r3), "r" (r4), "r" (r5) \
1685 : "cc", "r11", "cx", "memory"); \
1686 errno = -res; \
1687 res; \
1688 })
1689
1690#endif
1691
1692#ifdef ev_syscall
1693 #define ev_syscall0(nr) ev_syscall (nr, 0, 0, 0, 0, 0, 0, 0)
1694 #define ev_syscall1(nr,arg1) ev_syscall (nr, 1, arg1, 0, 0, 0, 0, 0)
1695 #define ev_syscall2(nr,arg1,arg2) ev_syscall (nr, 2, arg1, arg2, 0, 0, 0, 0)
1696 #define ev_syscall3(nr,arg1,arg2,arg3) ev_syscall (nr, 3, arg1, arg2, arg3, 0, 0, 0)
1697 #define ev_syscall4(nr,arg1,arg2,arg3,arg4) ev_syscall (nr, 3, arg1, arg2, arg3, arg4, 0, 0)
1698 #define ev_syscall5(nr,arg1,arg2,arg3,arg4,arg5) ev_syscall (nr, 5, arg1, arg2, arg3, arg4, arg5, 0)
1699 #define ev_syscall6(nr,arg1,arg2,arg3,arg4,arg5,arg6) ev_syscall (nr, 6, arg1, arg2, arg3, arg4, arg5,arg6)
1700#else
1701 #define ev_syscall0(nr) syscall (nr)
1702 #define ev_syscall1(nr,arg1) syscall (nr, arg1)
1703 #define ev_syscall2(nr,arg1,arg2) syscall (nr, arg1, arg2)
1704 #define ev_syscall3(nr,arg1,arg2,arg3) syscall (nr, arg1, arg2, arg3)
1705 #define ev_syscall4(nr,arg1,arg2,arg3,arg4) syscall (nr, arg1, arg2, arg3, arg4)
1706 #define ev_syscall5(nr,arg1,arg2,arg3,arg4,arg5) syscall (nr, arg1, arg2, arg3, arg4, arg5)
1707 #define ev_syscall6(nr,arg1,arg2,arg3,arg4,arg5,arg6) syscall (nr, arg1, arg2, arg3, arg4, arg5,arg6)
1708#endif
1709
1710#endif
1711
1712/*****************************************************************************/
1713
1648#define NUMPRI (EV_MAXPRI - EV_MINPRI + 1) 1714#define NUMPRI (EV_MAXPRI - EV_MINPRI + 1)
1649 1715
1650#if EV_MINPRI == EV_MAXPRI 1716#if EV_MINPRI == EV_MAXPRI
1651# define ABSPRI(w) (((W)w), 0) 1717# define ABSPRI(w) (((W)w), 0)
1652#else 1718#else
1711 const ev_tstamp shift = sizeof (unsigned long) >= 8 ? 10000000000000000000. : 1000000000.; 1777 const ev_tstamp shift = sizeof (unsigned long) >= 8 ? 10000000000000000000. : 1000000000.;
1712#else 1778#else
1713 const ev_tstamp shift = sizeof (unsigned long) >= 8 ? 18446744073709551616. : 4294967296.; 1779 const ev_tstamp shift = sizeof (unsigned long) >= 8 ? 18446744073709551616. : 4294967296.;
1714#endif 1780#endif
1715 1781
1782 /* special treatment for negative arguments */
1783 if (ecb_expect_false (v < 0.))
1784 {
1785 ev_tstamp f = -ev_floor (-v);
1786
1787 return f - (f == v ? 0 : 1);
1788 }
1789
1716 /* argument too large for an unsigned long? */ 1790 /* argument too large for an unsigned long? then reduce it */
1717 if (ecb_expect_false (v >= shift)) 1791 if (ecb_expect_false (v >= shift))
1718 { 1792 {
1719 ev_tstamp f; 1793 ev_tstamp f;
1720 1794
1721 if (v == v - 1.) 1795 if (v == v - 1.)
1722 return v; /* very large number */ 1796 return v; /* very large numbers are assumed to be integer */
1723 1797
1724 f = shift * ev_floor (v * (1. / shift)); 1798 f = shift * ev_floor (v * (1. / shift));
1725 return f + ev_floor (v - f); 1799 return f + ev_floor (v - f);
1726 }
1727
1728 /* special treatment for negative args? */
1729 if (ecb_expect_false (v < 0.))
1730 {
1731 ev_tstamp f = -ev_floor (-v);
1732
1733 return f - (f == v ? 0 : 1);
1734 } 1800 }
1735 1801
1736 /* fits into an unsigned long */ 1802 /* fits into an unsigned long */
1737 return (unsigned long)v; 1803 return (unsigned long)v;
1738} 1804}
1882{ 1948{
1883 WL head; 1949 WL head;
1884 unsigned char events; /* the events watched for */ 1950 unsigned char events; /* the events watched for */
1885 unsigned char reify; /* flag set when this ANFD needs reification (EV_ANFD_REIFY, EV__IOFDSET) */ 1951 unsigned char reify; /* flag set when this ANFD needs reification (EV_ANFD_REIFY, EV__IOFDSET) */
1886 unsigned char emask; /* some backends store the actual kernel mask in here */ 1952 unsigned char emask; /* some backends store the actual kernel mask in here */
1887 unsigned char unused; 1953 unsigned char eflags; /* flags field for use by backends */
1888#if EV_USE_EPOLL 1954#if EV_USE_EPOLL
1889 unsigned int egen; /* generation counter to counter epoll bugs */ 1955 unsigned int egen; /* generation counter to counter epoll bugs */
1890#endif 1956#endif
1891#if EV_SELECT_IS_WINSOCKET || EV_USE_IOCP 1957#if EV_SELECT_IS_WINSOCKET || EV_USE_IOCP
1892 SOCKET handle; 1958 SOCKET handle;
1946 static struct ev_loop default_loop_struct; 2012 static struct ev_loop default_loop_struct;
1947 EV_API_DECL struct ev_loop *ev_default_loop_ptr = 0; /* needs to be initialised to make it a definition despite extern */ 2013 EV_API_DECL struct ev_loop *ev_default_loop_ptr = 0; /* needs to be initialised to make it a definition despite extern */
1948 2014
1949#else 2015#else
1950 2016
1951 EV_API_DECL ev_tstamp ev_rt_now = 0; /* needs to be initialised to make it a definition despite extern */ 2017 EV_API_DECL ev_tstamp ev_rt_now = EV_TS_CONST (0.); /* needs to be initialised to make it a definition despite extern */
1952 #define VAR(name,decl) static decl; 2018 #define VAR(name,decl) static decl;
1953 #include "ev_vars.h" 2019 #include "ev_vars.h"
1954 #undef VAR 2020 #undef VAR
1955 2021
1956 static int ev_default_loop_ptr; 2022 static int ev_default_loop_ptr;
1978#if EV_USE_REALTIME 2044#if EV_USE_REALTIME
1979 if (ecb_expect_true (have_realtime)) 2045 if (ecb_expect_true (have_realtime))
1980 { 2046 {
1981 struct timespec ts; 2047 struct timespec ts;
1982 clock_gettime (CLOCK_REALTIME, &ts); 2048 clock_gettime (CLOCK_REALTIME, &ts);
1983 return ts.tv_sec + ts.tv_nsec * 1e-9; 2049 return EV_TS_GET (ts);
1984 } 2050 }
1985#endif 2051#endif
1986 2052
2053 {
1987 struct timeval tv; 2054 struct timeval tv;
1988 gettimeofday (&tv, 0); 2055 gettimeofday (&tv, 0);
1989 return tv.tv_sec + tv.tv_usec * 1e-6; 2056 return EV_TV_GET (tv);
2057 }
1990} 2058}
1991#endif 2059#endif
1992 2060
1993inline_size ev_tstamp 2061inline_size ev_tstamp
1994get_clock (void) 2062get_clock (void)
1996#if EV_USE_MONOTONIC 2064#if EV_USE_MONOTONIC
1997 if (ecb_expect_true (have_monotonic)) 2065 if (ecb_expect_true (have_monotonic))
1998 { 2066 {
1999 struct timespec ts; 2067 struct timespec ts;
2000 clock_gettime (CLOCK_MONOTONIC, &ts); 2068 clock_gettime (CLOCK_MONOTONIC, &ts);
2001 return ts.tv_sec + ts.tv_nsec * 1e-9; 2069 return EV_TS_GET (ts);
2002 } 2070 }
2003#endif 2071#endif
2004 2072
2005 return ev_time (); 2073 return ev_time ();
2006} 2074}
2014#endif 2082#endif
2015 2083
2016void 2084void
2017ev_sleep (ev_tstamp delay) EV_NOEXCEPT 2085ev_sleep (ev_tstamp delay) EV_NOEXCEPT
2018{ 2086{
2019 if (delay > 0.) 2087 if (delay > EV_TS_CONST (0.))
2020 { 2088 {
2021#if EV_USE_NANOSLEEP 2089#if EV_USE_NANOSLEEP
2022 struct timespec ts; 2090 struct timespec ts;
2023 2091
2024 EV_TS_SET (ts, delay); 2092 EV_TS_SET (ts, delay);
2025 nanosleep (&ts, 0); 2093 nanosleep (&ts, 0);
2026#elif defined _WIN32 2094#elif defined _WIN32
2027 /* maybe this should round up, as ms is very low resolution */ 2095 /* maybe this should round up, as ms is very low resolution */
2028 /* compared to select (µs) or nanosleep (ns) */ 2096 /* compared to select (µs) or nanosleep (ns) */
2029 Sleep ((unsigned long)(delay * 1e3)); 2097 Sleep ((unsigned long)(EV_TS_TO_MSEC (delay)));
2030#else 2098#else
2031 struct timeval tv; 2099 struct timeval tv;
2032 2100
2033 /* here we rely on sys/time.h + sys/types.h + unistd.h providing select */ 2101 /* here we rely on sys/time.h + sys/types.h + unistd.h providing select */
2034 /* something not guaranteed by newer posix versions, but guaranteed */ 2102 /* something not guaranteed by newer posix versions, but guaranteed */
2194inline_size void 2262inline_size void
2195fd_reify (EV_P) 2263fd_reify (EV_P)
2196{ 2264{
2197 int i; 2265 int i;
2198 2266
2267 /* most backends do not modify the fdchanges list in backend_modify.
2268 * except io_uring, which has fixed-size buffers which might force us
2269 * to handle events in backend_modify, causing fdchanges to be amended,
2270 * which could result in an endless loop.
2271 * to avoid this, we do not dynamically handle fds that were added
2272 * during fd_reify. that means that for those backends, fdchangecnt
2273 * might be non-zero during poll, which must cause them to not block.
2274 * to not put too much of a burden on other backends, this detail
2275 * needs to be handled in the backend.
2276 */
2277 int changecnt = fdchangecnt;
2278
2199#if EV_SELECT_IS_WINSOCKET || EV_USE_IOCP 2279#if EV_SELECT_IS_WINSOCKET || EV_USE_IOCP
2200 for (i = 0; i < fdchangecnt; ++i) 2280 for (i = 0; i < changecnt; ++i)
2201 { 2281 {
2202 int fd = fdchanges [i]; 2282 int fd = fdchanges [i];
2203 ANFD *anfd = anfds + fd; 2283 ANFD *anfd = anfds + fd;
2204 2284
2205 if (anfd->reify & EV__IOFDSET && anfd->head) 2285 if (anfd->reify & EV__IOFDSET && anfd->head)
2219 } 2299 }
2220 } 2300 }
2221 } 2301 }
2222#endif 2302#endif
2223 2303
2224 for (i = 0; i < fdchangecnt; ++i) 2304 for (i = 0; i < changecnt; ++i)
2225 { 2305 {
2226 int fd = fdchanges [i]; 2306 int fd = fdchanges [i];
2227 ANFD *anfd = anfds + fd; 2307 ANFD *anfd = anfds + fd;
2228 ev_io *w; 2308 ev_io *w;
2229 2309
2245 2325
2246 if (o_reify & EV__IOFDSET) 2326 if (o_reify & EV__IOFDSET)
2247 backend_modify (EV_A_ fd, o_events, anfd->events); 2327 backend_modify (EV_A_ fd, o_events, anfd->events);
2248 } 2328 }
2249 2329
2330 /* normally, fdchangecnt hasn't changed. if it has, then new fds have been added.
2331 * this is a rare case (see beginning comment in this function), so we copy them to the
2332 * front and hope the backend handles this case.
2333 */
2334 if (ecb_expect_false (fdchangecnt != changecnt))
2335 memmove (fdchanges, fdchanges + changecnt, (fdchangecnt - changecnt) * sizeof (*fdchanges));
2336
2250 fdchangecnt = 0; 2337 fdchangecnt -= changecnt;
2251} 2338}
2252 2339
2253/* something about the given fd changed */ 2340/* something about the given fd changed */
2254inline_size 2341inline_size
2255void 2342void
2384 2471
2385 /* find minimum child */ 2472 /* find minimum child */
2386 if (ecb_expect_true (pos + DHEAP - 1 < E)) 2473 if (ecb_expect_true (pos + DHEAP - 1 < E))
2387 { 2474 {
2388 /* fast path */ (minpos = pos + 0), (minat = ANHE_at (*minpos)); 2475 /* fast path */ (minpos = pos + 0), (minat = ANHE_at (*minpos));
2389 if ( ANHE_at (pos [1]) < minat) (minpos = pos + 1), (minat = ANHE_at (*minpos)); 2476 if ( minat > ANHE_at (pos [1])) (minpos = pos + 1), (minat = ANHE_at (*minpos));
2390 if ( ANHE_at (pos [2]) < minat) (minpos = pos + 2), (minat = ANHE_at (*minpos)); 2477 if ( minat > ANHE_at (pos [2])) (minpos = pos + 2), (minat = ANHE_at (*minpos));
2391 if ( ANHE_at (pos [3]) < minat) (minpos = pos + 3), (minat = ANHE_at (*minpos)); 2478 if ( minat > ANHE_at (pos [3])) (minpos = pos + 3), (minat = ANHE_at (*minpos));
2392 } 2479 }
2393 else if (pos < E) 2480 else if (pos < E)
2394 { 2481 {
2395 /* slow path */ (minpos = pos + 0), (minat = ANHE_at (*minpos)); 2482 /* slow path */ (minpos = pos + 0), (minat = ANHE_at (*minpos));
2396 if (pos + 1 < E && ANHE_at (pos [1]) < minat) (minpos = pos + 1), (minat = ANHE_at (*minpos)); 2483 if (pos + 1 < E && minat > ANHE_at (pos [1])) (minpos = pos + 1), (minat = ANHE_at (*minpos));
2397 if (pos + 2 < E && ANHE_at (pos [2]) < minat) (minpos = pos + 2), (minat = ANHE_at (*minpos)); 2484 if (pos + 2 < E && minat > ANHE_at (pos [2])) (minpos = pos + 2), (minat = ANHE_at (*minpos));
2398 if (pos + 3 < E && ANHE_at (pos [3]) < minat) (minpos = pos + 3), (minat = ANHE_at (*minpos)); 2485 if (pos + 3 < E && minat > ANHE_at (pos [3])) (minpos = pos + 3), (minat = ANHE_at (*minpos));
2399 } 2486 }
2400 else 2487 else
2401 break; 2488 break;
2402 2489
2403 if (ANHE_at (he) <= minat) 2490 if (ANHE_at (he) <= minat)
2411 2498
2412 heap [k] = he; 2499 heap [k] = he;
2413 ev_active (ANHE_w (he)) = k; 2500 ev_active (ANHE_w (he)) = k;
2414} 2501}
2415 2502
2416#else /* 4HEAP */ 2503#else /* not 4HEAP */
2417 2504
2418#define HEAP0 1 2505#define HEAP0 1
2419#define HPARENT(k) ((k) >> 1) 2506#define HPARENT(k) ((k) >> 1)
2420#define UPHEAP_DONE(p,k) (!(p)) 2507#define UPHEAP_DONE(p,k) (!(p))
2421 2508
2807 2894
2808#endif 2895#endif
2809 2896
2810/*****************************************************************************/ 2897/*****************************************************************************/
2811 2898
2899#if EV_USE_TIMERFD
2900
2901static void periodics_reschedule (EV_P);
2902
2903static void
2904timerfdcb (EV_P_ ev_io *iow, int revents)
2905{
2906 struct itimerspec its = { 0 };
2907
2908 /* since we can't easily come up with a (portable) maximum value of time_t,
2909 * we wake up once per month, which hopefully is rare enough to not
2910 * be a problem. */
2911 its.it_value.tv_sec = ev_rt_now + 86400 * 30;
2912 timerfd_settime (timerfd, TFD_TIMER_ABSTIME | TFD_TIMER_CANCEL_ON_SET, &its, 0);
2913
2914 ev_rt_now = ev_time ();
2915 /* periodics_reschedule only needs ev_rt_now */
2916 /* but maybe in the future we want the full treatment. */
2917 /*
2918 now_floor = EV_TS_CONST (0.);
2919 time_update (EV_A_ EV_TSTAMP_HUGE);
2920 */
2921 periodics_reschedule (EV_A);
2922}
2923
2924ecb_noinline ecb_cold
2925static void
2926evtimerfd_init (EV_P)
2927{
2928 if (!ev_is_active (&timerfd_w))
2929 {
2930 timerfd = timerfd_create (CLOCK_REALTIME, TFD_NONBLOCK | TFD_CLOEXEC);
2931
2932 if (timerfd >= 0)
2933 {
2934 fd_intern (timerfd); /* just to be sure */
2935
2936 ev_io_init (&timerfd_w, timerfdcb, timerfd, EV_READ);
2937 ev_set_priority (&timerfd_w, EV_MINPRI);
2938 ev_io_start (EV_A_ &timerfd_w);
2939 ev_unref (EV_A); /* watcher should not keep loop alive */
2940
2941 /* (re-) arm timer */
2942 timerfdcb (EV_A_ 0, 0);
2943 }
2944 }
2945}
2946
2947#endif
2948
2949/*****************************************************************************/
2950
2812#if EV_USE_IOCP 2951#if EV_USE_IOCP
2813# include "ev_iocp.c" 2952# include "ev_iocp.c"
2814#endif 2953#endif
2815#if EV_USE_PORT 2954#if EV_USE_PORT
2816# include "ev_port.c" 2955# include "ev_port.c"
2862unsigned int 3001unsigned int
2863ev_supported_backends (void) EV_NOEXCEPT 3002ev_supported_backends (void) EV_NOEXCEPT
2864{ 3003{
2865 unsigned int flags = 0; 3004 unsigned int flags = 0;
2866 3005
2867 if (EV_USE_PORT ) flags |= EVBACKEND_PORT; 3006 if (EV_USE_PORT ) flags |= EVBACKEND_PORT;
2868 if (EV_USE_KQUEUE ) flags |= EVBACKEND_KQUEUE; 3007 if (EV_USE_KQUEUE ) flags |= EVBACKEND_KQUEUE;
2869 if (EV_USE_EPOLL ) flags |= EVBACKEND_EPOLL; 3008 if (EV_USE_EPOLL ) flags |= EVBACKEND_EPOLL;
2870 if (EV_USE_LINUXAIO) flags |= EVBACKEND_LINUXAIO; 3009 if (EV_USE_LINUXAIO ) flags |= EVBACKEND_LINUXAIO;
2871 if (EV_USE_IOURING ) flags |= EVBACKEND_IOURING; 3010 if (EV_USE_IOURING && ev_linux_version () >= 0x050601) flags |= EVBACKEND_IOURING; /* 5.6.1+ */
2872 if (EV_USE_POLL ) flags |= EVBACKEND_POLL; 3011 if (EV_USE_POLL ) flags |= EVBACKEND_POLL;
2873 if (EV_USE_SELECT ) flags |= EVBACKEND_SELECT; 3012 if (EV_USE_SELECT ) flags |= EVBACKEND_SELECT;
2874 3013
2875 return flags; 3014 return flags;
2876} 3015}
2877 3016
2878ecb_cold 3017ecb_cold
2879unsigned int 3018unsigned int
2909 3048
2910ecb_cold 3049ecb_cold
2911unsigned int 3050unsigned int
2912ev_embeddable_backends (void) EV_NOEXCEPT 3051ev_embeddable_backends (void) EV_NOEXCEPT
2913{ 3052{
2914 int flags = EVBACKEND_EPOLL | EVBACKEND_KQUEUE | EVBACKEND_PORT; 3053 int flags = EVBACKEND_EPOLL | EVBACKEND_KQUEUE | EVBACKEND_PORT | EVBACKEND_IOURING;
2915 3054
2916 /* epoll embeddability broken on all linux versions up to at least 2.6.23 */ 3055 /* epoll embeddability broken on all linux versions up to at least 2.6.23 */
2917 if (ev_linux_version () < 0x020620) /* disable it on linux < 2.6.32 */ 3056 if (ev_linux_version () < 0x020620) /* disable it on linux < 2.6.32 */
2918 flags &= ~EVBACKEND_EPOLL; 3057 flags &= ~EVBACKEND_EPOLL;
3058
3059 /* EVBACKEND_LINUXAIO is theoretically embeddable, but suffers from a performance overhead */
2919 3060
2920 return flags; 3061 return flags;
2921} 3062}
2922 3063
2923unsigned int 3064unsigned int
3041 fs_fd = flags & EVFLAG_NOINOTIFY ? -1 : -2; 3182 fs_fd = flags & EVFLAG_NOINOTIFY ? -1 : -2;
3042#endif 3183#endif
3043#if EV_USE_SIGNALFD 3184#if EV_USE_SIGNALFD
3044 sigfd = flags & EVFLAG_SIGNALFD ? -2 : -1; 3185 sigfd = flags & EVFLAG_SIGNALFD ? -2 : -1;
3045#endif 3186#endif
3187#if EV_USE_TIMERFD
3188 timerfd = flags & EVFLAG_NOTIMERFD ? -1 : -2;
3189#endif
3046 3190
3047 if (!(flags & EVBACKEND_MASK)) 3191 if (!(flags & EVBACKEND_MASK))
3048 flags |= ev_recommended_backends (); 3192 flags |= ev_recommended_backends ();
3049 3193
3050#if EV_USE_IOCP 3194#if EV_USE_IOCP
3121 } 3265 }
3122 3266
3123#if EV_USE_SIGNALFD 3267#if EV_USE_SIGNALFD
3124 if (ev_is_active (&sigfd_w)) 3268 if (ev_is_active (&sigfd_w))
3125 close (sigfd); 3269 close (sigfd);
3270#endif
3271
3272#if EV_USE_TIMERFD
3273 if (ev_is_active (&timerfd_w))
3274 close (timerfd);
3126#endif 3275#endif
3127 3276
3128#if EV_USE_INOTIFY 3277#if EV_USE_INOTIFY
3129 if (fs_fd >= 0) 3278 if (fs_fd >= 0)
3130 close (fs_fd); 3279 close (fs_fd);
3223#endif 3372#endif
3224#if EV_USE_INOTIFY 3373#if EV_USE_INOTIFY
3225 infy_fork (EV_A); 3374 infy_fork (EV_A);
3226#endif 3375#endif
3227 3376
3377 if (postfork != 2)
3378 {
3379 #if EV_USE_SIGNALFD
3380 /* surprisingly, nothing needs to be done for signalfd, accoridng to docs, it does the right thing on fork */
3381 #endif
3382
3383 #if EV_USE_TIMERFD
3384 if (ev_is_active (&timerfd_w))
3385 {
3386 ev_ref (EV_A);
3387 ev_io_stop (EV_A_ &timerfd_w);
3388
3389 close (timerfd);
3390 timerfd = -2;
3391
3392 evtimerfd_init (EV_A);
3393 /* reschedule periodics, in case we missed something */
3394 ev_feed_event (EV_A_ &timerfd_w, EV_CUSTOM);
3395 }
3396 #endif
3397
3228#if EV_SIGNAL_ENABLE || EV_ASYNC_ENABLE 3398 #if EV_SIGNAL_ENABLE || EV_ASYNC_ENABLE
3229 if (ev_is_active (&pipe_w) && postfork != 2) 3399 if (ev_is_active (&pipe_w))
3230 { 3400 {
3231 /* pipe_write_wanted must be false now, so modifying fd vars should be safe */ 3401 /* pipe_write_wanted must be false now, so modifying fd vars should be safe */
3232 3402
3233 ev_ref (EV_A); 3403 ev_ref (EV_A);
3234 ev_io_stop (EV_A_ &pipe_w); 3404 ev_io_stop (EV_A_ &pipe_w);
3235 3405
3236 if (evpipe [0] >= 0) 3406 if (evpipe [0] >= 0)
3237 EV_WIN32_CLOSE_FD (evpipe [0]); 3407 EV_WIN32_CLOSE_FD (evpipe [0]);
3238 3408
3239 evpipe_init (EV_A); 3409 evpipe_init (EV_A);
3240 /* iterate over everything, in case we missed something before */ 3410 /* iterate over everything, in case we missed something before */
3241 ev_feed_event (EV_A_ &pipe_w, EV_CUSTOM); 3411 ev_feed_event (EV_A_ &pipe_w, EV_CUSTOM);
3412 }
3413 #endif
3242 } 3414 }
3243#endif
3244 3415
3245 postfork = 0; 3416 postfork = 0;
3246} 3417}
3247 3418
3248#if EV_MULTIPLICITY 3419#if EV_MULTIPLICITY
3518 { 3689 {
3519 ev_at (w) += w->repeat; 3690 ev_at (w) += w->repeat;
3520 if (ev_at (w) < mn_now) 3691 if (ev_at (w) < mn_now)
3521 ev_at (w) = mn_now; 3692 ev_at (w) = mn_now;
3522 3693
3523 assert (("libev: negative ev_timer repeat value found while processing timers", w->repeat > 0.)); 3694 assert (("libev: negative ev_timer repeat value found while processing timers", w->repeat > EV_TS_CONST (0.)));
3524 3695
3525 ANHE_at_cache (timers [HEAP0]); 3696 ANHE_at_cache (timers [HEAP0]);
3526 downheap (timers, timercnt, HEAP0); 3697 downheap (timers, timercnt, HEAP0);
3527 } 3698 }
3528 else 3699 else
3659 3830
3660 mn_now = get_clock (); 3831 mn_now = get_clock ();
3661 3832
3662 /* only fetch the realtime clock every 0.5*MIN_TIMEJUMP seconds */ 3833 /* only fetch the realtime clock every 0.5*MIN_TIMEJUMP seconds */
3663 /* interpolate in the meantime */ 3834 /* interpolate in the meantime */
3664 if (ecb_expect_true (mn_now - now_floor < MIN_TIMEJUMP * .5)) 3835 if (ecb_expect_true (mn_now - now_floor < EV_TS_CONST (MIN_TIMEJUMP * .5)))
3665 { 3836 {
3666 ev_rt_now = rtmn_diff + mn_now; 3837 ev_rt_now = rtmn_diff + mn_now;
3667 return; 3838 return;
3668 } 3839 }
3669 3840
3683 ev_tstamp diff; 3854 ev_tstamp diff;
3684 rtmn_diff = ev_rt_now - mn_now; 3855 rtmn_diff = ev_rt_now - mn_now;
3685 3856
3686 diff = odiff - rtmn_diff; 3857 diff = odiff - rtmn_diff;
3687 3858
3688 if (ecb_expect_true ((diff < 0. ? -diff : diff) < MIN_TIMEJUMP)) 3859 if (ecb_expect_true ((diff < EV_TS_CONST (0.) ? -diff : diff) < EV_TS_CONST (MIN_TIMEJUMP)))
3689 return; /* all is well */ 3860 return; /* all is well */
3690 3861
3691 ev_rt_now = ev_time (); 3862 ev_rt_now = ev_time ();
3692 mn_now = get_clock (); 3863 mn_now = get_clock ();
3693 now_floor = mn_now; 3864 now_floor = mn_now;
3702 else 3873 else
3703#endif 3874#endif
3704 { 3875 {
3705 ev_rt_now = ev_time (); 3876 ev_rt_now = ev_time ();
3706 3877
3707 if (ecb_expect_false (mn_now > ev_rt_now || ev_rt_now > mn_now + max_block + MIN_TIMEJUMP)) 3878 if (ecb_expect_false (mn_now > ev_rt_now || ev_rt_now > mn_now + max_block + EV_TS_CONST (MIN_TIMEJUMP)))
3708 { 3879 {
3709 /* adjust timers. this is easy, as the offset is the same for all of them */ 3880 /* adjust timers. this is easy, as the offset is the same for all of them */
3710 timers_reschedule (EV_A_ ev_rt_now - mn_now); 3881 timers_reschedule (EV_A_ ev_rt_now - mn_now);
3711#if EV_PERIODIC_ENABLE 3882#if EV_PERIODIC_ENABLE
3712 periodics_reschedule (EV_A); 3883 periodics_reschedule (EV_A);
3781 3952
3782 /* remember old timestamp for io_blocktime calculation */ 3953 /* remember old timestamp for io_blocktime calculation */
3783 ev_tstamp prev_mn_now = mn_now; 3954 ev_tstamp prev_mn_now = mn_now;
3784 3955
3785 /* update time to cancel out callback processing overhead */ 3956 /* update time to cancel out callback processing overhead */
3786 time_update (EV_A_ 1e100); 3957 time_update (EV_A_ EV_TS_CONST (EV_TSTAMP_HUGE));
3787 3958
3788 /* from now on, we want a pipe-wake-up */ 3959 /* from now on, we want a pipe-wake-up */
3789 pipe_write_wanted = 1; 3960 pipe_write_wanted = 1;
3790 3961
3791 ECB_MEMORY_FENCE; /* make sure pipe_write_wanted is visible before we check for potential skips */ 3962 ECB_MEMORY_FENCE; /* make sure pipe_write_wanted is visible before we check for potential skips */
3792 3963
3793 if (ecb_expect_true (!(flags & EVRUN_NOWAIT || idleall || !activecnt || pipe_write_skipped))) 3964 if (ecb_expect_true (!(flags & EVRUN_NOWAIT || idleall || !activecnt || pipe_write_skipped)))
3794 { 3965 {
3795 waittime = MAX_BLOCKTIME; 3966 waittime = EV_TS_CONST (MAX_BLOCKTIME);
3796 3967
3797 if (timercnt) 3968 if (timercnt)
3798 { 3969 {
3799 ev_tstamp to = ANHE_at (timers [HEAP0]) - mn_now; 3970 ev_tstamp to = ANHE_at (timers [HEAP0]) - mn_now;
3800 if (waittime > to) waittime = to; 3971 if (waittime > to) waittime = to;
3810 3981
3811 /* don't let timeouts decrease the waittime below timeout_blocktime */ 3982 /* don't let timeouts decrease the waittime below timeout_blocktime */
3812 if (ecb_expect_false (waittime < timeout_blocktime)) 3983 if (ecb_expect_false (waittime < timeout_blocktime))
3813 waittime = timeout_blocktime; 3984 waittime = timeout_blocktime;
3814 3985
3815 /* at this point, we NEED to wait, so we have to ensure */ 3986 /* now there are two more special cases left, either we have
3816 /* to pass a minimum nonzero value to the backend */ 3987 * already-expired timers, so we should not sleep, or we have timers
3988 * that expire very soon, in which case we need to wait for a minimum
3989 * amount of time for some event loop backends.
3990 */
3817 if (ecb_expect_false (waittime < backend_mintime)) 3991 if (ecb_expect_false (waittime < backend_mintime))
3992 waittime = waittime <= EV_TS_CONST (0.)
3993 ? EV_TS_CONST (0.)
3818 waittime = backend_mintime; 3994 : backend_mintime;
3819 3995
3820 /* extra check because io_blocktime is commonly 0 */ 3996 /* extra check because io_blocktime is commonly 0 */
3821 if (ecb_expect_false (io_blocktime)) 3997 if (ecb_expect_false (io_blocktime))
3822 { 3998 {
3823 sleeptime = io_blocktime - (mn_now - prev_mn_now); 3999 sleeptime = io_blocktime - (mn_now - prev_mn_now);
3824 4000
3825 if (sleeptime > waittime - backend_mintime) 4001 if (sleeptime > waittime - backend_mintime)
3826 sleeptime = waittime - backend_mintime; 4002 sleeptime = waittime - backend_mintime;
3827 4003
3828 if (ecb_expect_true (sleeptime > 0.)) 4004 if (ecb_expect_true (sleeptime > EV_TS_CONST (0.)))
3829 { 4005 {
3830 ev_sleep (sleeptime); 4006 ev_sleep (sleeptime);
3831 waittime -= sleeptime; 4007 waittime -= sleeptime;
3832 } 4008 }
3833 } 4009 }
3907} 4083}
3908 4084
3909void 4085void
3910ev_now_update (EV_P) EV_NOEXCEPT 4086ev_now_update (EV_P) EV_NOEXCEPT
3911{ 4087{
3912 time_update (EV_A_ 1e100); 4088 time_update (EV_A_ EV_TSTAMP_HUGE);
3913} 4089}
3914 4090
3915void 4091void
3916ev_suspend (EV_P) EV_NOEXCEPT 4092ev_suspend (EV_P) EV_NOEXCEPT
3917{ 4093{
4148} 4324}
4149 4325
4150ev_tstamp 4326ev_tstamp
4151ev_timer_remaining (EV_P_ ev_timer *w) EV_NOEXCEPT 4327ev_timer_remaining (EV_P_ ev_timer *w) EV_NOEXCEPT
4152{ 4328{
4153 return ev_at (w) - (ev_is_active (w) ? mn_now : 0.); 4329 return ev_at (w) - (ev_is_active (w) ? mn_now : EV_TS_CONST (0.));
4154} 4330}
4155 4331
4156#if EV_PERIODIC_ENABLE 4332#if EV_PERIODIC_ENABLE
4157ecb_noinline 4333ecb_noinline
4158void 4334void
4159ev_periodic_start (EV_P_ ev_periodic *w) EV_NOEXCEPT 4335ev_periodic_start (EV_P_ ev_periodic *w) EV_NOEXCEPT
4160{ 4336{
4161 if (ecb_expect_false (ev_is_active (w))) 4337 if (ecb_expect_false (ev_is_active (w)))
4162 return; 4338 return;
4339
4340#if EV_USE_TIMERFD
4341 if (timerfd == -2)
4342 evtimerfd_init (EV_A);
4343#endif
4163 4344
4164 if (w->reschedule_cb) 4345 if (w->reschedule_cb)
4165 ev_at (w) = w->reschedule_cb (w, ev_rt_now); 4346 ev_at (w) = w->reschedule_cb (w, ev_rt_now);
4166 else if (w->interval) 4347 else if (w->interval)
4167 { 4348 {
4909 ev_run (EV_A_ EVRUN_NOWAIT); 5090 ev_run (EV_A_ EVRUN_NOWAIT);
4910 } 5091 }
4911 } 5092 }
4912} 5093}
4913 5094
5095#if EV_FORK_ENABLE
4914static void 5096static void
4915embed_fork_cb (EV_P_ ev_fork *fork_w, int revents) 5097embed_fork_cb (EV_P_ ev_fork *fork_w, int revents)
4916{ 5098{
4917 ev_embed *w = (ev_embed *)(((char *)fork_w) - offsetof (ev_embed, fork)); 5099 ev_embed *w = (ev_embed *)(((char *)fork_w) - offsetof (ev_embed, fork));
4918 5100
4925 ev_run (EV_A_ EVRUN_NOWAIT); 5107 ev_run (EV_A_ EVRUN_NOWAIT);
4926 } 5108 }
4927 5109
4928 ev_embed_start (EV_A_ w); 5110 ev_embed_start (EV_A_ w);
4929} 5111}
5112#endif
4930 5113
4931#if 0 5114#if 0
4932static void 5115static void
4933embed_idle_cb (EV_P_ ev_idle *idle, int revents) 5116embed_idle_cb (EV_P_ ev_idle *idle, int revents)
4934{ 5117{
4955 5138
4956 ev_prepare_init (&w->prepare, embed_prepare_cb); 5139 ev_prepare_init (&w->prepare, embed_prepare_cb);
4957 ev_set_priority (&w->prepare, EV_MINPRI); 5140 ev_set_priority (&w->prepare, EV_MINPRI);
4958 ev_prepare_start (EV_A_ &w->prepare); 5141 ev_prepare_start (EV_A_ &w->prepare);
4959 5142
5143#if EV_FORK_ENABLE
4960 ev_fork_init (&w->fork, embed_fork_cb); 5144 ev_fork_init (&w->fork, embed_fork_cb);
4961 ev_fork_start (EV_A_ &w->fork); 5145 ev_fork_start (EV_A_ &w->fork);
5146#endif
4962 5147
4963 /*ev_idle_init (&w->idle, embed_idle_cb);*/ 5148 /*ev_idle_init (&w->idle, embed_idle_cb);*/
4964 5149
4965 ev_start (EV_A_ (W)w, 1); 5150 ev_start (EV_A_ (W)w, 1);
4966 5151
4976 5161
4977 EV_FREQUENT_CHECK; 5162 EV_FREQUENT_CHECK;
4978 5163
4979 ev_io_stop (EV_A_ &w->io); 5164 ev_io_stop (EV_A_ &w->io);
4980 ev_prepare_stop (EV_A_ &w->prepare); 5165 ev_prepare_stop (EV_A_ &w->prepare);
5166#if EV_FORK_ENABLE
4981 ev_fork_stop (EV_A_ &w->fork); 5167 ev_fork_stop (EV_A_ &w->fork);
5168#endif
4982 5169
4983 ev_stop (EV_A_ (W)w); 5170 ev_stop (EV_A_ (W)w);
4984 5171
4985 EV_FREQUENT_CHECK; 5172 EV_FREQUENT_CHECK;
4986} 5173}

Diff Legend

Removed lines
+ Added lines
< Changed lines
> Changed lines