ViewVC Help
View File | Revision Log | Show Annotations | Download File
/cvs/libev/ev.c
(Generate patch)

Comparing libev/ev.c (file contents):
Revision 1.501 by root, Mon Jul 1 21:47:42 2019 UTC vs.
Revision 1.515 by root, Fri Dec 20 20:51:46 2019 UTC

124# else 124# else
125# undef EV_USE_LINUXAIO 125# undef EV_USE_LINUXAIO
126# define EV_USE_LINUXAIO 0 126# define EV_USE_LINUXAIO 0
127# endif 127# endif
128 128
129# if HAVE_LINUX_FS_H && HAVE_SYS_TIMERFD_H && HAVE_KERNEL_RWF_T
130# ifndef EV_USE_IOURING
131# define EV_USE_IOURING EV_FEATURE_BACKENDS
132# endif
133# else
134# undef EV_USE_IOURING
135# define EV_USE_IOURING 0
136# endif
137
129# if HAVE_KQUEUE && HAVE_SYS_EVENT_H 138# if HAVE_KQUEUE && HAVE_SYS_EVENT_H
130# ifndef EV_USE_KQUEUE 139# ifndef EV_USE_KQUEUE
131# define EV_USE_KQUEUE EV_FEATURE_BACKENDS 140# define EV_USE_KQUEUE EV_FEATURE_BACKENDS
132# endif 141# endif
133# else 142# else
168# endif 177# endif
169# else 178# else
170# undef EV_USE_EVENTFD 179# undef EV_USE_EVENTFD
171# define EV_USE_EVENTFD 0 180# define EV_USE_EVENTFD 0
172# endif 181# endif
173 182
183# if HAVE_SYS_TIMERFD_H
184# ifndef EV_USE_TIMERFD
185# define EV_USE_TIMERFD EV_FEATURE_OS
186# endif
187# else
188# undef EV_USE_TIMERFD
189# define EV_USE_TIMERFD 0
190# endif
191
174#endif 192#endif
175 193
176/* OS X, in its infinite idiocy, actually HARDCODES 194/* OS X, in its infinite idiocy, actually HARDCODES
177 * a limit of 1024 into their select. Where people have brains, 195 * a limit of 1024 into their select. Where people have brains,
178 * OS X engineers apparently have a vacuum. Or maybe they were 196 * OS X engineers apparently have a vacuum. Or maybe they were
333# define EV_USE_LINUXAIO 0 351# define EV_USE_LINUXAIO 0
334# endif 352# endif
335#endif 353#endif
336 354
337#ifndef EV_USE_IOURING 355#ifndef EV_USE_IOURING
338# if __linux 356# if __linux /* later checks might disable again */
339# define EV_USE_IOURING 0 357# define EV_USE_IOURING 1
340# else 358# else
341# define EV_USE_IOURING 0 359# define EV_USE_IOURING 0
342# endif 360# endif
343#endif 361#endif
344 362
369#ifndef EV_USE_SIGNALFD 387#ifndef EV_USE_SIGNALFD
370# if __linux && (__GLIBC__ > 2 || (__GLIBC__ == 2 && __GLIBC_MINOR__ >= 7)) 388# if __linux && (__GLIBC__ > 2 || (__GLIBC__ == 2 && __GLIBC_MINOR__ >= 7))
371# define EV_USE_SIGNALFD EV_FEATURE_OS 389# define EV_USE_SIGNALFD EV_FEATURE_OS
372# else 390# else
373# define EV_USE_SIGNALFD 0 391# define EV_USE_SIGNALFD 0
392# endif
393#endif
394
395#ifndef EV_USE_TIMERFD
396# if __linux && (__GLIBC__ > 2 || (__GLIBC__ == 2 && __GLIBC_MINOR__ >= 8))
397# define EV_USE_TIMERFD EV_FEATURE_OS
398# else
399# define EV_USE_TIMERFD 0
374# endif 400# endif
375#endif 401#endif
376 402
377#if 0 /* debugging */ 403#if 0 /* debugging */
378# define EV_VERIFY 3 404# define EV_VERIFY 3
438#if !EV_STAT_ENABLE 464#if !EV_STAT_ENABLE
439# undef EV_USE_INOTIFY 465# undef EV_USE_INOTIFY
440# define EV_USE_INOTIFY 0 466# define EV_USE_INOTIFY 0
441#endif 467#endif
442 468
469#if __linux && EV_USE_IOURING
470# include <linux/version.h>
471# if LINUX_VERSION_CODE < KERNEL_VERSION(4,14,0)
472# undef EV_USE_IOURING
473# define EV_USE_IOURING 0
474# endif
475#endif
476
443#if !EV_USE_NANOSLEEP 477#if !EV_USE_NANOSLEEP
444/* hp-ux has it in sys/time.h, which we unconditionally include above */ 478/* hp-ux has it in sys/time.h, which we unconditionally include above */
445# if !defined _WIN32 && !defined __hpux 479# if !defined _WIN32 && !defined __hpux
446# include <sys/select.h> 480# include <sys/select.h>
447# endif 481# endif
448#endif 482#endif
449 483
450#if EV_USE_LINUXAIO 484#if EV_USE_LINUXAIO
451# include <sys/syscall.h> 485# include <sys/syscall.h>
452# if !SYS_io_getevents || !EV_USE_EPOLL /* ev_linxaio uses ev_poll.c:ev_epoll_create */ 486# if SYS_io_getevents && EV_USE_EPOLL /* linuxaio backend requires epoll backend */
487# define EV_NEED_SYSCALL 1
488# else
453# undef EV_USE_LINUXAIO 489# undef EV_USE_LINUXAIO
454# define EV_USE_LINUXAIO 0 490# define EV_USE_LINUXAIO 0
455# else
456# define EV_NEED_SYSCALL 1
457# endif 491# endif
458#endif 492#endif
459 493
460#if EV_USE_IOURING 494#if EV_USE_IOURING
461# include <sys/syscall.h> 495# include <sys/syscall.h>
462# if !__alpha && !SYS_io_uring_setup 496# if !SYS_io_uring_setup && __linux && !__alpha
463# define SYS_io_uring_setup 425 497# define SYS_io_uring_setup 425
464# define SYS_io_uring_enter 426 498# define SYS_io_uring_enter 426
465# define SYS_io_uring_wregister 427 499# define SYS_io_uring_wregister 427
466# endif 500# endif
467# if SYS_io_uring_setup 501# if SYS_io_uring_setup && EV_USE_EPOLL /* iouring backend requires epoll backend */
468# define EV_NEED_SYSCALL 1 502# define EV_NEED_SYSCALL 1
469# else 503# else
470# undef EV_USE_IOURING 504# undef EV_USE_IOURING
471# define EV_USE_IOURING 0 505# define EV_USE_IOURING 0
472# endif 506# endif
481# define EV_USE_INOTIFY 0 515# define EV_USE_INOTIFY 0
482# endif 516# endif
483#endif 517#endif
484 518
485#if EV_USE_EVENTFD 519#if EV_USE_EVENTFD
486/* our minimum requirement is glibc 2.7 which has the stub, but not the header */ 520/* our minimum requirement is glibc 2.7 which has the stub, but not the full header */
487# include <stdint.h> 521# include <stdint.h>
488# ifndef EFD_NONBLOCK 522# ifndef EFD_NONBLOCK
489# define EFD_NONBLOCK O_NONBLOCK 523# define EFD_NONBLOCK O_NONBLOCK
490# endif 524# endif
491# ifndef EFD_CLOEXEC 525# ifndef EFD_CLOEXEC
497# endif 531# endif
498EV_CPP(extern "C") int (eventfd) (unsigned int initval, int flags); 532EV_CPP(extern "C") int (eventfd) (unsigned int initval, int flags);
499#endif 533#endif
500 534
501#if EV_USE_SIGNALFD 535#if EV_USE_SIGNALFD
502/* our minimum requirement is glibc 2.7 which has the stub, but not the header */ 536/* our minimum requirement is glibc 2.7 which has the stub, but not the full header */
503# include <stdint.h> 537# include <stdint.h>
504# ifndef SFD_NONBLOCK 538# ifndef SFD_NONBLOCK
505# define SFD_NONBLOCK O_NONBLOCK 539# define SFD_NONBLOCK O_NONBLOCK
506# endif 540# endif
507# ifndef SFD_CLOEXEC 541# ifndef SFD_CLOEXEC
509# define SFD_CLOEXEC O_CLOEXEC 543# define SFD_CLOEXEC O_CLOEXEC
510# else 544# else
511# define SFD_CLOEXEC 02000000 545# define SFD_CLOEXEC 02000000
512# endif 546# endif
513# endif 547# endif
514EV_CPP (extern "C") int signalfd (int fd, const sigset_t *mask, int flags); 548EV_CPP (extern "C") int (signalfd) (int fd, const sigset_t *mask, int flags);
515 549
516struct signalfd_siginfo 550struct signalfd_siginfo
517{ 551{
518 uint32_t ssi_signo; 552 uint32_t ssi_signo;
519 char pad[128 - sizeof (uint32_t)]; 553 char pad[128 - sizeof (uint32_t)];
520}; 554};
521#endif 555#endif
522 556
523/*****************************************************************************/ 557/* for timerfd, libev core requires TFD_TIMER_CANCEL_ON_SET &c */
524 558#if EV_USE_TIMERFD
525#if EV_NEED_SYSCALL 559# include <sys/timerfd.h>
526 560/* timerfd is only used for periodics */
527#include <sys/syscall.h> 561# if !(defined (TFD_TIMER_CANCEL_ON_SET) && defined (TFD_CLOEXEC) && defined (TFD_NONBLOCK)) || !EV_PERIODIC_ENABLE
528 562# undef EV_USE_TIMERFD
529/* 563# define EV_USE_TIMERFD 0
530 * define some syscall wrappers for common architectures
531 * this is mostly for nice looks during debugging, not performance.
532 * our syscalls return < 0, not == -1, on error. which is good
533 * enough for linux aio.
534 * TODO: arm is also common nowadays, maybe even mips and x86
535 * TODO: after implementing this, it suddenly looks like overkill, but its hard to remove...
536 */
537#if __GNUC__ && __linux && ECB_AMD64 && !defined __OPTIMIZE_SIZE__
538 /* the costly errno access probably kills this for size optimisation */
539
540 #define ev_syscall(nr,narg,arg1,arg2,arg3,arg4,arg5) \
541 ({ \
542 long res; \
543 register unsigned long r5 __asm__ ("r8" ); \
544 register unsigned long r4 __asm__ ("r10"); \
545 register unsigned long r3 __asm__ ("rdx"); \
546 register unsigned long r2 __asm__ ("rsi"); \
547 register unsigned long r1 __asm__ ("rdi"); \
548 if (narg >= 5) r5 = (unsigned long)(arg5); \
549 if (narg >= 4) r4 = (unsigned long)(arg4); \
550 if (narg >= 3) r3 = (unsigned long)(arg3); \
551 if (narg >= 2) r2 = (unsigned long)(arg2); \
552 if (narg >= 1) r1 = (unsigned long)(arg1); \
553 __asm__ __volatile__ ( \
554 "syscall\n\t" \
555 : "=a" (res) \
556 : "0" (nr), "r" (r1), "r" (r2), "r" (r3), "r" (r4), "r" (r5) \
557 : "cc", "r11", "cx", "memory"); \
558 errno = -res; \
559 res; \
560 })
561
562#endif 564# endif
563
564#ifdef ev_syscall
565 #define ev_syscall0(nr) ev_syscall (nr, 0, 0, 0, 0, 0, 0
566 #define ev_syscall1(nr,arg1) ev_syscall (nr, 1, arg1, 0, 0, 0, 0)
567 #define ev_syscall2(nr,arg1,arg2) ev_syscall (nr, 2, arg1, arg2, 0, 0, 0)
568 #define ev_syscall3(nr,arg1,arg2,arg3) ev_syscall (nr, 3, arg1, arg2, arg3, 0, 0)
569 #define ev_syscall4(nr,arg1,arg2,arg3,arg4) ev_syscall (nr, 3, arg1, arg2, arg3, arg4, 0)
570 #define ev_syscall5(nr,arg1,arg2,arg3,arg4,arg5) ev_syscall (nr, 5, arg1, arg2, arg3, arg4, arg5)
571#else
572 #define ev_syscall0(nr) syscall (nr)
573 #define ev_syscall1(nr,arg1) syscall (nr, arg1)
574 #define ev_syscall2(nr,arg1,arg2) syscall (nr, arg1, arg2)
575 #define ev_syscall3(nr,arg1,arg2,arg3) syscall (nr, arg1, arg2, arg3)
576 #define ev_syscall4(nr,arg1,arg2,arg3,arg4) syscall (nr, arg1, arg2, arg3, arg4)
577 #define ev_syscall5(nr,arg1,arg2,arg3,arg4,arg5) syscall (nr, arg1, arg2, arg3, arg4, arg5)
578#endif
579
580#endif 565#endif
581 566
582/*****************************************************************************/ 567/*****************************************************************************/
583 568
584#if EV_VERIFY >= 3 569#if EV_VERIFY >= 3
592 * This value is good at least till the year 4000. 577 * This value is good at least till the year 4000.
593 */ 578 */
594#define MIN_INTERVAL 0.0001220703125 /* 1/2**13, good till 4000 */ 579#define MIN_INTERVAL 0.0001220703125 /* 1/2**13, good till 4000 */
595/*#define MIN_INTERVAL 0.00000095367431640625 /* 1/2**20, good till 2200 */ 580/*#define MIN_INTERVAL 0.00000095367431640625 /* 1/2**20, good till 2200 */
596 581
597#define MIN_TIMEJUMP 1. /* minimum timejump that gets detected (if monotonic clock available) */ 582#define MIN_TIMEJUMP 1. /* minimum timejump that gets detected (if monotonic clock available) */
598#define MAX_BLOCKTIME 59.743 /* never wait longer than this time (to detect time jumps) */ 583#define MAX_BLOCKTIME 59.743 /* never wait longer than this time (to detect time jumps) */
599 584
585/* find a portable timestamp that is "always" in the future but fits into time_t.
586 * this is quite hard, and we are mostly guessing - we handle 32 bit signed/unsigned time_t,
587 * and sizes larger than 32 bit, and maybe the unlikely floating point time_t */
588#define EV_TSTAMP_HUGE \
589 (sizeof (time_t) >= 8 ? 10000000000000. \
590 : 0 < (time_t)4294967295 ? 4294967295. \
591 : 2147483647.) \
592
593#ifndef EV_TS_CONST
594# define EV_TS_CONST(nv) nv
595# define EV_TS_TO_MSEC(a) a * 1e3 + 0.9999
596# define EV_TS_FROM_USEC(us) us * 1e-6
600#define EV_TV_SET(tv,t) do { tv.tv_sec = (long)t; tv.tv_usec = (long)((t - tv.tv_sec) * 1e6); } while (0) 597# define EV_TV_SET(tv,t) do { tv.tv_sec = (long)t; tv.tv_usec = (long)((t - tv.tv_sec) * 1e6); } while (0)
601#define EV_TS_SET(ts,t) do { ts.tv_sec = (long)t; ts.tv_nsec = (long)((t - ts.tv_sec) * 1e9); } while (0) 598# define EV_TS_SET(ts,t) do { ts.tv_sec = (long)t; ts.tv_nsec = (long)((t - ts.tv_sec) * 1e9); } while (0)
599# define EV_TV_GET(tv) ((tv).tv_sec + (tv).tv_usec * 1e-6)
600# define EV_TS_GET(ts) ((ts).tv_sec + (ts).tv_nsec * 1e-9)
601#endif
602 602
603/* the following is ecb.h embedded into libev - use update_ev_c to update from an external copy */ 603/* the following is ecb.h embedded into libev - use update_ev_c to update from an external copy */
604/* ECB.H BEGIN */ 604/* ECB.H BEGIN */
605/* 605/*
606 * libecb - http://software.schmorp.de/pkg/libecb 606 * libecb - http://software.schmorp.de/pkg/libecb
1643# define inline_speed ecb_inline 1643# define inline_speed ecb_inline
1644#else 1644#else
1645# define inline_speed ecb_noinline static 1645# define inline_speed ecb_noinline static
1646#endif 1646#endif
1647 1647
1648/*****************************************************************************/
1649/* raw syscall wrappers */
1650
1651#if EV_NEED_SYSCALL
1652
1653#include <sys/syscall.h>
1654
1655/*
1656 * define some syscall wrappers for common architectures
1657 * this is mostly for nice looks during debugging, not performance.
1658 * our syscalls return < 0, not == -1, on error. which is good
1659 * enough for linux aio.
1660 * TODO: arm is also common nowadays, maybe even mips and x86
1661 * TODO: after implementing this, it suddenly looks like overkill, but its hard to remove...
1662 */
1663#if __GNUC__ && __linux && ECB_AMD64 && !defined __OPTIMIZE_SIZE__
1664 /* the costly errno access probably kills this for size optimisation */
1665
1666 #define ev_syscall(nr,narg,arg1,arg2,arg3,arg4,arg5,arg6) \
1667 ({ \
1668 long res; \
1669 register unsigned long r6 __asm__ ("r9" ); \
1670 register unsigned long r5 __asm__ ("r8" ); \
1671 register unsigned long r4 __asm__ ("r10"); \
1672 register unsigned long r3 __asm__ ("rdx"); \
1673 register unsigned long r2 __asm__ ("rsi"); \
1674 register unsigned long r1 __asm__ ("rdi"); \
1675 if (narg >= 6) r6 = (unsigned long)(arg6); \
1676 if (narg >= 5) r5 = (unsigned long)(arg5); \
1677 if (narg >= 4) r4 = (unsigned long)(arg4); \
1678 if (narg >= 3) r3 = (unsigned long)(arg3); \
1679 if (narg >= 2) r2 = (unsigned long)(arg2); \
1680 if (narg >= 1) r1 = (unsigned long)(arg1); \
1681 __asm__ __volatile__ ( \
1682 "syscall\n\t" \
1683 : "=a" (res) \
1684 : "0" (nr), "r" (r1), "r" (r2), "r" (r3), "r" (r4), "r" (r5) \
1685 : "cc", "r11", "cx", "memory"); \
1686 errno = -res; \
1687 res; \
1688 })
1689
1690#endif
1691
1692#ifdef ev_syscall
1693 #define ev_syscall0(nr) ev_syscall (nr, 0, 0, 0, 0, 0, 0, 0)
1694 #define ev_syscall1(nr,arg1) ev_syscall (nr, 1, arg1, 0, 0, 0, 0, 0)
1695 #define ev_syscall2(nr,arg1,arg2) ev_syscall (nr, 2, arg1, arg2, 0, 0, 0, 0)
1696 #define ev_syscall3(nr,arg1,arg2,arg3) ev_syscall (nr, 3, arg1, arg2, arg3, 0, 0, 0)
1697 #define ev_syscall4(nr,arg1,arg2,arg3,arg4) ev_syscall (nr, 3, arg1, arg2, arg3, arg4, 0, 0)
1698 #define ev_syscall5(nr,arg1,arg2,arg3,arg4,arg5) ev_syscall (nr, 5, arg1, arg2, arg3, arg4, arg5, 0)
1699 #define ev_syscall6(nr,arg1,arg2,arg3,arg4,arg5,arg6) ev_syscall (nr, 6, arg1, arg2, arg3, arg4, arg5,arg6)
1700#else
1701 #define ev_syscall0(nr) syscall (nr)
1702 #define ev_syscall1(nr,arg1) syscall (nr, arg1)
1703 #define ev_syscall2(nr,arg1,arg2) syscall (nr, arg1, arg2)
1704 #define ev_syscall3(nr,arg1,arg2,arg3) syscall (nr, arg1, arg2, arg3)
1705 #define ev_syscall4(nr,arg1,arg2,arg3,arg4) syscall (nr, arg1, arg2, arg3, arg4)
1706 #define ev_syscall5(nr,arg1,arg2,arg3,arg4,arg5) syscall (nr, arg1, arg2, arg3, arg4, arg5)
1707 #define ev_syscall6(nr,arg1,arg2,arg3,arg4,arg5,arg6) syscall (nr, arg1, arg2, arg3, arg4, arg5,arg6)
1708#endif
1709
1710#endif
1711
1712/*****************************************************************************/
1713
1648#define NUMPRI (EV_MAXPRI - EV_MINPRI + 1) 1714#define NUMPRI (EV_MAXPRI - EV_MINPRI + 1)
1649 1715
1650#if EV_MINPRI == EV_MAXPRI 1716#if EV_MINPRI == EV_MAXPRI
1651# define ABSPRI(w) (((W)w), 0) 1717# define ABSPRI(w) (((W)w), 0)
1652#else 1718#else
1711 const ev_tstamp shift = sizeof (unsigned long) >= 8 ? 10000000000000000000. : 1000000000.; 1777 const ev_tstamp shift = sizeof (unsigned long) >= 8 ? 10000000000000000000. : 1000000000.;
1712#else 1778#else
1713 const ev_tstamp shift = sizeof (unsigned long) >= 8 ? 18446744073709551616. : 4294967296.; 1779 const ev_tstamp shift = sizeof (unsigned long) >= 8 ? 18446744073709551616. : 4294967296.;
1714#endif 1780#endif
1715 1781
1782 /* special treatment for negative arguments */
1783 if (ecb_expect_false (v < 0.))
1784 {
1785 ev_tstamp f = -ev_floor (-v);
1786
1787 return f - (f == v ? 0 : 1);
1788 }
1789
1716 /* argument too large for an unsigned long? */ 1790 /* argument too large for an unsigned long? then reduce it */
1717 if (ecb_expect_false (v >= shift)) 1791 if (ecb_expect_false (v >= shift))
1718 { 1792 {
1719 ev_tstamp f; 1793 ev_tstamp f;
1720 1794
1721 if (v == v - 1.) 1795 if (v == v - 1.)
1722 return v; /* very large number */ 1796 return v; /* very large numbers are assumed to be integer */
1723 1797
1724 f = shift * ev_floor (v * (1. / shift)); 1798 f = shift * ev_floor (v * (1. / shift));
1725 return f + ev_floor (v - f); 1799 return f + ev_floor (v - f);
1726 }
1727
1728 /* special treatment for negative args? */
1729 if (ecb_expect_false (v < 0.))
1730 {
1731 ev_tstamp f = -ev_floor (-v);
1732
1733 return f - (f == v ? 0 : 1);
1734 } 1800 }
1735 1801
1736 /* fits into an unsigned long */ 1802 /* fits into an unsigned long */
1737 return (unsigned long)v; 1803 return (unsigned long)v;
1738} 1804}
1882{ 1948{
1883 WL head; 1949 WL head;
1884 unsigned char events; /* the events watched for */ 1950 unsigned char events; /* the events watched for */
1885 unsigned char reify; /* flag set when this ANFD needs reification (EV_ANFD_REIFY, EV__IOFDSET) */ 1951 unsigned char reify; /* flag set when this ANFD needs reification (EV_ANFD_REIFY, EV__IOFDSET) */
1886 unsigned char emask; /* some backends store the actual kernel mask in here */ 1952 unsigned char emask; /* some backends store the actual kernel mask in here */
1887 unsigned char unused; 1953 unsigned char eflags; /* flags field for use by backends */
1888#if EV_USE_EPOLL 1954#if EV_USE_EPOLL
1889 unsigned int egen; /* generation counter to counter epoll bugs */ 1955 unsigned int egen; /* generation counter to counter epoll bugs */
1890#endif 1956#endif
1891#if EV_SELECT_IS_WINSOCKET || EV_USE_IOCP 1957#if EV_SELECT_IS_WINSOCKET || EV_USE_IOCP
1892 SOCKET handle; 1958 SOCKET handle;
1946 static struct ev_loop default_loop_struct; 2012 static struct ev_loop default_loop_struct;
1947 EV_API_DECL struct ev_loop *ev_default_loop_ptr = 0; /* needs to be initialised to make it a definition despite extern */ 2013 EV_API_DECL struct ev_loop *ev_default_loop_ptr = 0; /* needs to be initialised to make it a definition despite extern */
1948 2014
1949#else 2015#else
1950 2016
1951 EV_API_DECL ev_tstamp ev_rt_now = 0; /* needs to be initialised to make it a definition despite extern */ 2017 EV_API_DECL ev_tstamp ev_rt_now = EV_TS_CONST (0.); /* needs to be initialised to make it a definition despite extern */
1952 #define VAR(name,decl) static decl; 2018 #define VAR(name,decl) static decl;
1953 #include "ev_vars.h" 2019 #include "ev_vars.h"
1954 #undef VAR 2020 #undef VAR
1955 2021
1956 static int ev_default_loop_ptr; 2022 static int ev_default_loop_ptr;
1978#if EV_USE_REALTIME 2044#if EV_USE_REALTIME
1979 if (ecb_expect_true (have_realtime)) 2045 if (ecb_expect_true (have_realtime))
1980 { 2046 {
1981 struct timespec ts; 2047 struct timespec ts;
1982 clock_gettime (CLOCK_REALTIME, &ts); 2048 clock_gettime (CLOCK_REALTIME, &ts);
1983 return ts.tv_sec + ts.tv_nsec * 1e-9; 2049 return EV_TS_GET (ts);
1984 } 2050 }
1985#endif 2051#endif
1986 2052
2053 {
1987 struct timeval tv; 2054 struct timeval tv;
1988 gettimeofday (&tv, 0); 2055 gettimeofday (&tv, 0);
1989 return tv.tv_sec + tv.tv_usec * 1e-6; 2056 return EV_TV_GET (tv);
2057 }
1990} 2058}
1991#endif 2059#endif
1992 2060
1993inline_size ev_tstamp 2061inline_size ev_tstamp
1994get_clock (void) 2062get_clock (void)
1996#if EV_USE_MONOTONIC 2064#if EV_USE_MONOTONIC
1997 if (ecb_expect_true (have_monotonic)) 2065 if (ecb_expect_true (have_monotonic))
1998 { 2066 {
1999 struct timespec ts; 2067 struct timespec ts;
2000 clock_gettime (CLOCK_MONOTONIC, &ts); 2068 clock_gettime (CLOCK_MONOTONIC, &ts);
2001 return ts.tv_sec + ts.tv_nsec * 1e-9; 2069 return EV_TS_GET (ts);
2002 } 2070 }
2003#endif 2071#endif
2004 2072
2005 return ev_time (); 2073 return ev_time ();
2006} 2074}
2014#endif 2082#endif
2015 2083
2016void 2084void
2017ev_sleep (ev_tstamp delay) EV_NOEXCEPT 2085ev_sleep (ev_tstamp delay) EV_NOEXCEPT
2018{ 2086{
2019 if (delay > 0.) 2087 if (delay > EV_TS_CONST (0.))
2020 { 2088 {
2021#if EV_USE_NANOSLEEP 2089#if EV_USE_NANOSLEEP
2022 struct timespec ts; 2090 struct timespec ts;
2023 2091
2024 EV_TS_SET (ts, delay); 2092 EV_TS_SET (ts, delay);
2025 nanosleep (&ts, 0); 2093 nanosleep (&ts, 0);
2026#elif defined _WIN32 2094#elif defined _WIN32
2027 /* maybe this should round up, as ms is very low resolution */ 2095 /* maybe this should round up, as ms is very low resolution */
2028 /* compared to select (µs) or nanosleep (ns) */ 2096 /* compared to select (µs) or nanosleep (ns) */
2029 Sleep ((unsigned long)(delay * 1e3)); 2097 Sleep ((unsigned long)(EV_TS_TO_MSEC (delay)));
2030#else 2098#else
2031 struct timeval tv; 2099 struct timeval tv;
2032 2100
2033 /* here we rely on sys/time.h + sys/types.h + unistd.h providing select */ 2101 /* here we rely on sys/time.h + sys/types.h + unistd.h providing select */
2034 /* something not guaranteed by newer posix versions, but guaranteed */ 2102 /* something not guaranteed by newer posix versions, but guaranteed */
2384 2452
2385 /* find minimum child */ 2453 /* find minimum child */
2386 if (ecb_expect_true (pos + DHEAP - 1 < E)) 2454 if (ecb_expect_true (pos + DHEAP - 1 < E))
2387 { 2455 {
2388 /* fast path */ (minpos = pos + 0), (minat = ANHE_at (*minpos)); 2456 /* fast path */ (minpos = pos + 0), (minat = ANHE_at (*minpos));
2389 if ( ANHE_at (pos [1]) < minat) (minpos = pos + 1), (minat = ANHE_at (*minpos)); 2457 if ( minat > ANHE_at (pos [1])) (minpos = pos + 1), (minat = ANHE_at (*minpos));
2390 if ( ANHE_at (pos [2]) < minat) (minpos = pos + 2), (minat = ANHE_at (*minpos)); 2458 if ( minat > ANHE_at (pos [2])) (minpos = pos + 2), (minat = ANHE_at (*minpos));
2391 if ( ANHE_at (pos [3]) < minat) (minpos = pos + 3), (minat = ANHE_at (*minpos)); 2459 if ( minat > ANHE_at (pos [3])) (minpos = pos + 3), (minat = ANHE_at (*minpos));
2392 } 2460 }
2393 else if (pos < E) 2461 else if (pos < E)
2394 { 2462 {
2395 /* slow path */ (minpos = pos + 0), (minat = ANHE_at (*minpos)); 2463 /* slow path */ (minpos = pos + 0), (minat = ANHE_at (*minpos));
2396 if (pos + 1 < E && ANHE_at (pos [1]) < minat) (minpos = pos + 1), (minat = ANHE_at (*minpos)); 2464 if (pos + 1 < E && minat > ANHE_at (pos [1])) (minpos = pos + 1), (minat = ANHE_at (*minpos));
2397 if (pos + 2 < E && ANHE_at (pos [2]) < minat) (minpos = pos + 2), (minat = ANHE_at (*minpos)); 2465 if (pos + 2 < E && minat > ANHE_at (pos [2])) (minpos = pos + 2), (minat = ANHE_at (*minpos));
2398 if (pos + 3 < E && ANHE_at (pos [3]) < minat) (minpos = pos + 3), (minat = ANHE_at (*minpos)); 2466 if (pos + 3 < E && minat > ANHE_at (pos [3])) (minpos = pos + 3), (minat = ANHE_at (*minpos));
2399 } 2467 }
2400 else 2468 else
2401 break; 2469 break;
2402 2470
2403 if (ANHE_at (he) <= minat) 2471 if (ANHE_at (he) <= minat)
2411 2479
2412 heap [k] = he; 2480 heap [k] = he;
2413 ev_active (ANHE_w (he)) = k; 2481 ev_active (ANHE_w (he)) = k;
2414} 2482}
2415 2483
2416#else /* 4HEAP */ 2484#else /* not 4HEAP */
2417 2485
2418#define HEAP0 1 2486#define HEAP0 1
2419#define HPARENT(k) ((k) >> 1) 2487#define HPARENT(k) ((k) >> 1)
2420#define UPHEAP_DONE(p,k) (!(p)) 2488#define UPHEAP_DONE(p,k) (!(p))
2421 2489
2807 2875
2808#endif 2876#endif
2809 2877
2810/*****************************************************************************/ 2878/*****************************************************************************/
2811 2879
2880#if EV_USE_TIMERFD
2881
2882static void periodics_reschedule (EV_P);
2883
2884static void
2885timerfdcb (EV_P_ ev_io *iow, int revents)
2886{
2887 struct itimerspec its = { 0 };
2888
2889 /* since we can't easily come zup with a (portable) maximum value of time_t,
2890 * we wake up once per month, which hopefully is rare enough to not
2891 * be a problem. */
2892 its.it_value.tv_sec = ev_rt_now + 86400 * 30;
2893 timerfd_settime (timerfd, TFD_TIMER_ABSTIME | TFD_TIMER_CANCEL_ON_SET, &its, 0);
2894
2895 ev_rt_now = ev_time ();
2896 /* periodics_reschedule only needs ev_rt_now */
2897 /* but maybe in the future we want the full treatment. */
2898 /*
2899 now_floor = EV_TS_CONST (0.);
2900 time_update (EV_A_ EV_TSTAMP_HUGE);
2901 */
2902 periodics_reschedule (EV_A);
2903}
2904
2905ecb_noinline ecb_cold
2906static void
2907evtimerfd_init (EV_P)
2908{
2909 if (!ev_is_active (&timerfd_w))
2910 {
2911 timerfd = timerfd_create (CLOCK_REALTIME, TFD_NONBLOCK | TFD_CLOEXEC);
2912
2913 if (timerfd >= 0)
2914 {
2915 fd_intern (timerfd); /* just to be sure */
2916
2917 ev_io_init (&timerfd_w, timerfdcb, timerfd, EV_READ);
2918 ev_set_priority (&sigfd_w, EV_MINPRI);
2919 ev_io_start (EV_A_ &timerfd_w);
2920 ev_unref (EV_A); /* watcher should not keep loop alive */
2921
2922 /* (re-) arm timer */
2923 timerfdcb (EV_A_ 0, 0);
2924 }
2925 }
2926}
2927
2928#endif
2929
2930/*****************************************************************************/
2931
2812#if EV_USE_IOCP 2932#if EV_USE_IOCP
2813# include "ev_iocp.c" 2933# include "ev_iocp.c"
2814#endif 2934#endif
2815#if EV_USE_PORT 2935#if EV_USE_PORT
2816# include "ev_port.c" 2936# include "ev_port.c"
2915 3035
2916 /* epoll embeddability broken on all linux versions up to at least 2.6.23 */ 3036 /* epoll embeddability broken on all linux versions up to at least 2.6.23 */
2917 if (ev_linux_version () < 0x020620) /* disable it on linux < 2.6.32 */ 3037 if (ev_linux_version () < 0x020620) /* disable it on linux < 2.6.32 */
2918 flags &= ~EVBACKEND_EPOLL; 3038 flags &= ~EVBACKEND_EPOLL;
2919 3039
3040 /* EVBACKEND_LINUXAIO is theoretically embeddable, but suffers from a performance overhead */
3041
3042 /* EVBACKEND_IOURING is practically embeddable, but the current implementation is not
3043 * because our backend_fd is the epoll fd we need as fallback.
3044 * if the kernel ever is fixed, this might change...
3045 */
3046
2920 return flags; 3047 return flags;
2921} 3048}
2922 3049
2923unsigned int 3050unsigned int
2924ev_backend (EV_P) EV_NOEXCEPT 3051ev_backend (EV_P) EV_NOEXCEPT
3041 fs_fd = flags & EVFLAG_NOINOTIFY ? -1 : -2; 3168 fs_fd = flags & EVFLAG_NOINOTIFY ? -1 : -2;
3042#endif 3169#endif
3043#if EV_USE_SIGNALFD 3170#if EV_USE_SIGNALFD
3044 sigfd = flags & EVFLAG_SIGNALFD ? -2 : -1; 3171 sigfd = flags & EVFLAG_SIGNALFD ? -2 : -1;
3045#endif 3172#endif
3173#if EV_USE_TIMERFD
3174 timerfd = flags & EVFLAG_NOTIMERFD ? -1 : -2;
3175#endif
3046 3176
3047 if (!(flags & EVBACKEND_MASK)) 3177 if (!(flags & EVBACKEND_MASK))
3048 flags |= ev_recommended_backends (); 3178 flags |= ev_recommended_backends ();
3049 3179
3050#if EV_USE_IOCP 3180#if EV_USE_IOCP
3121 } 3251 }
3122 3252
3123#if EV_USE_SIGNALFD 3253#if EV_USE_SIGNALFD
3124 if (ev_is_active (&sigfd_w)) 3254 if (ev_is_active (&sigfd_w))
3125 close (sigfd); 3255 close (sigfd);
3256#endif
3257
3258#if EV_USE_TIMERFD
3259 if (ev_is_active (&timerfd_w))
3260 close (timerfd);
3126#endif 3261#endif
3127 3262
3128#if EV_USE_INOTIFY 3263#if EV_USE_INOTIFY
3129 if (fs_fd >= 0) 3264 if (fs_fd >= 0)
3130 close (fs_fd); 3265 close (fs_fd);
3223#endif 3358#endif
3224#if EV_USE_INOTIFY 3359#if EV_USE_INOTIFY
3225 infy_fork (EV_A); 3360 infy_fork (EV_A);
3226#endif 3361#endif
3227 3362
3363 if (postfork != 2)
3364 {
3365 #if EV_USE_SIGNALFD
3366 /* surprisingly, nothing needs to be done for signalfd, accoridng to docs, it does the right thing on fork */
3367 #endif
3368
3369 #if EV_USE_TIMERFD
3370 if (ev_is_active (&timerfd_w))
3371 {
3372 ev_ref (EV_A);
3373 ev_io_stop (EV_A_ &timerfd_w);
3374
3375 close (timerfd);
3376 timerfd = -2;
3377
3378 evtimerfd_init (EV_A);
3379 /* reschedule periodics, in case we missed something */
3380 ev_feed_event (EV_A_ &timerfd_w, EV_CUSTOM);
3381 }
3382 #endif
3383
3228#if EV_SIGNAL_ENABLE || EV_ASYNC_ENABLE 3384 #if EV_SIGNAL_ENABLE || EV_ASYNC_ENABLE
3229 if (ev_is_active (&pipe_w) && postfork != 2) 3385 if (ev_is_active (&pipe_w))
3230 { 3386 {
3231 /* pipe_write_wanted must be false now, so modifying fd vars should be safe */ 3387 /* pipe_write_wanted must be false now, so modifying fd vars should be safe */
3232 3388
3233 ev_ref (EV_A); 3389 ev_ref (EV_A);
3234 ev_io_stop (EV_A_ &pipe_w); 3390 ev_io_stop (EV_A_ &pipe_w);
3235 3391
3236 if (evpipe [0] >= 0) 3392 if (evpipe [0] >= 0)
3237 EV_WIN32_CLOSE_FD (evpipe [0]); 3393 EV_WIN32_CLOSE_FD (evpipe [0]);
3238 3394
3239 evpipe_init (EV_A); 3395 evpipe_init (EV_A);
3240 /* iterate over everything, in case we missed something before */ 3396 /* iterate over everything, in case we missed something before */
3241 ev_feed_event (EV_A_ &pipe_w, EV_CUSTOM); 3397 ev_feed_event (EV_A_ &pipe_w, EV_CUSTOM);
3398 }
3399 #endif
3242 } 3400 }
3243#endif
3244 3401
3245 postfork = 0; 3402 postfork = 0;
3246} 3403}
3247 3404
3248#if EV_MULTIPLICITY 3405#if EV_MULTIPLICITY
3518 { 3675 {
3519 ev_at (w) += w->repeat; 3676 ev_at (w) += w->repeat;
3520 if (ev_at (w) < mn_now) 3677 if (ev_at (w) < mn_now)
3521 ev_at (w) = mn_now; 3678 ev_at (w) = mn_now;
3522 3679
3523 assert (("libev: negative ev_timer repeat value found while processing timers", w->repeat > 0.)); 3680 assert (("libev: negative ev_timer repeat value found while processing timers", w->repeat > EV_TS_CONST (0.)));
3524 3681
3525 ANHE_at_cache (timers [HEAP0]); 3682 ANHE_at_cache (timers [HEAP0]);
3526 downheap (timers, timercnt, HEAP0); 3683 downheap (timers, timercnt, HEAP0);
3527 } 3684 }
3528 else 3685 else
3659 3816
3660 mn_now = get_clock (); 3817 mn_now = get_clock ();
3661 3818
3662 /* only fetch the realtime clock every 0.5*MIN_TIMEJUMP seconds */ 3819 /* only fetch the realtime clock every 0.5*MIN_TIMEJUMP seconds */
3663 /* interpolate in the meantime */ 3820 /* interpolate in the meantime */
3664 if (ecb_expect_true (mn_now - now_floor < MIN_TIMEJUMP * .5)) 3821 if (ecb_expect_true (mn_now - now_floor < EV_TS_CONST (MIN_TIMEJUMP * .5)))
3665 { 3822 {
3666 ev_rt_now = rtmn_diff + mn_now; 3823 ev_rt_now = rtmn_diff + mn_now;
3667 return; 3824 return;
3668 } 3825 }
3669 3826
3683 ev_tstamp diff; 3840 ev_tstamp diff;
3684 rtmn_diff = ev_rt_now - mn_now; 3841 rtmn_diff = ev_rt_now - mn_now;
3685 3842
3686 diff = odiff - rtmn_diff; 3843 diff = odiff - rtmn_diff;
3687 3844
3688 if (ecb_expect_true ((diff < 0. ? -diff : diff) < MIN_TIMEJUMP)) 3845 if (ecb_expect_true ((diff < EV_TS_CONST (0.) ? -diff : diff) < EV_TS_CONST (MIN_TIMEJUMP)))
3689 return; /* all is well */ 3846 return; /* all is well */
3690 3847
3691 ev_rt_now = ev_time (); 3848 ev_rt_now = ev_time ();
3692 mn_now = get_clock (); 3849 mn_now = get_clock ();
3693 now_floor = mn_now; 3850 now_floor = mn_now;
3702 else 3859 else
3703#endif 3860#endif
3704 { 3861 {
3705 ev_rt_now = ev_time (); 3862 ev_rt_now = ev_time ();
3706 3863
3707 if (ecb_expect_false (mn_now > ev_rt_now || ev_rt_now > mn_now + max_block + MIN_TIMEJUMP)) 3864 if (ecb_expect_false (mn_now > ev_rt_now || ev_rt_now > mn_now + max_block + EV_TS_CONST (MIN_TIMEJUMP)))
3708 { 3865 {
3709 /* adjust timers. this is easy, as the offset is the same for all of them */ 3866 /* adjust timers. this is easy, as the offset is the same for all of them */
3710 timers_reschedule (EV_A_ ev_rt_now - mn_now); 3867 timers_reschedule (EV_A_ ev_rt_now - mn_now);
3711#if EV_PERIODIC_ENABLE 3868#if EV_PERIODIC_ENABLE
3712 periodics_reschedule (EV_A); 3869 periodics_reschedule (EV_A);
3781 3938
3782 /* remember old timestamp for io_blocktime calculation */ 3939 /* remember old timestamp for io_blocktime calculation */
3783 ev_tstamp prev_mn_now = mn_now; 3940 ev_tstamp prev_mn_now = mn_now;
3784 3941
3785 /* update time to cancel out callback processing overhead */ 3942 /* update time to cancel out callback processing overhead */
3786 time_update (EV_A_ 1e100); 3943 time_update (EV_A_ EV_TS_CONST (EV_TSTAMP_HUGE));
3787 3944
3788 /* from now on, we want a pipe-wake-up */ 3945 /* from now on, we want a pipe-wake-up */
3789 pipe_write_wanted = 1; 3946 pipe_write_wanted = 1;
3790 3947
3791 ECB_MEMORY_FENCE; /* make sure pipe_write_wanted is visible before we check for potential skips */ 3948 ECB_MEMORY_FENCE; /* make sure pipe_write_wanted is visible before we check for potential skips */
3792 3949
3793 if (ecb_expect_true (!(flags & EVRUN_NOWAIT || idleall || !activecnt || pipe_write_skipped))) 3950 if (ecb_expect_true (!(flags & EVRUN_NOWAIT || idleall || !activecnt || pipe_write_skipped)))
3794 { 3951 {
3795 waittime = MAX_BLOCKTIME; 3952 waittime = EV_TS_CONST (MAX_BLOCKTIME);
3796 3953
3797 if (timercnt) 3954 if (timercnt)
3798 { 3955 {
3799 ev_tstamp to = ANHE_at (timers [HEAP0]) - mn_now; 3956 ev_tstamp to = ANHE_at (timers [HEAP0]) - mn_now;
3800 if (waittime > to) waittime = to; 3957 if (waittime > to) waittime = to;
3810 3967
3811 /* don't let timeouts decrease the waittime below timeout_blocktime */ 3968 /* don't let timeouts decrease the waittime below timeout_blocktime */
3812 if (ecb_expect_false (waittime < timeout_blocktime)) 3969 if (ecb_expect_false (waittime < timeout_blocktime))
3813 waittime = timeout_blocktime; 3970 waittime = timeout_blocktime;
3814 3971
3815 /* at this point, we NEED to wait, so we have to ensure */ 3972 /* now there are two more special cases left, either we have
3816 /* to pass a minimum nonzero value to the backend */ 3973 * already-expired timers, so we should not sleep, or we have timers
3974 * that expire very soon, in which case we need to wait for a minimum
3975 * amount of time for some event loop backends.
3976 */
3817 if (ecb_expect_false (waittime < backend_mintime)) 3977 if (ecb_expect_false (waittime < backend_mintime))
3978 waittime = waittime <= EV_TS_CONST (0.)
3979 ? EV_TS_CONST (0.)
3818 waittime = backend_mintime; 3980 : backend_mintime;
3819 3981
3820 /* extra check because io_blocktime is commonly 0 */ 3982 /* extra check because io_blocktime is commonly 0 */
3821 if (ecb_expect_false (io_blocktime)) 3983 if (ecb_expect_false (io_blocktime))
3822 { 3984 {
3823 sleeptime = io_blocktime - (mn_now - prev_mn_now); 3985 sleeptime = io_blocktime - (mn_now - prev_mn_now);
3824 3986
3825 if (sleeptime > waittime - backend_mintime) 3987 if (sleeptime > waittime - backend_mintime)
3826 sleeptime = waittime - backend_mintime; 3988 sleeptime = waittime - backend_mintime;
3827 3989
3828 if (ecb_expect_true (sleeptime > 0.)) 3990 if (ecb_expect_true (sleeptime > EV_TS_CONST (0.)))
3829 { 3991 {
3830 ev_sleep (sleeptime); 3992 ev_sleep (sleeptime);
3831 waittime -= sleeptime; 3993 waittime -= sleeptime;
3832 } 3994 }
3833 } 3995 }
3907} 4069}
3908 4070
3909void 4071void
3910ev_now_update (EV_P) EV_NOEXCEPT 4072ev_now_update (EV_P) EV_NOEXCEPT
3911{ 4073{
3912 time_update (EV_A_ 1e100); 4074 time_update (EV_A_ EV_TSTAMP_HUGE);
3913} 4075}
3914 4076
3915void 4077void
3916ev_suspend (EV_P) EV_NOEXCEPT 4078ev_suspend (EV_P) EV_NOEXCEPT
3917{ 4079{
4148} 4310}
4149 4311
4150ev_tstamp 4312ev_tstamp
4151ev_timer_remaining (EV_P_ ev_timer *w) EV_NOEXCEPT 4313ev_timer_remaining (EV_P_ ev_timer *w) EV_NOEXCEPT
4152{ 4314{
4153 return ev_at (w) - (ev_is_active (w) ? mn_now : 0.); 4315 return ev_at (w) - (ev_is_active (w) ? mn_now : EV_TS_CONST (0.));
4154} 4316}
4155 4317
4156#if EV_PERIODIC_ENABLE 4318#if EV_PERIODIC_ENABLE
4157ecb_noinline 4319ecb_noinline
4158void 4320void
4159ev_periodic_start (EV_P_ ev_periodic *w) EV_NOEXCEPT 4321ev_periodic_start (EV_P_ ev_periodic *w) EV_NOEXCEPT
4160{ 4322{
4161 if (ecb_expect_false (ev_is_active (w))) 4323 if (ecb_expect_false (ev_is_active (w)))
4162 return; 4324 return;
4325
4326#if EV_USE_TIMERFD
4327 if (timerfd == -2)
4328 evtimerfd_init (EV_A);
4329#endif
4163 4330
4164 if (w->reschedule_cb) 4331 if (w->reschedule_cb)
4165 ev_at (w) = w->reschedule_cb (w, ev_rt_now); 4332 ev_at (w) = w->reschedule_cb (w, ev_rt_now);
4166 else if (w->interval) 4333 else if (w->interval)
4167 { 4334 {

Diff Legend

- Removed lines
+ Added lines
< Changed lines
> Changed lines