/cvs/libev/ev.c

Comparing libev/ev.c (file contents):
Revision 1.501 by root, Mon Jul 1 21:47:42 2019 UTC vs.
Revision 1.502 by root, Tue Jul 2 06:07:54 2019 UTC

447# endif 447# endif
448#endif 448#endif
449 449
450#if EV_USE_LINUXAIO 450#if EV_USE_LINUXAIO
451# include <sys/syscall.h> 451# include <sys/syscall.h>
452# if !SYS_io_getevents || !EV_USE_EPOLL /* ev_linuxaio uses ev_poll.c:ev_epoll_create */ 452# if SYS_io_getevents && EV_USE_EPOLL /* linuxaio backend requires epoll backend */
453# define EV_NEED_SYSCALL 1
454# else
453# undef EV_USE_LINUXAIO 455# undef EV_USE_LINUXAIO
454# define EV_USE_LINUXAIO 0 456# define EV_USE_LINUXAIO 0
455# else
456# define EV_NEED_SYSCALL 1
457# endif 457# endif
458#endif 458#endif
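With this change the linuxaio backend is only compiled in when the epoll backend is enabled as well (it reuses ev_epoll_create from the epoll backend, as the old comment notes). A minimal sketch, not part of the patch, assuming a libev build new enough to define EVBACKEND_LINUXAIO (create_loop is a made-up helper), of how an application might request that backend and fall back when it is unavailable:

#include <ev.h>

static struct ev_loop *
create_loop (void)
{
  struct ev_loop *loop = 0;

  /* ask for linuxaio explicitly if this build was compiled with it */
  if (ev_supported_backends () & EVBACKEND_LINUXAIO)
    loop = ev_loop_new (EVBACKEND_LINUXAIO);

  /* otherwise, or if the kernel refuses it, let libev pick a backend */
  if (!loop)
    loop = ev_loop_new (EVFLAG_AUTO);

  return loop;
}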
459 459
460#if EV_USE_IOURING 460#if EV_USE_IOURING
461# include <sys/syscall.h> 461# include <sys/syscall.h>
462# if !__alpha && !SYS_io_uring_setup 462# if !__alpha && !SYS_io_uring_setup
463# define SYS_io_uring_setup 425 463# define SYS_io_uring_setup 425
464# define SYS_io_uring_enter 426 464# define SYS_io_uring_enter 426
465# define SYS_io_uring_register 427 465# define SYS_io_uring_register 427
466# endif 466# endif
467# if SYS_io_uring_setup 467# if SYS_io_uring_setup && EV_USE_EPOLL /* iouring backend requires epoll backend */
468# define EV_NEED_SYSCALL 1 468# define EV_NEED_SYSCALL 1
469# else 469# else
470# undef EV_USE_IOURING 470# undef EV_USE_IOURING
471# define EV_USE_IOURING 0 471# define EV_USE_IOURING 0
472# endif 472# endif
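The numbers 425-427 are the io_uring syscall numbers shared by most architectures (alpha differs, hence the !__alpha guard), and the backend now additionally requires the epoll backend, mirroring the linuxaio change above. Whether the running kernel actually implements io_uring remains a run-time question; a hedged sketch of such a probe, not part of the patch (iouring_probe is a made-up name, and it assumes <sys/syscall.h> or the fallback above provides SYS_io_uring_setup):

#include <errno.h>
#include <unistd.h>
#include <sys/syscall.h>
#include <linux/io_uring.h>

/* returns 1 if the running kernel implements io_uring_setup, 0 if not */
static int
iouring_probe (void)
{
  struct io_uring_params params = { 0 };
  long fd = syscall (SYS_io_uring_setup, 1, &params);

  if (fd < 0)
    return errno != ENOSYS; /* ENOSYS: the syscall does not exist */

  close (fd); /* the probe only cares whether the call succeeded */
  return 1;
}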
520}; 520};
521#endif 521#endif
522 522
523/*****************************************************************************/ 523/*****************************************************************************/
524 524
525#if EV_NEED_SYSCALL
526
527#include <sys/syscall.h>
528
529/*
530 * define some syscall wrappers for common architectures
531 * this is mostly for nice looks during debugging, not performance.
532 * our syscalls return < 0, not == -1, on error, which is good
533 * enough for linux aio.
534 * TODO: arm is also common nowadays, maybe even mips and x86
535 * TODO: after implementing this, it suddenly looks like overkill, but it's hard to remove...
536 */
537#if __GNUC__ && __linux && ECB_AMD64 && !defined __OPTIMIZE_SIZE__
538 /* the costly errno access probably kills this for size optimisation */
539
540 #define ev_syscall(nr,narg,arg1,arg2,arg3,arg4,arg5) \
541 ({ \
542 long res; \
543 register unsigned long r5 __asm__ ("r8" ); \
544 register unsigned long r4 __asm__ ("r10"); \
545 register unsigned long r3 __asm__ ("rdx"); \
546 register unsigned long r2 __asm__ ("rsi"); \
547 register unsigned long r1 __asm__ ("rdi"); \
548 if (narg >= 5) r5 = (unsigned long)(arg5); \
549 if (narg >= 4) r4 = (unsigned long)(arg4); \
550 if (narg >= 3) r3 = (unsigned long)(arg3); \
551 if (narg >= 2) r2 = (unsigned long)(arg2); \
552 if (narg >= 1) r1 = (unsigned long)(arg1); \
553 __asm__ __volatile__ ( \
554 "syscall\n\t" \
555 : "=a" (res) \
556 : "0" (nr), "r" (r1), "r" (r2), "r" (r3), "r" (r4), "r" (r5) \
557 : "cc", "r11", "cx", "memory"); \
558 errno = -res; \
559 res; \
560 })
561
562#endif
563
564#ifdef ev_syscall
565 #define ev_syscall0(nr) ev_syscall (nr, 0, 0, 0, 0, 0, 0)
566 #define ev_syscall1(nr,arg1) ev_syscall (nr, 1, arg1, 0, 0, 0, 0)
567 #define ev_syscall2(nr,arg1,arg2) ev_syscall (nr, 2, arg1, arg2, 0, 0, 0)
568 #define ev_syscall3(nr,arg1,arg2,arg3) ev_syscall (nr, 3, arg1, arg2, arg3, 0, 0)
569 #define ev_syscall4(nr,arg1,arg2,arg3,arg4) ev_syscall (nr, 4, arg1, arg2, arg3, arg4, 0)
570 #define ev_syscall5(nr,arg1,arg2,arg3,arg4,arg5) ev_syscall (nr, 5, arg1, arg2, arg3, arg4, arg5)
571#else
572 #define ev_syscall0(nr) syscall (nr)
573 #define ev_syscall1(nr,arg1) syscall (nr, arg1)
574 #define ev_syscall2(nr,arg1,arg2) syscall (nr, arg1, arg2)
575 #define ev_syscall3(nr,arg1,arg2,arg3) syscall (nr, arg1, arg2, arg3)
576 #define ev_syscall4(nr,arg1,arg2,arg3,arg4) syscall (nr, arg1, arg2, arg3, arg4)
577 #define ev_syscall5(nr,arg1,arg2,arg3,arg4,arg5) syscall (nr, arg1, arg2, arg3, arg4, arg5)
578#endif
579
580#endif
581
582/*****************************************************************************/
583
584#if EV_VERIFY >= 3 525#if EV_VERIFY >= 3
585# define EV_FREQUENT_CHECK ev_verify (EV_A) 526# define EV_FREQUENT_CHECK ev_verify (EV_A)
586#else 527#else
587# define EV_FREQUENT_CHECK do { } while (0) 528# define EV_FREQUENT_CHECK do { } while (0)
588#endif 529#endif
592 * This value is good at least till the year 4000. 533 * This value is good at least till the year 4000.
593 */ 534 */
594#define MIN_INTERVAL 0.0001220703125 /* 1/2**13, good till 4000 */ 535#define MIN_INTERVAL 0.0001220703125 /* 1/2**13, good till 4000 */
595/*#define MIN_INTERVAL 0.00000095367431640625 /* 1/2**20, good till 2200 */ 536/*#define MIN_INTERVAL 0.00000095367431640625 /* 1/2**20, good till 2200 */
596 537
597#define MIN_TIMEJUMP 1. /* minimum timejump that gets detected (if monotonic clock available) */ 538#define MIN_TIMEJUMP 1. /* minimum timejump that gets detected (if monotonic clock available) */
598#define MAX_BLOCKTIME 59.743 /* never wait longer than this time (to detect time jumps) */ 539#define MAX_BLOCKTIME 59.743 /* never wait longer than this time (to detect time jumps) */
540
541/* find a portable timestamp that is "always" in the future but fits into time_t.
542 * this is quite hard, and we are mostly guessing - we handle 32 bit signed/unsigned time_t,
543 * and sizes larger than 32 bit, and maybe the unlikely floating point time_t */
544#define EV_TSTAMP_HUGE \
545 (sizeof (time_t) >= 8 ? 10000000000000. \
546 : 0 < (time_t)4294967295 ? 4294967295. \
547 : 2147483647.)
599 548
600#define EV_TV_SET(tv,t) do { tv.tv_sec = (long)t; tv.tv_usec = (long)((t - tv.tv_sec) * 1e6); } while (0) 549#define EV_TV_SET(tv,t) do { tv.tv_sec = (long)t; tv.tv_usec = (long)((t - tv.tv_sec) * 1e6); } while (0)
601#define EV_TS_SET(ts,t) do { ts.tv_sec = (long)t; ts.tv_nsec = (long)((t - ts.tv_sec) * 1e9); } while (0) 550#define EV_TS_SET(ts,t) do { ts.tv_sec = (long)t; ts.tv_nsec = (long)((t - ts.tv_sec) * 1e9); } while (0)
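EV_TSTAMP_HUGE evaluates to a timestamp far in the future that still fits the platform's time_t: a very large constant for 64-bit time_t, 2**32-1 for 32-bit unsigned time_t, and 2**31-1 otherwise. Combined with EV_TS_SET it yields a "practically never" timeout without overflow. A small stand-alone sketch, not part of the patch, with the two macros copied from above so it compiles on its own (huge_timeout is a made-up helper):

#include <time.h>

/* copies of the definitions above, so the snippet is self-contained */
#define EV_TSTAMP_HUGE \
  (sizeof (time_t) >= 8 ? 10000000000000. \
   : 0 < (time_t)4294967295 ? 4294967295. \
   : 2147483647.)

#define EV_TS_SET(ts,t) do { ts.tv_sec = (long)t; ts.tv_nsec = (long)((t - ts.tv_sec) * 1e9); } while (0)

/* builds a timespec that effectively never expires yet stays representable */
static struct timespec
huge_timeout (void)
{
  struct timespec ts;
  EV_TS_SET (ts, EV_TSTAMP_HUGE);
  return ts;
}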
602 551
603/* the following is ecb.h embedded into libev - use update_ev_c to update from an external copy */ 552/* the following is ecb.h embedded into libev - use update_ev_c to update from an external copy */
1643# define inline_speed ecb_inline 1592# define inline_speed ecb_inline
1644#else 1593#else
1645# define inline_speed ecb_noinline static 1594# define inline_speed ecb_noinline static
1646#endif 1595#endif
1647 1596
1597/*****************************************************************************/
1598/* raw syscall wrappers */
1599
1600#if EV_NEED_SYSCALL
1601
1602#include <sys/syscall.h>
1603
1604/*
1605 * define some syscall wrappers for common architectures
1606 * this is mostly for nice looks during debugging, not performance.
1607 * our syscalls return < 0, not == -1, on error, which is good
1608 * enough for linux aio.
1609 * TODO: arm is also common nowadays, maybe even mips and x86
1610 * TODO: after implementing this, it suddenly looks like overkill, but it's hard to remove...
1611 */
1612#if __GNUC__ && __linux && ECB_AMD64 && !defined __OPTIMIZE_SIZE__
1613 /* the costly errno access probably kills this for size optimisation */
1614
1615 #define ev_syscall(nr,narg,arg1,arg2,arg3,arg4,arg5,arg6) \
1616 ({ \
1617 long res; \
1618 register unsigned long r6 __asm__ ("r9" ); \
1619 register unsigned long r5 __asm__ ("r8" ); \
1620 register unsigned long r4 __asm__ ("r10"); \
1621 register unsigned long r3 __asm__ ("rdx"); \
1622 register unsigned long r2 __asm__ ("rsi"); \
1623 register unsigned long r1 __asm__ ("rdi"); \
1624 if (narg >= 6) r6 = (unsigned long)(arg6); \
1625 if (narg >= 5) r5 = (unsigned long)(arg5); \
1626 if (narg >= 4) r4 = (unsigned long)(arg4); \
1627 if (narg >= 3) r3 = (unsigned long)(arg3); \
1628 if (narg >= 2) r2 = (unsigned long)(arg2); \
1629 if (narg >= 1) r1 = (unsigned long)(arg1); \
1630 __asm__ __volatile__ ( \
1631 "syscall\n\t" \
1632 : "=a" (res) \
1633 : "0" (nr), "r" (r1), "r" (r2), "r" (r3), "r" (r4), "r" (r5), "r" (r6) \
1634 : "cc", "r11", "cx", "memory"); \
1635 errno = -res; \
1636 res; \
1637 })
1638
1639#endif
1640
1641#ifdef ev_syscall
1642 #define ev_syscall0(nr) ev_syscall (nr, 0, 0, 0, 0, 0, 0, 0)
1643 #define ev_syscall1(nr,arg1) ev_syscall (nr, 1, arg1, 0, 0, 0, 0, 0)
1644 #define ev_syscall2(nr,arg1,arg2) ev_syscall (nr, 2, arg1, arg2, 0, 0, 0, 0)
1645 #define ev_syscall3(nr,arg1,arg2,arg3) ev_syscall (nr, 3, arg1, arg2, arg3, 0, 0, 0)
1646 #define ev_syscall4(nr,arg1,arg2,arg3,arg4) ev_syscall (nr, 4, arg1, arg2, arg3, arg4, 0, 0)
1647 #define ev_syscall5(nr,arg1,arg2,arg3,arg4,arg5) ev_syscall (nr, 5, arg1, arg2, arg3, arg4, arg5, 0)
1648 #define ev_syscall6(nr,arg1,arg2,arg3,arg4,arg5,arg6) ev_syscall (nr, 6, arg1, arg2, arg3, arg4, arg5, arg6)
1649#else
1650 #define ev_syscall0(nr) syscall (nr)
1651 #define ev_syscall1(nr,arg1) syscall (nr, arg1)
1652 #define ev_syscall2(nr,arg1,arg2) syscall (nr, arg1, arg2)
1653 #define ev_syscall3(nr,arg1,arg2,arg3) syscall (nr, arg1, arg2, arg3)
1654 #define ev_syscall4(nr,arg1,arg2,arg3,arg4) syscall (nr, arg1, arg2, arg3, arg4)
1655 #define ev_syscall5(nr,arg1,arg2,arg3,arg4,arg5) syscall (nr, arg1, arg2, arg3, arg4, arg5)
1656 #define ev_syscall6(nr,arg1,arg2,arg3,arg4,arg5,arg6) syscall (nr, arg1, arg2, arg3, arg4, arg5, arg6)
1657#endif
1658
1659#endif
1660
1661/*****************************************************************************/
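As the comment block notes, these wrappers return the raw kernel result, so an error is any negative return value, and the inline amd64 variant stores the negated result in errno unconditionally - errno is therefore only meaningful once the result has been checked. A hedged illustration, not part of the patch (fetch_events is a made-up helper), of how a caller such as the linuxaio backend would use ev_syscall5 for io_getevents:

#include <errno.h>
#include <stdio.h>
#include <string.h>
#include <time.h>
#include <sys/syscall.h>
#include <linux/aio_abi.h>

/* made-up helper: non-blocking poll of a kernel aio context */
static int
fetch_events (aio_context_t ctx, struct io_event *events, int max)
{
  struct timespec ts = { 0, 0 }; /* zero timeout: poll, never block */
  long res = ev_syscall5 (SYS_io_getevents, ctx, 0, max, events, &ts);

  if (res < 0) /* any negative value is an error, not just -1 */
    {
      if (errno == EINTR)
        return 0;

      fprintf (stderr, "io_getevents: %s\n", strerror (errno));
      return -1;
    }

  return (int)res; /* number of completions written into events */
}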
1662
1648#define NUMPRI (EV_MAXPRI - EV_MINPRI + 1) 1663#define NUMPRI (EV_MAXPRI - EV_MINPRI + 1)
1649 1664
1650#if EV_MINPRI == EV_MAXPRI 1665#if EV_MINPRI == EV_MAXPRI
1651# define ABSPRI(w) (((W)w), 0) 1666# define ABSPRI(w) (((W)w), 0)
1652#else 1667#else
1882{ 1897{
1883 WL head; 1898 WL head;
1884 unsigned char events; /* the events watched for */ 1899 unsigned char events; /* the events watched for */
1885 unsigned char reify; /* flag set when this ANFD needs reification (EV_ANFD_REIFY, EV__IOFDSET) */ 1900 unsigned char reify; /* flag set when this ANFD needs reification (EV_ANFD_REIFY, EV__IOFDSET) */
1886 unsigned char emask; /* some backends store the actual kernel mask in here */ 1901 unsigned char emask; /* some backends store the actual kernel mask in here */
1887 unsigned char unused; 1902 unsigned char eflags; /* flags field for use by backends */
1888#if EV_USE_EPOLL 1903#if EV_USE_EPOLL
1889 unsigned int egen; /* generation counter to counter epoll bugs */ 1904 unsigned int egen; /* generation counter to counter epoll bugs */
1890#endif 1905#endif
1891#if EV_SELECT_IS_WINSOCKET || EV_USE_IOCP 1906#if EV_SELECT_IS_WINSOCKET || EV_USE_IOCP
1892 SOCKET handle; 1907 SOCKET handle;
2914 int flags = EVBACKEND_EPOLL | EVBACKEND_KQUEUE | EVBACKEND_PORT; 2929 int flags = EVBACKEND_EPOLL | EVBACKEND_KQUEUE | EVBACKEND_PORT;
2915 2930
2916 /* epoll embeddability broken on all linux versions up to at least 2.6.23 */ 2931 /* epoll embeddability broken on all linux versions up to at least 2.6.23 */
2917 if (ev_linux_version () < 0x020620) /* disable it on linux < 2.6.32 */ 2932 if (ev_linux_version () < 0x020620) /* disable it on linux < 2.6.32 */
2918 flags &= ~EVBACKEND_EPOLL; 2933 flags &= ~EVBACKEND_EPOLL;
2934
2935 /* EVBACKEND_LINUXAIO is theoretically embeddable, but suffers from a performance overhead */
2936
2937 /* EVBACKEND_IOURING is practically embeddable, but the current implementation is not
2938 * because our backend_fd is the epoll fd we need as fallback.
2939 * if the kernel ever is fixed, this might change...
2940 */
2919 2941
2920 return flags; 2942 return flags;
2921} 2943}
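The new comments explain why linuxaio and iouring are left out of the embeddable set while epoll, kqueue and port stay in it. For context, a short sketch of the documented ev_embed pattern that consumes this mask, not part of the patch (embed_cb and setup_embedded_loop are made-up names):

#include <ev.h>

static ev_embed embed_watcher;

static void
embed_cb (EV_P_ ev_embed *w, int revents)
{
  ev_embed_sweep (EV_A_ w); /* run the pending events of the embedded loop */
}

static struct ev_loop *
setup_embedded_loop (struct ev_loop *outer)
{
  /* only embed a backend that is both embeddable and recommended here */
  unsigned int mask = ev_embeddable_backends () & ev_recommended_backends ();
  struct ev_loop *inner = mask ? ev_loop_new (mask) : 0;

  if (inner)
    {
      ev_embed_init (&embed_watcher, embed_cb, inner);
      ev_embed_start (outer, &embed_watcher);
    }

  return inner; /* 0 means no embeddable backend; just use the outer loop */
}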
2922 2944
2923unsigned int 2945unsigned int
