/cvs/libev/ev.c

Comparing libev/ev.c (file contents):
Revision 1.501 by root, Mon Jul 1 21:47:42 2019 UTC vs.
Revision 1.537 by sf-exg, Sun May 14 19:02:31 2023 UTC

1/* 1/*
2 * libev event processing core, watcher management 2 * libev event processing core, watcher management
3 * 3 *
4 * Copyright (c) 2007-2019 Marc Alexander Lehmann <libev@schmorp.de> 4 * Copyright (c) 2007-2020 Marc Alexander Lehmann <libev@schmorp.de>
5 * All rights reserved. 5 * All rights reserved.
6 * 6 *
7 * Redistribution and use in source and binary forms, with or without modifica- 7 * Redistribution and use in source and binary forms, with or without modifica-
8 * tion, are permitted provided that the following conditions are met: 8 * tion, are permitted provided that the following conditions are met:
9 * 9 *
35 * and other provisions required by the GPL. If you do not delete the 35 * and other provisions required by the GPL. If you do not delete the
36 * provisions above, a recipient may use your version of this file under 36 * provisions above, a recipient may use your version of this file under
37 * either the BSD or the GPL. 37 * either the BSD or the GPL.
38 */ 38 */
39 39
40#pragma clang diagnostic ignored "-Wunused-value"
41#pragma clang diagnostic ignored "-Wcomment"
42#pragma clang diagnostic ignored "-Wextern-initializer"
43
40/* this big block deduces configuration from config.h */ 44/* this big block deduces configuration from config.h */
41#ifndef EV_STANDALONE 45#ifndef EV_STANDALONE
42# ifdef EV_CONFIG_H 46# ifdef EV_CONFIG_H
43# include EV_CONFIG_H 47# include EV_CONFIG_H
44# else 48# else
117# define EV_USE_EPOLL 0 121# define EV_USE_EPOLL 0
118# endif 122# endif
119 123
120# if HAVE_LINUX_AIO_ABI_H 124# if HAVE_LINUX_AIO_ABI_H
121# ifndef EV_USE_LINUXAIO 125# ifndef EV_USE_LINUXAIO
122# define EV_USE_LINUXAIO EV_FEATURE_BACKENDS 126# define EV_USE_LINUXAIO 0 /* was: EV_FEATURE_BACKENDS, always off by default */
123# endif 127# endif
124# else 128# else
125# undef EV_USE_LINUXAIO 129# undef EV_USE_LINUXAIO
126# define EV_USE_LINUXAIO 0 130# define EV_USE_LINUXAIO 0
127# endif 131# endif
128 132
133# if HAVE_LINUX_FS_H && HAVE_SYS_TIMERFD_H && HAVE_KERNEL_RWF_T
134# ifndef EV_USE_IOURING
135# define EV_USE_IOURING EV_FEATURE_BACKENDS
136# endif
137# else
138# undef EV_USE_IOURING
139# define EV_USE_IOURING 0
140# endif
141
129# if HAVE_KQUEUE && HAVE_SYS_EVENT_H 142# if HAVE_KQUEUE && HAVE_SYS_EVENT_H
130# ifndef EV_USE_KQUEUE 143# ifndef EV_USE_KQUEUE
131# define EV_USE_KQUEUE EV_FEATURE_BACKENDS 144# define EV_USE_KQUEUE EV_FEATURE_BACKENDS
132# endif 145# endif
133# else 146# else
168# endif 181# endif
169# else 182# else
170# undef EV_USE_EVENTFD 183# undef EV_USE_EVENTFD
171# define EV_USE_EVENTFD 0 184# define EV_USE_EVENTFD 0
172# endif 185# endif
173 186
187# if HAVE_SYS_TIMERFD_H
188# ifndef EV_USE_TIMERFD
189# define EV_USE_TIMERFD EV_FEATURE_OS
190# endif
191# else
192# undef EV_USE_TIMERFD
193# define EV_USE_TIMERFD 0
194# endif
195
174#endif 196#endif
175 197
176/* OS X, in its infinite idiocy, actually HARDCODES 198/* OS X, in its infinite idiocy, actually HARDCODES
177 * a limit of 1024 into their select. Where people have brains, 199 * a limit of 1024 into their select. Where people have brains,
178 * OS X engineers apparently have a vacuum. Or maybe they were 200 * OS X engineers apparently have a vacuum. Or maybe they were
326# define EV_USE_PORT 0 348# define EV_USE_PORT 0
327#endif 349#endif
328 350
329#ifndef EV_USE_LINUXAIO 351#ifndef EV_USE_LINUXAIO
330# if __linux /* libev currently assumes linux/aio_abi.h is always available on linux */ 352# if __linux /* libev currently assumes linux/aio_abi.h is always available on linux */
331# define EV_USE_LINUXAIO 1 353# define EV_USE_LINUXAIO 0 /* was: 1, always off by default */
332# else 354# else
333# define EV_USE_LINUXAIO 0 355# define EV_USE_LINUXAIO 0
334# endif 356# endif
335#endif 357#endif
336 358
337#ifndef EV_USE_IOURING 359#ifndef EV_USE_IOURING
338# if __linux 360# if __linux /* later checks might disable again */
339# define EV_USE_IOURING 0 361# define EV_USE_IOURING 1
340# else 362# else
341# define EV_USE_IOURING 0 363# define EV_USE_IOURING 0
342# endif 364# endif
343#endif 365#endif
344 366
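With EV_USE_IOURING now defaulting to on under Linux (subject to the later header and kernel checks), an application can still request or avoid particular backends at loop-creation time through the public API. A minimal sketch; the preference-then-fallback policy is illustrative, not something libev mandates:

#include <stdio.h>
#include "ev.h"

int main (void)
{
  struct ev_loop *loop = 0;

  /* prefer io_uring if this libev build supports it at all ... */
  if (ev_supported_backends () & EVBACKEND_IOURING)
    loop = ev_loop_new (EVBACKEND_IOURING);

  /* ... otherwise, or if creation failed, let libev pick a recommended backend */
  if (!loop)
    loop = ev_loop_new (EVFLAG_AUTO);

  printf ("active backend: %#x\n", ev_backend (loop));

  ev_loop_destroy (loop);
  return 0;
}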
369#ifndef EV_USE_SIGNALFD 391#ifndef EV_USE_SIGNALFD
370# if __linux && (__GLIBC__ > 2 || (__GLIBC__ == 2 && __GLIBC_MINOR__ >= 7)) 392# if __linux && (__GLIBC__ > 2 || (__GLIBC__ == 2 && __GLIBC_MINOR__ >= 7))
371# define EV_USE_SIGNALFD EV_FEATURE_OS 393# define EV_USE_SIGNALFD EV_FEATURE_OS
372# else 394# else
373# define EV_USE_SIGNALFD 0 395# define EV_USE_SIGNALFD 0
396# endif
397#endif
398
399#ifndef EV_USE_TIMERFD
400# if __linux && (__GLIBC__ > 2 || (__GLIBC__ == 2 && __GLIBC_MINOR__ >= 8))
401# define EV_USE_TIMERFD EV_FEATURE_OS
402# else
403# define EV_USE_TIMERFD 0
374# endif 404# endif
375#endif 405#endif
376 406
377#if 0 /* debugging */ 407#if 0 /* debugging */
378# define EV_VERIFY 3 408# define EV_VERIFY 3
438#if !EV_STAT_ENABLE 468#if !EV_STAT_ENABLE
439# undef EV_USE_INOTIFY 469# undef EV_USE_INOTIFY
440# define EV_USE_INOTIFY 0 470# define EV_USE_INOTIFY 0
441#endif 471#endif
442 472
473#if __linux && EV_USE_IOURING
474# include <linux/version.h>
475# if LINUX_VERSION_CODE < KERNEL_VERSION(4,14,0)
476# undef EV_USE_IOURING
477# define EV_USE_IOURING 0
478# endif
479#endif
480
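The KERNEL_VERSION comparison above is a compile-time test against the kernel headers libev is built with, not against the kernel it later runs on. A standalone illustration of the same macros (Linux-only, assuming <linux/version.h> is installed):

#include <stdio.h>
#include <linux/version.h>

int main (void)
{
#if LINUX_VERSION_CODE >= KERNEL_VERSION(4,14,0)
  puts ("built against kernel headers >= 4.14");
#else
  puts ("built against kernel headers < 4.14");
#endif
  printf ("LINUX_VERSION_CODE = %#x\n", LINUX_VERSION_CODE);
  return 0;
}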
443#if !EV_USE_NANOSLEEP 481#if !EV_USE_NANOSLEEP
444/* hp-ux has it in sys/time.h, which we unconditionally include above */ 482/* hp-ux has it in sys/time.h, which we unconditionally include above */
445# if !defined _WIN32 && !defined __hpux 483# if !defined _WIN32 && !defined __hpux
446# include <sys/select.h> 484# include <sys/select.h>
447# endif 485# endif
448#endif 486#endif
449 487
450#if EV_USE_LINUXAIO 488#if EV_USE_LINUXAIO
451# include <sys/syscall.h> 489# include <sys/syscall.h>
452# if !SYS_io_getevents || !EV_USE_EPOLL /* ev_linxaio uses ev_poll.c:ev_epoll_create */ 490# if SYS_io_getevents && EV_USE_EPOLL /* linuxaio backend requires epoll backend */
491# define EV_NEED_SYSCALL 1
492# else
453# undef EV_USE_LINUXAIO 493# undef EV_USE_LINUXAIO
454# define EV_USE_LINUXAIO 0 494# define EV_USE_LINUXAIO 0
455# else
456# define EV_NEED_SYSCALL 1
457# endif 495# endif
458#endif 496#endif
459 497
460#if EV_USE_IOURING 498#if EV_USE_IOURING
461# include <sys/syscall.h> 499# include <sys/syscall.h>
462# if !__alpha && !SYS_io_uring_setup 500# if !SYS_io_uring_register && __linux && !__alpha
463# define SYS_io_uring_setup 425 501# define SYS_io_uring_setup 425
464# define SYS_io_uring_enter 426 502# define SYS_io_uring_enter 426
465# define SYS_io_uring_wregister 427 503# define SYS_io_uring_register 427
466# endif 504# endif
467# if SYS_io_uring_setup 505# if SYS_io_uring_setup && EV_USE_EPOLL /* iouring backend requires epoll backend */
468# define EV_NEED_SYSCALL 1 506# define EV_NEED_SYSCALL 1
469# else 507# else
470# undef EV_USE_IOURING 508# undef EV_USE_IOURING
471# define EV_USE_IOURING 0 509# define EV_USE_IOURING 0
472# endif 510# endif
481# define EV_USE_INOTIFY 0 519# define EV_USE_INOTIFY 0
482# endif 520# endif
483#endif 521#endif
484 522
485#if EV_USE_EVENTFD 523#if EV_USE_EVENTFD
486/* our minimum requirement is glibc 2.7 which has the stub, but not the header */ 524/* our minimum requirement is glibc 2.7 which has the stub, but not the full header */
487# include <stdint.h> 525# include <stdint.h>
488# ifndef EFD_NONBLOCK 526# ifndef EFD_NONBLOCK
489# define EFD_NONBLOCK O_NONBLOCK 527# define EFD_NONBLOCK O_NONBLOCK
490# endif 528# endif
491# ifndef EFD_CLOEXEC 529# ifndef EFD_CLOEXEC
497# endif 535# endif
498EV_CPP(extern "C") int (eventfd) (unsigned int initval, int flags); 536EV_CPP(extern "C") int (eventfd) (unsigned int initval, int flags);
499#endif 537#endif
500 538
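For reference, what the eventfd prototype above is declared for: a single fd whose 64-bit counter can be written from any thread to wake a poller, which libev uses as a lighter-weight self-pipe when available. A minimal Linux-only sketch using the system header directly (error handling omitted):

#include <stdint.h>
#include <stdio.h>
#include <sys/eventfd.h>
#include <unistd.h>

int main (void)
{
  int fd = eventfd (0, EFD_CLOEXEC);
  uint64_t one = 1, value = 0;

  write (fd, &one, sizeof (one));     /* "wake up": adds 1 to the counter */
  read  (fd, &value, sizeof (value)); /* fetches and resets the counter */

  printf ("counter was %llu\n", (unsigned long long)value);

  close (fd);
  return 0;
}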
501#if EV_USE_SIGNALFD 539#if EV_USE_SIGNALFD
502/* our minimum requirement is glibc 2.7 which has the stub, but not the header */ 540/* our minimum requirement is glibc 2.7 which has the stub, but not the full header */
503# include <stdint.h> 541# include <stdint.h>
504# ifndef SFD_NONBLOCK 542# ifndef SFD_NONBLOCK
505# define SFD_NONBLOCK O_NONBLOCK 543# define SFD_NONBLOCK O_NONBLOCK
506# endif 544# endif
507# ifndef SFD_CLOEXEC 545# ifndef SFD_CLOEXEC
509# define SFD_CLOEXEC O_CLOEXEC 547# define SFD_CLOEXEC O_CLOEXEC
510# else 548# else
511# define SFD_CLOEXEC 02000000 549# define SFD_CLOEXEC 02000000
512# endif 550# endif
513# endif 551# endif
514EV_CPP (extern "C") int signalfd (int fd, const sigset_t *mask, int flags); 552EV_CPP (extern "C") int (signalfd) (int fd, const sigset_t *mask, int flags);
515 553
516struct signalfd_siginfo 554struct signalfd_siginfo
517{ 555{
518 uint32_t ssi_signo; 556 uint32_t ssi_signo;
519 char pad[128 - sizeof (uint32_t)]; 557 char pad[128 - sizeof (uint32_t)];
520}; 558};
521#endif 559#endif
522 560
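Similarly, the hand-rolled signalfd prototype and the padded struct signalfd_siginfo above mirror the kernel ABI so libev can use signalfd even when the libc header predates it. The underlying pattern, standalone and with the real system header (Linux-only, error handling omitted):

#include <signal.h>
#include <stdio.h>
#include <sys/signalfd.h>
#include <unistd.h>

int main (void)
{
  sigset_t mask;
  sigemptyset (&mask);
  sigaddset (&mask, SIGINT);
  sigprocmask (SIG_BLOCK, &mask, 0); /* the signal must be blocked first */

  int fd = signalfd (-1, &mask, SFD_CLOEXEC);
  struct signalfd_siginfo si;

  /* blocks until SIGINT arrives, then delivers it as ordinary data on the fd */
  if (read (fd, &si, sizeof (si)) == (ssize_t)sizeof (si))
    printf ("got signal %u\n", (unsigned)si.ssi_signo);

  close (fd);
  return 0;
}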
523/*****************************************************************************/ 561/* for timerfd, libev core requires TFD_TIMER_CANCEL_ON_SET &c */
524 562#if EV_USE_TIMERFD
525#if EV_NEED_SYSCALL 563# include <sys/timerfd.h>
526 564/* timerfd is only used for periodics */
527#include <sys/syscall.h> 565# if !(defined (TFD_TIMER_CANCEL_ON_SET) && defined (TFD_CLOEXEC) && defined (TFD_NONBLOCK)) || !EV_PERIODIC_ENABLE
528 566# undef EV_USE_TIMERFD
529/* 567# define EV_USE_TIMERFD 0
530 * define some syscall wrappers for common architectures
531 * this is mostly for nice looks during debugging, not performance.
532 * our syscalls return < 0, not == -1, on error. which is good
533 * enough for linux aio.
534 * TODO: arm is also common nowadays, maybe even mips and x86
535 * TODO: after implementing this, it suddenly looks like overkill, but its hard to remove...
536 */
537#if __GNUC__ && __linux && ECB_AMD64 && !defined __OPTIMIZE_SIZE__
538 /* the costly errno access probably kills this for size optimisation */
539
540 #define ev_syscall(nr,narg,arg1,arg2,arg3,arg4,arg5) \
541 ({ \
542 long res; \
543 register unsigned long r5 __asm__ ("r8" ); \
544 register unsigned long r4 __asm__ ("r10"); \
545 register unsigned long r3 __asm__ ("rdx"); \
546 register unsigned long r2 __asm__ ("rsi"); \
547 register unsigned long r1 __asm__ ("rdi"); \
548 if (narg >= 5) r5 = (unsigned long)(arg5); \
549 if (narg >= 4) r4 = (unsigned long)(arg4); \
550 if (narg >= 3) r3 = (unsigned long)(arg3); \
551 if (narg >= 2) r2 = (unsigned long)(arg2); \
552 if (narg >= 1) r1 = (unsigned long)(arg1); \
553 __asm__ __volatile__ ( \
554 "syscall\n\t" \
555 : "=a" (res) \
556 : "0" (nr), "r" (r1), "r" (r2), "r" (r3), "r" (r4), "r" (r5) \
557 : "cc", "r11", "cx", "memory"); \
558 errno = -res; \
559 res; \
560 })
561
562#endif 568# endif
563
564#ifdef ev_syscall
565 #define ev_syscall0(nr) ev_syscall (nr, 0, 0, 0, 0, 0, 0)
566 #define ev_syscall1(nr,arg1) ev_syscall (nr, 1, arg1, 0, 0, 0, 0)
567 #define ev_syscall2(nr,arg1,arg2) ev_syscall (nr, 2, arg1, arg2, 0, 0, 0)
568 #define ev_syscall3(nr,arg1,arg2,arg3) ev_syscall (nr, 3, arg1, arg2, arg3, 0, 0)
569 #define ev_syscall4(nr,arg1,arg2,arg3,arg4) ev_syscall (nr, 3, arg1, arg2, arg3, arg4, 0)
570 #define ev_syscall5(nr,arg1,arg2,arg3,arg4,arg5) ev_syscall (nr, 5, arg1, arg2, arg3, arg4, arg5)
571#else
572 #define ev_syscall0(nr) syscall (nr)
573 #define ev_syscall1(nr,arg1) syscall (nr, arg1)
574 #define ev_syscall2(nr,arg1,arg2) syscall (nr, arg1, arg2)
575 #define ev_syscall3(nr,arg1,arg2,arg3) syscall (nr, arg1, arg2, arg3)
576 #define ev_syscall4(nr,arg1,arg2,arg3,arg4) syscall (nr, arg1, arg2, arg3, arg4)
577 #define ev_syscall5(nr,arg1,arg2,arg3,arg4,arg5) syscall (nr, arg1, arg2, arg3, arg4, arg5)
578#endif
579
580#endif 569#endif
581 570
582/*****************************************************************************/ 571/*****************************************************************************/
583 572
584#if EV_VERIFY >= 3 573#if EV_VERIFY >= 3
592 * This value is good at least till the year 4000. 581 * This value is good at least till the year 4000.
593 */ 582 */
594#define MIN_INTERVAL 0.0001220703125 /* 1/2**13, good till 4000 */ 583#define MIN_INTERVAL 0.0001220703125 /* 1/2**13, good till 4000 */
595/*#define MIN_INTERVAL 0.00000095367431640625 /* 1/2**20, good till 2200 */ 584/*#define MIN_INTERVAL 0.00000095367431640625 /* 1/2**20, good till 2200 */
596 585
597#define MIN_TIMEJUMP 1. /* minimum timejump that gets detected (if monotonic clock available) */ 586#define MIN_TIMEJUMP 1. /* minimum timejump that gets detected (if monotonic clock available) */
598#define MAX_BLOCKTIME 59.743 /* never wait longer than this time (to detect time jumps) */ 587#define MAX_BLOCKTIME 59.743 /* never wait longer than this time (to detect time jumps) */
588#define MAX_BLOCKTIME2 1500001.07 /* same, but when timerfd is used to detect jumps, also safe delay to not overflow */
599 589
590/* find a portable timestamp that is "always" in the future but fits into time_t.
591 * this is quite hard, and we are mostly guessing - we handle 32 bit signed/unsigned time_t,
592 * and sizes larger than 32 bit, and maybe the unlikely floating point time_t */
593#define EV_TSTAMP_HUGE \
594 (sizeof (time_t) >= 8 ? 10000000000000. \
595 : 0 < (time_t)4294967295 ? 4294967295. \
596 : 2147483647.) \
597
598#ifndef EV_TS_CONST
599# define EV_TS_CONST(nv) nv
600# define EV_TS_TO_MSEC(a) a * 1e3 + 0.9999
601# define EV_TS_FROM_USEC(us) us * 1e-6
600#define EV_TV_SET(tv,t) do { tv.tv_sec = (long)t; tv.tv_usec = (long)((t - tv.tv_sec) * 1e6); } while (0) 602# define EV_TV_SET(tv,t) do { tv.tv_sec = (long)t; tv.tv_usec = (long)((t - tv.tv_sec) * 1e6); } while (0)
601#define EV_TS_SET(ts,t) do { ts.tv_sec = (long)t; ts.tv_nsec = (long)((t - ts.tv_sec) * 1e9); } while (0) 603# define EV_TS_SET(ts,t) do { ts.tv_sec = (long)t; ts.tv_nsec = (long)((t - ts.tv_sec) * 1e9); } while (0)
604# define EV_TV_GET(tv) ((tv).tv_sec + (tv).tv_usec * 1e-6)
605# define EV_TS_GET(ts) ((ts).tv_sec + (ts).tv_nsec * 1e-9)
606#endif
602 607
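The EV_TS_* and EV_TV_* macros above exist so that, with the default EV_TS_CONST, timestamps remain plain doubles; the conversions they wrap are the usual seconds-to-timespec split. A standalone illustration of the round trip:

#include <stdio.h>
#include <time.h>

int main (void)
{
  double t = 1.25; /* 1.25 seconds */
  struct timespec ts;

  ts.tv_sec  = (long)t;
  ts.tv_nsec = (long)((t - ts.tv_sec) * 1e9);

  printf ("%.3fs -> { %ld, %ld }\n", t, (long)ts.tv_sec, ts.tv_nsec);
  printf ("back to double: %.3f\n", ts.tv_sec + ts.tv_nsec * 1e-9);
  return 0;
}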
603/* the following is ecb.h embedded into libev - use update_ev_c to update from an external copy */ 608/* the following is ecb.h embedded into libev - use update_ev_c to update from an external copy */
604/* ECB.H BEGIN */ 609/* ECB.H BEGIN */
605/* 610/*
606 * libecb - http://software.schmorp.de/pkg/libecb 611 * libecb - http://software.schmorp.de/pkg/libecb
607 * 612 *
608 * Copyright (©) 2009-2015 Marc Alexander Lehmann <libecb@schmorp.de> 613 * Copyright (©) 2009-2015,2018-2020 Marc Alexander Lehmann <libecb@schmorp.de>
609 * Copyright (©) 2011 Emanuele Giaquinta 614 * Copyright (©) 2011 Emanuele Giaquinta
610 * All rights reserved. 615 * All rights reserved.
611 * 616 *
612 * Redistribution and use in source and binary forms, with or without modifica- 617 * Redistribution and use in source and binary forms, with or without modifica-
613 * tion, are permitted provided that the following conditions are met: 618 * tion, are permitted provided that the following conditions are met:
644 649
645#ifndef ECB_H 650#ifndef ECB_H
646#define ECB_H 651#define ECB_H
647 652
648/* 16 bits major, 16 bits minor */ 653/* 16 bits major, 16 bits minor */
649#define ECB_VERSION 0x00010006 654#define ECB_VERSION 0x00010008
650 655
651#ifdef _WIN32 656#include <string.h> /* for memcpy */
657
658#if defined (_WIN32) && !defined (__MINGW32__)
652 typedef signed char int8_t; 659 typedef signed char int8_t;
653 typedef unsigned char uint8_t; 660 typedef unsigned char uint8_t;
661 typedef signed char int_fast8_t;
662 typedef unsigned char uint_fast8_t;
654 typedef signed short int16_t; 663 typedef signed short int16_t;
655 typedef unsigned short uint16_t; 664 typedef unsigned short uint16_t;
665 typedef signed int int_fast16_t;
666 typedef unsigned int uint_fast16_t;
656 typedef signed int int32_t; 667 typedef signed int int32_t;
657 typedef unsigned int uint32_t; 668 typedef unsigned int uint32_t;
669 typedef signed int int_fast32_t;
670 typedef unsigned int uint_fast32_t;
658 #if __GNUC__ 671 #if __GNUC__
659 typedef signed long long int64_t; 672 typedef signed long long int64_t;
660 typedef unsigned long long uint64_t; 673 typedef unsigned long long uint64_t;
661 #else /* _MSC_VER || __BORLANDC__ */ 674 #else /* _MSC_VER || __BORLANDC__ */
662 typedef signed __int64 int64_t; 675 typedef signed __int64 int64_t;
663 typedef unsigned __int64 uint64_t; 676 typedef unsigned __int64 uint64_t;
664 #endif 677 #endif
678 typedef int64_t int_fast64_t;
679 typedef uint64_t uint_fast64_t;
665 #ifdef _WIN64 680 #ifdef _WIN64
666 #define ECB_PTRSIZE 8 681 #define ECB_PTRSIZE 8
667 typedef uint64_t uintptr_t; 682 typedef uint64_t uintptr_t;
668 typedef int64_t intptr_t; 683 typedef int64_t intptr_t;
669 #else 684 #else
680 #endif 695 #endif
681#endif 696#endif
682 697
683#define ECB_GCC_AMD64 (__amd64 || __amd64__ || __x86_64 || __x86_64__) 698#define ECB_GCC_AMD64 (__amd64 || __amd64__ || __x86_64 || __x86_64__)
684#define ECB_MSVC_AMD64 (_M_AMD64 || _M_X64) 699#define ECB_MSVC_AMD64 (_M_AMD64 || _M_X64)
700
701#ifndef ECB_OPTIMIZE_SIZE
702 #if __OPTIMIZE_SIZE__
703 #define ECB_OPTIMIZE_SIZE 1
704 #else
705 #define ECB_OPTIMIZE_SIZE 0
706 #endif
707#endif
685 708
686/* work around x32 idiocy by defining proper macros */ 709/* work around x32 idiocy by defining proper macros */
687#if ECB_GCC_AMD64 || ECB_MSVC_AMD64 710#if ECB_GCC_AMD64 || ECB_MSVC_AMD64
688 #if _ILP32 711 #if _ILP32
689 #define ECB_AMD64_X32 1 712 #define ECB_AMD64_X32 1
1196ecb_inline ecb_const uint32_t ecb_rotl32 (uint32_t x, unsigned int count) { return (x >> (32 - count)) | (x << count); } 1219ecb_inline ecb_const uint32_t ecb_rotl32 (uint32_t x, unsigned int count) { return (x >> (32 - count)) | (x << count); }
1197ecb_inline ecb_const uint32_t ecb_rotr32 (uint32_t x, unsigned int count) { return (x << (32 - count)) | (x >> count); } 1220ecb_inline ecb_const uint32_t ecb_rotr32 (uint32_t x, unsigned int count) { return (x << (32 - count)) | (x >> count); }
1198ecb_inline ecb_const uint64_t ecb_rotl64 (uint64_t x, unsigned int count) { return (x >> (64 - count)) | (x << count); } 1221ecb_inline ecb_const uint64_t ecb_rotl64 (uint64_t x, unsigned int count) { return (x >> (64 - count)) | (x << count); }
1199ecb_inline ecb_const uint64_t ecb_rotr64 (uint64_t x, unsigned int count) { return (x << (64 - count)) | (x >> count); } 1222ecb_inline ecb_const uint64_t ecb_rotr64 (uint64_t x, unsigned int count) { return (x << (64 - count)) | (x >> count); }
1200 1223
1224#if ECB_CPP
1225
1226inline uint8_t ecb_ctz (uint8_t v) { return ecb_ctz32 (v); }
1227inline uint16_t ecb_ctz (uint16_t v) { return ecb_ctz32 (v); }
1228inline uint32_t ecb_ctz (uint32_t v) { return ecb_ctz32 (v); }
1229inline uint64_t ecb_ctz (uint64_t v) { return ecb_ctz64 (v); }
1230
1231inline bool ecb_is_pot (uint8_t v) { return ecb_is_pot32 (v); }
1232inline bool ecb_is_pot (uint16_t v) { return ecb_is_pot32 (v); }
1233inline bool ecb_is_pot (uint32_t v) { return ecb_is_pot32 (v); }
1234inline bool ecb_is_pot (uint64_t v) { return ecb_is_pot64 (v); }
1235
1236inline int ecb_ld (uint8_t v) { return ecb_ld32 (v); }
1237inline int ecb_ld (uint16_t v) { return ecb_ld32 (v); }
1238inline int ecb_ld (uint32_t v) { return ecb_ld32 (v); }
1239inline int ecb_ld (uint64_t v) { return ecb_ld64 (v); }
1240
1241inline int ecb_popcount (uint8_t v) { return ecb_popcount32 (v); }
1242inline int ecb_popcount (uint16_t v) { return ecb_popcount32 (v); }
1243inline int ecb_popcount (uint32_t v) { return ecb_popcount32 (v); }
1244inline int ecb_popcount (uint64_t v) { return ecb_popcount64 (v); }
1245
1246inline uint8_t ecb_bitrev (uint8_t v) { return ecb_bitrev8 (v); }
1247inline uint16_t ecb_bitrev (uint16_t v) { return ecb_bitrev16 (v); }
1248inline uint32_t ecb_bitrev (uint32_t v) { return ecb_bitrev32 (v); }
1249
1250inline uint8_t ecb_rotl (uint8_t v, unsigned int count) { return ecb_rotl8 (v, count); }
1251inline uint16_t ecb_rotl (uint16_t v, unsigned int count) { return ecb_rotl16 (v, count); }
1252inline uint32_t ecb_rotl (uint32_t v, unsigned int count) { return ecb_rotl32 (v, count); }
1253inline uint64_t ecb_rotl (uint64_t v, unsigned int count) { return ecb_rotl64 (v, count); }
1254
1255inline uint8_t ecb_rotr (uint8_t v, unsigned int count) { return ecb_rotr8 (v, count); }
1256inline uint16_t ecb_rotr (uint16_t v, unsigned int count) { return ecb_rotr16 (v, count); }
1257inline uint32_t ecb_rotr (uint32_t v, unsigned int count) { return ecb_rotr32 (v, count); }
1258inline uint64_t ecb_rotr (uint64_t v, unsigned int count) { return ecb_rotr64 (v, count); }
1259
1260#endif
1261
1201#if ECB_GCC_VERSION(4,3) || (ECB_CLANG_BUILTIN(__builtin_bswap32) && ECB_CLANG_BUILTIN(__builtin_bswap64)) 1262#if ECB_GCC_VERSION(4,3) || (ECB_CLANG_BUILTIN(__builtin_bswap32) && ECB_CLANG_BUILTIN(__builtin_bswap64))
1202 #if ECB_GCC_VERSION(4,8) || ECB_CLANG_BUILTIN(__builtin_bswap16) 1263 #if ECB_GCC_VERSION(4,8) || ECB_CLANG_BUILTIN(__builtin_bswap16)
1203 #define ecb_bswap16(x) __builtin_bswap16 (x) 1264 #define ecb_bswap16(x) __builtin_bswap16 (x)
1204 #else 1265 #else
1205 #define ecb_bswap16(x) (__builtin_bswap32 (x) >> 16) 1266 #define ecb_bswap16(x) (__builtin_bswap32 (x) >> 16)
1276ecb_inline ecb_const ecb_bool ecb_big_endian (void); 1337ecb_inline ecb_const ecb_bool ecb_big_endian (void);
1277ecb_inline ecb_const ecb_bool ecb_big_endian (void) { return ecb_byteorder_helper () == 0x11223344; } 1338ecb_inline ecb_const ecb_bool ecb_big_endian (void) { return ecb_byteorder_helper () == 0x11223344; }
1278ecb_inline ecb_const ecb_bool ecb_little_endian (void); 1339ecb_inline ecb_const ecb_bool ecb_little_endian (void);
1279ecb_inline ecb_const ecb_bool ecb_little_endian (void) { return ecb_byteorder_helper () == 0x44332211; } 1340ecb_inline ecb_const ecb_bool ecb_little_endian (void) { return ecb_byteorder_helper () == 0x44332211; }
1280 1341
1342/*****************************************************************************/
1343/* unaligned load/store */
1344
1345ecb_inline uint_fast16_t ecb_be_u16_to_host (uint_fast16_t v) { return ecb_little_endian () ? ecb_bswap16 (v) : v; }
1346ecb_inline uint_fast32_t ecb_be_u32_to_host (uint_fast32_t v) { return ecb_little_endian () ? ecb_bswap32 (v) : v; }
1347ecb_inline uint_fast64_t ecb_be_u64_to_host (uint_fast64_t v) { return ecb_little_endian () ? ecb_bswap64 (v) : v; }
1348
1349ecb_inline uint_fast16_t ecb_le_u16_to_host (uint_fast16_t v) { return ecb_big_endian () ? ecb_bswap16 (v) : v; }
1350ecb_inline uint_fast32_t ecb_le_u32_to_host (uint_fast32_t v) { return ecb_big_endian () ? ecb_bswap32 (v) : v; }
1351ecb_inline uint_fast64_t ecb_le_u64_to_host (uint_fast64_t v) { return ecb_big_endian () ? ecb_bswap64 (v) : v; }
1352
1353ecb_inline uint_fast16_t ecb_peek_u16_u (const void *ptr) { uint16_t v; memcpy (&v, ptr, sizeof (v)); return v; }
1354ecb_inline uint_fast32_t ecb_peek_u32_u (const void *ptr) { uint32_t v; memcpy (&v, ptr, sizeof (v)); return v; }
1355ecb_inline uint_fast64_t ecb_peek_u64_u (const void *ptr) { uint64_t v; memcpy (&v, ptr, sizeof (v)); return v; }
1356
1357ecb_inline uint_fast16_t ecb_peek_be_u16_u (const void *ptr) { return ecb_be_u16_to_host (ecb_peek_u16_u (ptr)); }
1358ecb_inline uint_fast32_t ecb_peek_be_u32_u (const void *ptr) { return ecb_be_u32_to_host (ecb_peek_u32_u (ptr)); }
1359ecb_inline uint_fast64_t ecb_peek_be_u64_u (const void *ptr) { return ecb_be_u64_to_host (ecb_peek_u64_u (ptr)); }
1360
1361ecb_inline uint_fast16_t ecb_peek_le_u16_u (const void *ptr) { return ecb_le_u16_to_host (ecb_peek_u16_u (ptr)); }
1362ecb_inline uint_fast32_t ecb_peek_le_u32_u (const void *ptr) { return ecb_le_u32_to_host (ecb_peek_u32_u (ptr)); }
1363ecb_inline uint_fast64_t ecb_peek_le_u64_u (const void *ptr) { return ecb_le_u64_to_host (ecb_peek_u64_u (ptr)); }
1364
1365ecb_inline uint_fast16_t ecb_host_to_be_u16 (uint_fast16_t v) { return ecb_little_endian () ? ecb_bswap16 (v) : v; }
1366ecb_inline uint_fast32_t ecb_host_to_be_u32 (uint_fast32_t v) { return ecb_little_endian () ? ecb_bswap32 (v) : v; }
1367ecb_inline uint_fast64_t ecb_host_to_be_u64 (uint_fast64_t v) { return ecb_little_endian () ? ecb_bswap64 (v) : v; }
1368
1369ecb_inline uint_fast16_t ecb_host_to_le_u16 (uint_fast16_t v) { return ecb_big_endian () ? ecb_bswap16 (v) : v; }
1370ecb_inline uint_fast32_t ecb_host_to_le_u32 (uint_fast32_t v) { return ecb_big_endian () ? ecb_bswap32 (v) : v; }
1371ecb_inline uint_fast64_t ecb_host_to_le_u64 (uint_fast64_t v) { return ecb_big_endian () ? ecb_bswap64 (v) : v; }
1372
1373ecb_inline void ecb_poke_u16_u (void *ptr, uint16_t v) { memcpy (ptr, &v, sizeof (v)); }
1374ecb_inline void ecb_poke_u32_u (void *ptr, uint32_t v) { memcpy (ptr, &v, sizeof (v)); }
1375ecb_inline void ecb_poke_u64_u (void *ptr, uint64_t v) { memcpy (ptr, &v, sizeof (v)); }
1376
1377ecb_inline void ecb_poke_be_u16_u (void *ptr, uint_fast16_t v) { ecb_poke_u16_u (ptr, ecb_host_to_be_u16 (v)); }
1378ecb_inline void ecb_poke_be_u32_u (void *ptr, uint_fast32_t v) { ecb_poke_u32_u (ptr, ecb_host_to_be_u32 (v)); }
1379ecb_inline void ecb_poke_be_u64_u (void *ptr, uint_fast64_t v) { ecb_poke_u64_u (ptr, ecb_host_to_be_u64 (v)); }
1380
1381ecb_inline void ecb_poke_le_u16_u (void *ptr, uint_fast16_t v) { ecb_poke_u16_u (ptr, ecb_host_to_le_u16 (v)); }
1382ecb_inline void ecb_poke_le_u32_u (void *ptr, uint_fast32_t v) { ecb_poke_u32_u (ptr, ecb_host_to_le_u32 (v)); }
1383ecb_inline void ecb_poke_le_u64_u (void *ptr, uint_fast64_t v) { ecb_poke_u64_u (ptr, ecb_host_to_le_u64 (v)); }
1384
1385#if ECB_CPP
1386
1387inline uint8_t ecb_bswap (uint8_t v) { return v; }
1388inline uint16_t ecb_bswap (uint16_t v) { return ecb_bswap16 (v); }
1389inline uint32_t ecb_bswap (uint32_t v) { return ecb_bswap32 (v); }
1390inline uint64_t ecb_bswap (uint64_t v) { return ecb_bswap64 (v); }
1391
1392template<typename T> inline T ecb_be_to_host (T v) { return ecb_little_endian () ? ecb_bswap (v) : v; }
1393template<typename T> inline T ecb_le_to_host (T v) { return ecb_big_endian () ? ecb_bswap (v) : v; }
1394template<typename T> inline T ecb_peek (const void *ptr) { return *(const T *)ptr; }
1395template<typename T> inline T ecb_peek_be (const void *ptr) { return ecb_be_to_host (ecb_peek <T> (ptr)); }
1396template<typename T> inline T ecb_peek_le (const void *ptr) { return ecb_le_to_host (ecb_peek <T> (ptr)); }
1397template<typename T> inline T ecb_peek_u (const void *ptr) { T v; memcpy (&v, ptr, sizeof (v)); return v; }
1398template<typename T> inline T ecb_peek_be_u (const void *ptr) { return ecb_be_to_host (ecb_peek_u<T> (ptr)); }
1399template<typename T> inline T ecb_peek_le_u (const void *ptr) { return ecb_le_to_host (ecb_peek_u<T> (ptr)); }
1400
1401template<typename T> inline T ecb_host_to_be (T v) { return ecb_little_endian () ? ecb_bswap (v) : v; }
1402template<typename T> inline T ecb_host_to_le (T v) { return ecb_big_endian () ? ecb_bswap (v) : v; }
1403template<typename T> inline void ecb_poke (void *ptr, T v) { *(T *)ptr = v; }
1404template<typename T> inline void ecb_poke_be (void *ptr, T v) { return ecb_poke <T> (ptr, ecb_host_to_be (v)); }
1405template<typename T> inline void ecb_poke_le (void *ptr, T v) { return ecb_poke <T> (ptr, ecb_host_to_le (v)); }
1406template<typename T> inline void ecb_poke_u (void *ptr, T v) { memcpy (ptr, &v, sizeof (v)); }
1407template<typename T> inline void ecb_poke_be_u (void *ptr, T v) { return ecb_poke_u<T> (ptr, ecb_host_to_be (v)); }
1408template<typename T> inline void ecb_poke_le_u (void *ptr, T v) { return ecb_poke_u<T> (ptr, ecb_host_to_le (v)); }
1409
1410#endif
1411
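The new ecb_peek_*_u and ecb_poke_*_u helpers above rely on memcpy for unaligned access and on the byte-order predicates for endian conversion. The same idiom reduced to a standalone big-endian 32-bit read; it assumes a GCC/Clang-style __builtin_bswap32 and the __BYTE_ORDER__ predefine:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

static uint32_t peek_be_u32 (const void *ptr)
{
  uint32_t v;
  memcpy (&v, ptr, sizeof (v));  /* unaligned-safe load, no UB */
#if defined __BYTE_ORDER__ && __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
  v = __builtin_bswap32 (v);     /* convert big-endian wire order to host order */
#endif
  return v;
}

int main (void)
{
  const unsigned char buf[] = { 0xff, 0x12, 0x34, 0x56, 0x78 };
  printf ("0x%08x\n", peek_be_u32 (buf + 1)); /* prints 0x12345678 */
  return 0;
}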
1412/*****************************************************************************/
1413
1281#if ECB_GCC_VERSION(3,0) || ECB_C99 1414#if ECB_GCC_VERSION(3,0) || ECB_C99
1282 #define ecb_mod(m,n) ((m) % (n) + ((m) % (n) < 0 ? (n) : 0)) 1415 #define ecb_mod(m,n) ((m) % (n) + ((m) % (n) < 0 ? (n) : 0))
1283#else 1416#else
1284 #define ecb_mod(m,n) ((m) < 0 ? ((n) - 1 - ((-1 - (m)) % (n))) : ((m) % (n))) 1417 #define ecb_mod(m,n) ((m) < 0 ? ((n) - 1 - ((-1 - (m)) % (n))) : ((m) % (n)))
1285#endif 1418#endif
1308 return N; 1441 return N;
1309 } 1442 }
1310#else 1443#else
1311 #define ecb_array_length(name) (sizeof (name) / sizeof (name [0])) 1444 #define ecb_array_length(name) (sizeof (name) / sizeof (name [0]))
1312#endif 1445#endif
1446
1447/*****************************************************************************/
1313 1448
1314ecb_function_ ecb_const uint32_t ecb_binary16_to_binary32 (uint32_t x); 1449ecb_function_ ecb_const uint32_t ecb_binary16_to_binary32 (uint32_t x);
1315ecb_function_ ecb_const uint32_t 1450ecb_function_ ecb_const uint32_t
1316ecb_binary16_to_binary32 (uint32_t x) 1451ecb_binary16_to_binary32 (uint32_t x)
1317{ 1452{
1426 || defined __sh__ \ 1561 || defined __sh__ \
1427 || defined _M_IX86 || defined ECB_MSVC_AMD64 || defined _M_IA64 \ 1562 || defined _M_IX86 || defined ECB_MSVC_AMD64 || defined _M_IA64 \
1428 || (defined __arm__ && (defined __ARM_EABI__ || defined __EABI__ || defined __VFP_FP__ || defined _WIN32_WCE || defined __ANDROID__)) \ 1563 || (defined __arm__ && (defined __ARM_EABI__ || defined __EABI__ || defined __VFP_FP__ || defined _WIN32_WCE || defined __ANDROID__)) \
1429 || defined __aarch64__ 1564 || defined __aarch64__
1430 #define ECB_STDFP 1 1565 #define ECB_STDFP 1
1431 #include <string.h> /* for memcpy */
1432#else 1566#else
1433 #define ECB_STDFP 0 1567 #define ECB_STDFP 0
1434#endif 1568#endif
1435 1569
1436#ifndef ECB_NO_LIBM 1570#ifndef ECB_NO_LIBM
1643# define inline_speed ecb_inline 1777# define inline_speed ecb_inline
1644#else 1778#else
1645# define inline_speed ecb_noinline static 1779# define inline_speed ecb_noinline static
1646#endif 1780#endif
1647 1781
1782/*****************************************************************************/
1783/* raw syscall wrappers */
1784
1785#if EV_NEED_SYSCALL
1786
1787#include <sys/syscall.h>
1788
1789/*
1790 * define some syscall wrappers for common architectures
1791 * this is mostly for nice looks during debugging, not performance.
1792 * our syscalls return < 0, not == -1, on error. which is good
1793 * enough for linux aio.
1794 * TODO: arm is also common nowadays, maybe even mips and x86
1795 * TODO: after implementing this, it suddenly looks like overkill, but its hard to remove...
1796 */
1797#if __GNUC__ && __linux && ECB_AMD64 && !EV_FEATURE_CODE
1798 /* the costly errno access probably kills this for size optimisation */
1799
1800 #define ev_syscall(nr,narg,arg1,arg2,arg3,arg4,arg5,arg6) \
1801 ({ \
1802 long res; \
1803 register unsigned long r6 __asm__ ("r9" ); \
1804 register unsigned long r5 __asm__ ("r8" ); \
1805 register unsigned long r4 __asm__ ("r10"); \
1806 register unsigned long r3 __asm__ ("rdx"); \
1807 register unsigned long r2 __asm__ ("rsi"); \
1808 register unsigned long r1 __asm__ ("rdi"); \
1809 if (narg >= 6) r6 = (unsigned long)(arg6); \
1810 if (narg >= 5) r5 = (unsigned long)(arg5); \
1811 if (narg >= 4) r4 = (unsigned long)(arg4); \
1812 if (narg >= 3) r3 = (unsigned long)(arg3); \
1813 if (narg >= 2) r2 = (unsigned long)(arg2); \
1814 if (narg >= 1) r1 = (unsigned long)(arg1); \
1815 __asm__ __volatile__ ( \
1816 "syscall\n\t" \
1817 : "=a" (res) \
1818 : "0" (nr), "r" (r1), "r" (r2), "r" (r3), "r" (r4), "r" (r5) \
1819 : "cc", "r11", "cx", "memory"); \
1820 errno = -res; \
1821 res; \
1822 })
1823
1824#endif
1825
1826#ifdef ev_syscall
1827 #define ev_syscall0(nr) ev_syscall (nr, 0, 0, 0, 0, 0, 0, 0)
1828 #define ev_syscall1(nr,arg1) ev_syscall (nr, 1, arg1, 0, 0, 0, 0, 0)
1829 #define ev_syscall2(nr,arg1,arg2) ev_syscall (nr, 2, arg1, arg2, 0, 0, 0, 0)
1830 #define ev_syscall3(nr,arg1,arg2,arg3) ev_syscall (nr, 3, arg1, arg2, arg3, 0, 0, 0)
1831 #define ev_syscall4(nr,arg1,arg2,arg3,arg4) ev_syscall (nr, 3, arg1, arg2, arg3, arg4, 0, 0)
1832 #define ev_syscall5(nr,arg1,arg2,arg3,arg4,arg5) ev_syscall (nr, 5, arg1, arg2, arg3, arg4, arg5, 0)
1833 #define ev_syscall6(nr,arg1,arg2,arg3,arg4,arg5,arg6) ev_syscall (nr, 6, arg1, arg2, arg3, arg4, arg5,arg6)
1834#else
1835 #define ev_syscall0(nr) syscall (nr)
1836 #define ev_syscall1(nr,arg1) syscall (nr, arg1)
1837 #define ev_syscall2(nr,arg1,arg2) syscall (nr, arg1, arg2)
1838 #define ev_syscall3(nr,arg1,arg2,arg3) syscall (nr, arg1, arg2, arg3)
1839 #define ev_syscall4(nr,arg1,arg2,arg3,arg4) syscall (nr, arg1, arg2, arg3, arg4)
1840 #define ev_syscall5(nr,arg1,arg2,arg3,arg4,arg5) syscall (nr, arg1, arg2, arg3, arg4, arg5)
1841 #define ev_syscall6(nr,arg1,arg2,arg3,arg4,arg5,arg6) syscall (nr, arg1, arg2, arg3, arg4, arg5,arg6)
1842#endif
1843
1844#endif
1845
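When the inline-assembly variant above is not selected, the ev_syscallN macros simply forward to syscall(2); the only difference from the asm path is that errors come back as -1 with errno set rather than as a negative return value. A trivial standalone use of that fallback path:

#include <stdio.h>
#include <sys/syscall.h>
#include <unistd.h>

int main (void)
{
  long pid = syscall (SYS_getpid); /* same effect as getpid (), via the raw syscall interface */
  printf ("pid via raw syscall: %ld\n", pid);
  return 0;
}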
1846/*****************************************************************************/
1847
1648#define NUMPRI (EV_MAXPRI - EV_MINPRI + 1) 1848#define NUMPRI (EV_MAXPRI - EV_MINPRI + 1)
1649 1849
1650#if EV_MINPRI == EV_MAXPRI 1850#if EV_MINPRI == EV_MAXPRI
1651# define ABSPRI(w) (((W)w), 0) 1851# define ABSPRI(w) (((W)w), 0)
1652#else 1852#else
1711 const ev_tstamp shift = sizeof (unsigned long) >= 8 ? 10000000000000000000. : 1000000000.; 1911 const ev_tstamp shift = sizeof (unsigned long) >= 8 ? 10000000000000000000. : 1000000000.;
1712#else 1912#else
1713 const ev_tstamp shift = sizeof (unsigned long) >= 8 ? 18446744073709551616. : 4294967296.; 1913 const ev_tstamp shift = sizeof (unsigned long) >= 8 ? 18446744073709551616. : 4294967296.;
1714#endif 1914#endif
1715 1915
1916 /* special treatment for negative arguments */
1917 if (ecb_expect_false (v < 0.))
1918 {
1919 ev_tstamp f = -ev_floor (-v);
1920
1921 return f - (f == v ? 0 : 1);
1922 }
1923
1716 /* argument too large for an unsigned long? */ 1924 /* argument too large for an unsigned long? then reduce it */
1717 if (ecb_expect_false (v >= shift)) 1925 if (ecb_expect_false (v >= shift))
1718 { 1926 {
1719 ev_tstamp f; 1927 ev_tstamp f;
1720 1928
1721 if (v == v - 1.) 1929 if (v == v - 1.)
1722 return v; /* very large number */ 1930 return v; /* very large numbers are assumed to be integer */
1723 1931
1724 f = shift * ev_floor (v * (1. / shift)); 1932 f = shift * ev_floor (v * (1. / shift));
1725 return f + ev_floor (v - f); 1933 return f + ev_floor (v - f);
1726 }
1727
1728 /* special treatment for negative args? */
1729 if (ecb_expect_false (v < 0.))
1730 {
1731 ev_tstamp f = -ev_floor (-v);
1732
1733 return f - (f == v ? 0 : 1);
1734 } 1934 }
1735 1935
1736 /* fits into an unsigned long */ 1936 /* fits into an unsigned long */
1737 return (unsigned long)v; 1937 return (unsigned long)v;
1738} 1938}
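The reordered ev_floor above handles negative arguments before the magnitude check because the final cast truncates toward zero. A condensed standalone sketch of just that cast-plus-adjustment idea; it omits the large-value reduction the real function performs and assumes the argument fits into an unsigned long:

#include <stdio.h>

static double my_floor (double v)
{
  if (v < 0.)
    {
      double f = -my_floor (-v);
      return f - (f == v ? 0 : 1); /* step down unless v was already integral */
    }

  return (double)(unsigned long)v; /* truncation toward zero == floor for v >= 0 */
}

int main (void)
{
  printf ("%g %g %g\n", my_floor (2.7), my_floor (-2.7), my_floor (-3.0));
  /* prints: 2 -3 -3 */
  return 0;
}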
1882{ 2082{
1883 WL head; 2083 WL head;
1884 unsigned char events; /* the events watched for */ 2084 unsigned char events; /* the events watched for */
1885 unsigned char reify; /* flag set when this ANFD needs reification (EV_ANFD_REIFY, EV__IOFDSET) */ 2085 unsigned char reify; /* flag set when this ANFD needs reification (EV_ANFD_REIFY, EV__IOFDSET) */
1886 unsigned char emask; /* some backends store the actual kernel mask in here */ 2086 unsigned char emask; /* some backends store the actual kernel mask in here */
1887 unsigned char unused; 2087 unsigned char eflags; /* flags field for use by backends */
1888#if EV_USE_EPOLL 2088#if EV_USE_EPOLL
1889 unsigned int egen; /* generation counter to counter epoll bugs */ 2089 unsigned int egen; /* generation counter to counter epoll bugs */
1890#endif 2090#endif
1891#if EV_SELECT_IS_WINSOCKET || EV_USE_IOCP 2091#if EV_SELECT_IS_WINSOCKET || EV_USE_IOCP
1892 SOCKET handle; 2092 SOCKET handle;
1946 static struct ev_loop default_loop_struct; 2146 static struct ev_loop default_loop_struct;
1947 EV_API_DECL struct ev_loop *ev_default_loop_ptr = 0; /* needs to be initialised to make it a definition despite extern */ 2147 EV_API_DECL struct ev_loop *ev_default_loop_ptr = 0; /* needs to be initialised to make it a definition despite extern */
1948 2148
1949#else 2149#else
1950 2150
1951 EV_API_DECL ev_tstamp ev_rt_now = 0; /* needs to be initialised to make it a definition despite extern */ 2151 EV_API_DECL ev_tstamp ev_rt_now = EV_TS_CONST (0.); /* needs to be initialised to make it a definition despite extern */
1952 #define VAR(name,decl) static decl; 2152 #define VAR(name,decl) static decl;
1953 #include "ev_vars.h" 2153 #include "ev_vars.h"
1954 #undef VAR 2154 #undef VAR
1955 2155
1956 static int ev_default_loop_ptr; 2156 static int ev_default_loop_ptr;
1978#if EV_USE_REALTIME 2178#if EV_USE_REALTIME
1979 if (ecb_expect_true (have_realtime)) 2179 if (ecb_expect_true (have_realtime))
1980 { 2180 {
1981 struct timespec ts; 2181 struct timespec ts;
1982 clock_gettime (CLOCK_REALTIME, &ts); 2182 clock_gettime (CLOCK_REALTIME, &ts);
1983 return ts.tv_sec + ts.tv_nsec * 1e-9; 2183 return EV_TS_GET (ts);
1984 } 2184 }
1985#endif 2185#endif
1986 2186
2187 {
1987 struct timeval tv; 2188 struct timeval tv;
1988 gettimeofday (&tv, 0); 2189 gettimeofday (&tv, 0);
1989 return tv.tv_sec + tv.tv_usec * 1e-6; 2190 return EV_TV_GET (tv);
2191 }
1990} 2192}
1991#endif 2193#endif
1992 2194
1993inline_size ev_tstamp 2195inline_size ev_tstamp
1994get_clock (void) 2196get_clock (void)
1996#if EV_USE_MONOTONIC 2198#if EV_USE_MONOTONIC
1997 if (ecb_expect_true (have_monotonic)) 2199 if (ecb_expect_true (have_monotonic))
1998 { 2200 {
1999 struct timespec ts; 2201 struct timespec ts;
2000 clock_gettime (CLOCK_MONOTONIC, &ts); 2202 clock_gettime (CLOCK_MONOTONIC, &ts);
2001 return ts.tv_sec + ts.tv_nsec * 1e-9; 2203 return EV_TS_GET (ts);
2002 } 2204 }
2003#endif 2205#endif
2004 2206
2005 return ev_time (); 2207 return ev_time ();
2006} 2208}
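For orientation, the two clocks these helpers read: CLOCK_REALTIME can be stepped by the administrator or NTP (hence all the time-jump machinery), while CLOCK_MONOTONIC only moves forward and backs the relative timers. A standalone read of both, matching the conversions above:

#include <stdio.h>
#include <time.h>

int main (void)
{
  struct timespec rt, mono;

  clock_gettime (CLOCK_REALTIME,  &rt);
  clock_gettime (CLOCK_MONOTONIC, &mono);

  printf ("realtime:  %f\n", rt.tv_sec   + rt.tv_nsec   * 1e-9);
  printf ("monotonic: %f\n", mono.tv_sec + mono.tv_nsec * 1e-9);
  return 0;
}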
2014#endif 2216#endif
2015 2217
2016void 2218void
2017ev_sleep (ev_tstamp delay) EV_NOEXCEPT 2219ev_sleep (ev_tstamp delay) EV_NOEXCEPT
2018{ 2220{
2019 if (delay > 0.) 2221 if (delay > EV_TS_CONST (0.))
2020 { 2222 {
2021#if EV_USE_NANOSLEEP 2223#if EV_USE_NANOSLEEP
2022 struct timespec ts; 2224 struct timespec ts;
2023 2225
2024 EV_TS_SET (ts, delay); 2226 EV_TS_SET (ts, delay);
2025 nanosleep (&ts, 0); 2227 nanosleep (&ts, 0);
2026#elif defined _WIN32 2228#elif defined _WIN32
2027 /* maybe this should round up, as ms is very low resolution */ 2229 /* maybe this should round up, as ms is very low resolution */
2028 /* compared to select (µs) or nanosleep (ns) */ 2230 /* compared to select (µs) or nanosleep (ns) */
2029 Sleep ((unsigned long)(delay * 1e3)); 2231 Sleep ((unsigned long)(EV_TS_TO_MSEC (delay)));
2030#else 2232#else
2031 struct timeval tv; 2233 struct timeval tv;
2032 2234
2033 /* here we rely on sys/time.h + sys/types.h + unistd.h providing select */ 2235 /* here we rely on sys/time.h + sys/types.h + unistd.h providing select */
2034 /* something not guaranteed by newer posix versions, but guaranteed */ 2236 /* something not guaranteed by newer posix versions, but guaranteed */
2194inline_size void 2396inline_size void
2195fd_reify (EV_P) 2397fd_reify (EV_P)
2196{ 2398{
2197 int i; 2399 int i;
2198 2400
2401 /* most backends do not modify the fdchanges list in backend_modify.
2402 * except io_uring, which has fixed-size buffers which might force us
2403 * to handle events in backend_modify, causing fdchanges to be amended,
2404 * which could result in an endless loop.
2405 * to avoid this, we do not dynamically handle fds that were added
2406 * during fd_reify. that means that for those backends, fdchangecnt
2407 * might be non-zero during poll, which must cause them to not block.
2408 * to not put too much of a burden on other backends, this detail
2409 * needs to be handled in the backend.
2410 */
2411 int changecnt = fdchangecnt;
2412
2199#if EV_SELECT_IS_WINSOCKET || EV_USE_IOCP 2413#if EV_SELECT_IS_WINSOCKET || EV_USE_IOCP
2200 for (i = 0; i < fdchangecnt; ++i) 2414 for (i = 0; i < changecnt; ++i)
2201 { 2415 {
2202 int fd = fdchanges [i]; 2416 int fd = fdchanges [i];
2203 ANFD *anfd = anfds + fd; 2417 ANFD *anfd = anfds + fd;
2204 2418
2205 if (anfd->reify & EV__IOFDSET && anfd->head) 2419 if (anfd->reify & EV__IOFDSET && anfd->head)
2219 } 2433 }
2220 } 2434 }
2221 } 2435 }
2222#endif 2436#endif
2223 2437
2224 for (i = 0; i < fdchangecnt; ++i) 2438 for (i = 0; i < changecnt; ++i)
2225 { 2439 {
2226 int fd = fdchanges [i]; 2440 int fd = fdchanges [i];
2227 ANFD *anfd = anfds + fd; 2441 ANFD *anfd = anfds + fd;
2228 ev_io *w; 2442 ev_io *w;
2229 2443
2245 2459
2246 if (o_reify & EV__IOFDSET) 2460 if (o_reify & EV__IOFDSET)
2247 backend_modify (EV_A_ fd, o_events, anfd->events); 2461 backend_modify (EV_A_ fd, o_events, anfd->events);
2248 } 2462 }
2249 2463
2464 /* normally, fdchangecnt hasn't changed. if it has, then new fds have been added.
2465 * this is a rare case (see beginning comment in this function), so we copy them to the
2466 * front and hope the backend handles this case.
2467 */
2468 if (ecb_expect_false (fdchangecnt != changecnt))
2469 memmove (fdchanges, fdchanges + changecnt, (fdchangecnt - changecnt) * sizeof (*fdchanges));
2470
2250 fdchangecnt = 0; 2471 fdchangecnt -= changecnt;
2251} 2472}
2252 2473
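The new compaction step at the end of fd_reify, in isolation: any fds appended to fdchanges while the first changecnt entries were being processed are shifted to the front so they get picked up on the next reification. A toy standalone version with made-up fd numbers:

#include <stdio.h>
#include <string.h>

int main (void)
{
  int fdchanges [8] = { 3, 5, 7, 9, 11 };
  int fdchangecnt   = 5; /* total entries; the last two were added during processing */
  int changecnt     = 3; /* entries that were actually processed */
  int i;

  memmove (fdchanges, fdchanges + changecnt,
           (fdchangecnt - changecnt) * sizeof (*fdchanges));
  fdchangecnt -= changecnt;

  for (i = 0; i < fdchangecnt; ++i)
    printf ("%d ", fdchanges [i]); /* prints: 9 11 */

  printf ("\n");
  return 0;
}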
2253/* something about the given fd changed */ 2474/* something about the given fd changed */
2254inline_size 2475inline_size
2255void 2476void
2256fd_change (EV_P_ int fd, int flags) 2477fd_change (EV_P_ int fd, int flags)
2257{ 2478{
2258 unsigned char reify = anfds [fd].reify; 2479 unsigned char reify = anfds [fd].reify;
2259 anfds [fd].reify |= flags; 2480 anfds [fd].reify = reify | flags;
2260 2481
2261 if (ecb_expect_true (!reify)) 2482 if (ecb_expect_true (!reify))
2262 { 2483 {
2263 ++fdchangecnt; 2484 ++fdchangecnt;
2264 array_needsize (int, fdchanges, fdchangemax, fdchangecnt, array_needsize_noinit); 2485 array_needsize (int, fdchanges, fdchangemax, fdchangecnt, array_needsize_noinit);
2384 2605
2385 /* find minimum child */ 2606 /* find minimum child */
2386 if (ecb_expect_true (pos + DHEAP - 1 < E)) 2607 if (ecb_expect_true (pos + DHEAP - 1 < E))
2387 { 2608 {
2388 /* fast path */ (minpos = pos + 0), (minat = ANHE_at (*minpos)); 2609 /* fast path */ (minpos = pos + 0), (minat = ANHE_at (*minpos));
2389 if ( ANHE_at (pos [1]) < minat) (minpos = pos + 1), (minat = ANHE_at (*minpos)); 2610 if ( minat > ANHE_at (pos [1])) (minpos = pos + 1), (minat = ANHE_at (*minpos));
2390 if ( ANHE_at (pos [2]) < minat) (minpos = pos + 2), (minat = ANHE_at (*minpos)); 2611 if ( minat > ANHE_at (pos [2])) (minpos = pos + 2), (minat = ANHE_at (*minpos));
2391 if ( ANHE_at (pos [3]) < minat) (minpos = pos + 3), (minat = ANHE_at (*minpos)); 2612 if ( minat > ANHE_at (pos [3])) (minpos = pos + 3), (minat = ANHE_at (*minpos));
2392 } 2613 }
2393 else if (pos < E) 2614 else if (pos < E)
2394 { 2615 {
2395 /* slow path */ (minpos = pos + 0), (minat = ANHE_at (*minpos)); 2616 /* slow path */ (minpos = pos + 0), (minat = ANHE_at (*minpos));
2396 if (pos + 1 < E && ANHE_at (pos [1]) < minat) (minpos = pos + 1), (minat = ANHE_at (*minpos)); 2617 if (pos + 1 < E && minat > ANHE_at (pos [1])) (minpos = pos + 1), (minat = ANHE_at (*minpos));
2397 if (pos + 2 < E && ANHE_at (pos [2]) < minat) (minpos = pos + 2), (minat = ANHE_at (*minpos)); 2618 if (pos + 2 < E && minat > ANHE_at (pos [2])) (minpos = pos + 2), (minat = ANHE_at (*minpos));
2398 if (pos + 3 < E && ANHE_at (pos [3]) < minat) (minpos = pos + 3), (minat = ANHE_at (*minpos)); 2619 if (pos + 3 < E && minat > ANHE_at (pos [3])) (minpos = pos + 3), (minat = ANHE_at (*minpos));
2399 } 2620 }
2400 else 2621 else
2401 break; 2622 break;
2402 2623
2403 if (ANHE_at (he) <= minat) 2624 if (ANHE_at (he) <= minat)
2411 2632
2412 heap [k] = he; 2633 heap [k] = he;
2413 ev_active (ANHE_w (he)) = k; 2634 ev_active (ANHE_w (he)) = k;
2414} 2635}
2415 2636
2416#else /* 4HEAP */ 2637#else /* not 4HEAP */
2417 2638
2418#define HEAP0 1 2639#define HEAP0 1
2419#define HPARENT(k) ((k) >> 1) 2640#define HPARENT(k) ((k) >> 1)
2420#define UPHEAP_DONE(p,k) (!(p)) 2641#define UPHEAP_DONE(p,k) (!(p))
2421 2642
2493 upheap (heap, i + HEAP0); 2714 upheap (heap, i + HEAP0);
2494} 2715}
2495 2716
2496/*****************************************************************************/ 2717/*****************************************************************************/
2497 2718
2498/* associate signal watchers to a signal signal */ 2719/* associate signal watchers to a signal */
2499typedef struct 2720typedef struct
2500{ 2721{
2501 EV_ATOMIC_T pending; 2722 EV_ATOMIC_T pending;
2502#if EV_MULTIPLICITY 2723#if EV_MULTIPLICITY
2503 EV_P; 2724 EV_P;
2807 3028
2808#endif 3029#endif
2809 3030
2810/*****************************************************************************/ 3031/*****************************************************************************/
2811 3032
3033#if EV_USE_TIMERFD
3034
3035static void periodics_reschedule (EV_P);
3036
3037static void
3038timerfdcb (EV_P_ ev_io *iow, int revents)
3039{
3040 struct itimerspec its = { 0 };
3041
3042 its.it_value.tv_sec = ev_rt_now + (int)MAX_BLOCKTIME2;
3043 timerfd_settime (timerfd, TFD_TIMER_ABSTIME | TFD_TIMER_CANCEL_ON_SET, &its, 0);
3044
3045 ev_rt_now = ev_time ();
3046 /* periodics_reschedule only needs ev_rt_now */
3047 /* but maybe in the future we want the full treatment. */
3048 /*
3049 now_floor = EV_TS_CONST (0.);
3050 time_update (EV_A_ EV_TSTAMP_HUGE);
3051 */
3052#if EV_PERIODIC_ENABLE
3053 periodics_reschedule (EV_A);
3054#endif
3055}
3056
3057ecb_noinline ecb_cold
3058static void
3059evtimerfd_init (EV_P)
3060{
3061 if (!ev_is_active (&timerfd_w))
3062 {
3063 timerfd = timerfd_create (CLOCK_REALTIME, TFD_NONBLOCK | TFD_CLOEXEC);
3064
3065 if (timerfd >= 0)
3066 {
3067 fd_intern (timerfd); /* just to be sure */
3068
3069 ev_io_init (&timerfd_w, timerfdcb, timerfd, EV_READ);
3070 ev_set_priority (&timerfd_w, EV_MINPRI);
3071 ev_io_start (EV_A_ &timerfd_w);
3072 ev_unref (EV_A); /* watcher should not keep loop alive */
3073
3074 /* (re-) arm timer */
3075 timerfdcb (EV_A_ 0, 0);
3076 }
3077 }
3078}
3079
3080#endif
3081
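The timerfdcb/evtimerfd_init pair above leans on a Linux-specific feature: a CLOCK_REALTIME timerfd armed with TFD_TIMER_CANCEL_ON_SET makes a pending read fail with ECANCELED whenever the wall clock is set, so the loop learns about realtime jumps without polling ev_time(). A minimal standalone demonstration (error handling omitted):

#include <errno.h>
#include <stdint.h>
#include <stdio.h>
#include <sys/timerfd.h>
#include <time.h>
#include <unistd.h>

int main (void)
{
  int fd = timerfd_create (CLOCK_REALTIME, TFD_CLOEXEC);

  struct itimerspec its = { 0 };
  its.it_value.tv_sec = time (0) + 3600; /* some absolute point in the future */

  timerfd_settime (fd, TFD_TIMER_ABSTIME | TFD_TIMER_CANCEL_ON_SET, &its, 0);

  uint64_t expirations;
  if (read (fd, &expirations, sizeof (expirations)) < 0 && errno == ECANCELED)
    puts ("realtime clock was changed"); /* e.g. by settimeofday () or an NTP step */

  close (fd);
  return 0;
}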
3082/*****************************************************************************/
3083
2812#if EV_USE_IOCP 3084#if EV_USE_IOCP
2813# include "ev_iocp.c" 3085# include "ev_iocp.c"
2814#endif 3086#endif
2815#if EV_USE_PORT 3087#if EV_USE_PORT
2816# include "ev_port.c" 3088# include "ev_port.c"
2862unsigned int 3134unsigned int
2863ev_supported_backends (void) EV_NOEXCEPT 3135ev_supported_backends (void) EV_NOEXCEPT
2864{ 3136{
2865 unsigned int flags = 0; 3137 unsigned int flags = 0;
2866 3138
2867 if (EV_USE_PORT ) flags |= EVBACKEND_PORT; 3139 if (EV_USE_PORT ) flags |= EVBACKEND_PORT;
2868 if (EV_USE_KQUEUE ) flags |= EVBACKEND_KQUEUE; 3140 if (EV_USE_KQUEUE ) flags |= EVBACKEND_KQUEUE;
2869 if (EV_USE_EPOLL ) flags |= EVBACKEND_EPOLL; 3141 if (EV_USE_EPOLL ) flags |= EVBACKEND_EPOLL;
2870 if (EV_USE_LINUXAIO) flags |= EVBACKEND_LINUXAIO; 3142 if (EV_USE_LINUXAIO ) flags |= EVBACKEND_LINUXAIO;
2871 if (EV_USE_IOURING ) flags |= EVBACKEND_IOURING; 3143 if (EV_USE_IOURING && ev_linux_version () >= 0x050601) flags |= EVBACKEND_IOURING; /* 5.6.1+ */
2872 if (EV_USE_POLL ) flags |= EVBACKEND_POLL; 3144 if (EV_USE_POLL ) flags |= EVBACKEND_POLL;
2873 if (EV_USE_SELECT ) flags |= EVBACKEND_SELECT; 3145 if (EV_USE_SELECT ) flags |= EVBACKEND_SELECT;
2874 3146
2875 return flags; 3147 return flags;
2876} 3148}
2877 3149
2878ecb_cold 3150ecb_cold
2879unsigned int 3151unsigned int
2897 3169
2898 /* TODO: linuxaio is very experimental */ 3170 /* TODO: linuxaio is very experimental */
2899#if !EV_RECOMMEND_LINUXAIO 3171#if !EV_RECOMMEND_LINUXAIO
2900 flags &= ~EVBACKEND_LINUXAIO; 3172 flags &= ~EVBACKEND_LINUXAIO;
2901#endif 3173#endif
2902 /* TODO: linuxaio is super experimental */ 3174 /* TODO: iouring is super experimental */
2903#if !EV_RECOMMEND_IOURING 3175#if !EV_RECOMMEND_IOURING
2904 flags &= ~EVBACKEND_IOURING; 3176 flags &= ~EVBACKEND_IOURING;
2905#endif 3177#endif
2906 3178
2907 return flags; 3179 return flags;
2909 3181
2910ecb_cold 3182ecb_cold
2911unsigned int 3183unsigned int
2912ev_embeddable_backends (void) EV_NOEXCEPT 3184ev_embeddable_backends (void) EV_NOEXCEPT
2913{ 3185{
2914 int flags = EVBACKEND_EPOLL | EVBACKEND_KQUEUE | EVBACKEND_PORT; 3186 int flags = EVBACKEND_EPOLL | EVBACKEND_KQUEUE | EVBACKEND_PORT | EVBACKEND_IOURING;
2915 3187
2916 /* epoll embeddability broken on all linux versions up to at least 2.6.23 */ 3188 /* epoll embeddability broken on all linux versions up to at least 2.6.23 */
2917 if (ev_linux_version () < 0x020620) /* disable it on linux < 2.6.32 */ 3189 if (ev_linux_version () < 0x020620) /* disable it on linux < 2.6.32 */
2918 flags &= ~EVBACKEND_EPOLL; 3190 flags &= ~EVBACKEND_EPOLL;
3191
3192 /* EVBACKEND_LINUXAIO is theoretically embeddable, but suffers from a performance overhead */
2919 3193
2920 return flags; 3194 return flags;
2921} 3195}
2922 3196
2923unsigned int 3197unsigned int
3041 fs_fd = flags & EVFLAG_NOINOTIFY ? -1 : -2; 3315 fs_fd = flags & EVFLAG_NOINOTIFY ? -1 : -2;
3042#endif 3316#endif
3043#if EV_USE_SIGNALFD 3317#if EV_USE_SIGNALFD
3044 sigfd = flags & EVFLAG_SIGNALFD ? -2 : -1; 3318 sigfd = flags & EVFLAG_SIGNALFD ? -2 : -1;
3045#endif 3319#endif
3320#if EV_USE_TIMERFD
3321 timerfd = flags & EVFLAG_NOTIMERFD ? -1 : -2;
3322#endif
3046 3323
3047 if (!(flags & EVBACKEND_MASK)) 3324 if (!(flags & EVBACKEND_MASK))
3048 flags |= ev_recommended_backends (); 3325 flags |= ev_recommended_backends ();
3049 3326
3050#if EV_USE_IOCP 3327#if EV_USE_IOCP
3121 } 3398 }
3122 3399
3123#if EV_USE_SIGNALFD 3400#if EV_USE_SIGNALFD
3124 if (ev_is_active (&sigfd_w)) 3401 if (ev_is_active (&sigfd_w))
3125 close (sigfd); 3402 close (sigfd);
3403#endif
3404
3405#if EV_USE_TIMERFD
3406 if (ev_is_active (&timerfd_w))
3407 close (timerfd);
3126#endif 3408#endif
3127 3409
3128#if EV_USE_INOTIFY 3410#if EV_USE_INOTIFY
3129 if (fs_fd >= 0) 3411 if (fs_fd >= 0)
3130 close (fs_fd); 3412 close (fs_fd);
3223#endif 3505#endif
3224#if EV_USE_INOTIFY 3506#if EV_USE_INOTIFY
3225 infy_fork (EV_A); 3507 infy_fork (EV_A);
3226#endif 3508#endif
3227 3509
3510 if (postfork != 2)
3511 {
3512 #if EV_USE_SIGNALFD
3513 /* surprisingly, nothing needs to be done for signalfd, according to docs, it does the right thing on fork */
3514 #endif
3515
3516 #if EV_USE_TIMERFD
3517 if (ev_is_active (&timerfd_w))
3518 {
3519 ev_ref (EV_A);
3520 ev_io_stop (EV_A_ &timerfd_w);
3521
3522 close (timerfd);
3523 timerfd = -2;
3524
3525 evtimerfd_init (EV_A);
3526 /* reschedule periodics, in case we missed something */
3527 ev_feed_event (EV_A_ &timerfd_w, EV_CUSTOM);
3528 }
3529 #endif
3530
3228#if EV_SIGNAL_ENABLE || EV_ASYNC_ENABLE 3531 #if EV_SIGNAL_ENABLE || EV_ASYNC_ENABLE
3229 if (ev_is_active (&pipe_w) && postfork != 2) 3532 if (ev_is_active (&pipe_w))
3230 { 3533 {
3231 /* pipe_write_wanted must be false now, so modifying fd vars should be safe */ 3534 /* pipe_write_wanted must be false now, so modifying fd vars should be safe */
3232 3535
3233 ev_ref (EV_A); 3536 ev_ref (EV_A);
3234 ev_io_stop (EV_A_ &pipe_w); 3537 ev_io_stop (EV_A_ &pipe_w);
3235 3538
3236 if (evpipe [0] >= 0) 3539 if (evpipe [0] >= 0)
3237 EV_WIN32_CLOSE_FD (evpipe [0]); 3540 EV_WIN32_CLOSE_FD (evpipe [0]);
3238 3541
3239 evpipe_init (EV_A); 3542 evpipe_init (EV_A);
3240 /* iterate over everything, in case we missed something before */ 3543 /* iterate over everything, in case we missed something before */
3241 ev_feed_event (EV_A_ &pipe_w, EV_CUSTOM); 3544 ev_feed_event (EV_A_ &pipe_w, EV_CUSTOM);
3545 }
3546 #endif
3242 } 3547 }
3243#endif
3244 3548
3245 postfork = 0; 3549 postfork = 0;
3246} 3550}
3247 3551
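The postfork handling above is driven from the application side: after fork(), the child calls ev_loop_fork() on each loop it intends to keep using, and the re-creation of the pipe, timerfd and backend happens lazily before the next poll. A minimal sketch of the expected call pattern:

#include <unistd.h>
#include "ev.h"

int main (void)
{
  struct ev_loop *loop = ev_default_loop (0);

  pid_t pid = fork ();

  if (pid == 0)
    ev_loop_fork (loop); /* child: mark the loop so kernel state is re-created before the next iteration */

  ev_run (loop, 0); /* parent and child each keep running their own copy of the loop */
  return 0;
}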
3248#if EV_MULTIPLICITY 3552#if EV_MULTIPLICITY
3518 { 3822 {
3519 ev_at (w) += w->repeat; 3823 ev_at (w) += w->repeat;
3520 if (ev_at (w) < mn_now) 3824 if (ev_at (w) < mn_now)
3521 ev_at (w) = mn_now; 3825 ev_at (w) = mn_now;
3522 3826
3523 assert (("libev: negative ev_timer repeat value found while processing timers", w->repeat > 0.)); 3827 assert (("libev: negative ev_timer repeat value found while processing timers", w->repeat > EV_TS_CONST (0.)));
3524 3828
3525 ANHE_at_cache (timers [HEAP0]); 3829 ANHE_at_cache (timers [HEAP0]);
3526 downheap (timers, timercnt, HEAP0); 3830 downheap (timers, timercnt, HEAP0);
3527 } 3831 }
3528 else 3832 else
3659 3963
3660 mn_now = get_clock (); 3964 mn_now = get_clock ();
3661 3965
3662 /* only fetch the realtime clock every 0.5*MIN_TIMEJUMP seconds */ 3966 /* only fetch the realtime clock every 0.5*MIN_TIMEJUMP seconds */
3663 /* interpolate in the meantime */ 3967 /* interpolate in the meantime */
3664 if (ecb_expect_true (mn_now - now_floor < MIN_TIMEJUMP * .5)) 3968 if (ecb_expect_true (mn_now - now_floor < EV_TS_CONST (MIN_TIMEJUMP * .5)))
3665 { 3969 {
3666 ev_rt_now = rtmn_diff + mn_now; 3970 ev_rt_now = rtmn_diff + mn_now;
3667 return; 3971 return;
3668 } 3972 }
3669 3973
3683 ev_tstamp diff; 3987 ev_tstamp diff;
3684 rtmn_diff = ev_rt_now - mn_now; 3988 rtmn_diff = ev_rt_now - mn_now;
3685 3989
3686 diff = odiff - rtmn_diff; 3990 diff = odiff - rtmn_diff;
3687 3991
3688 if (ecb_expect_true ((diff < 0. ? -diff : diff) < MIN_TIMEJUMP)) 3992 if (ecb_expect_true ((diff < EV_TS_CONST (0.) ? -diff : diff) < EV_TS_CONST (MIN_TIMEJUMP)))
3689 return; /* all is well */ 3993 return; /* all is well */
3690 3994
3691 ev_rt_now = ev_time (); 3995 ev_rt_now = ev_time ();
3692 mn_now = get_clock (); 3996 mn_now = get_clock ();
3693 now_floor = mn_now; 3997 now_floor = mn_now;
3702 else 4006 else
3703#endif 4007#endif
3704 { 4008 {
3705 ev_rt_now = ev_time (); 4009 ev_rt_now = ev_time ();
3706 4010
3707 if (ecb_expect_false (mn_now > ev_rt_now || ev_rt_now > mn_now + max_block + MIN_TIMEJUMP)) 4011 if (ecb_expect_false (mn_now > ev_rt_now || ev_rt_now > mn_now + max_block + EV_TS_CONST (MIN_TIMEJUMP)))
3708 { 4012 {
3709 /* adjust timers. this is easy, as the offset is the same for all of them */ 4013 /* adjust timers. this is easy, as the offset is the same for all of them */
3710 timers_reschedule (EV_A_ ev_rt_now - mn_now); 4014 timers_reschedule (EV_A_ ev_rt_now - mn_now);
3711#if EV_PERIODIC_ENABLE 4015#if EV_PERIODIC_ENABLE
3712 periodics_reschedule (EV_A); 4016 periodics_reschedule (EV_A);
3781 4085
3782 /* remember old timestamp for io_blocktime calculation */ 4086 /* remember old timestamp for io_blocktime calculation */
3783 ev_tstamp prev_mn_now = mn_now; 4087 ev_tstamp prev_mn_now = mn_now;
3784 4088
3785 /* update time to cancel out callback processing overhead */ 4089 /* update time to cancel out callback processing overhead */
3786 time_update (EV_A_ 1e100); 4090 time_update (EV_A_ EV_TS_CONST (EV_TSTAMP_HUGE));
3787 4091
3788 /* from now on, we want a pipe-wake-up */ 4092 /* from now on, we want a pipe-wake-up */
3789 pipe_write_wanted = 1; 4093 pipe_write_wanted = 1;
3790 4094
3791 ECB_MEMORY_FENCE; /* make sure pipe_write_wanted is visible before we check for potential skips */ 4095 ECB_MEMORY_FENCE; /* make sure pipe_write_wanted is visible before we check for potential skips */
3792 4096
3793 if (ecb_expect_true (!(flags & EVRUN_NOWAIT || idleall || !activecnt || pipe_write_skipped))) 4097 if (ecb_expect_true (!(flags & EVRUN_NOWAIT || idleall || !activecnt || pipe_write_skipped)))
3794 { 4098 {
3795 waittime = MAX_BLOCKTIME; 4099 waittime = EV_TS_CONST (MAX_BLOCKTIME);
4100
4101#if EV_USE_MONOTONIC
4102 if (ecb_expect_true (have_monotonic))
4103 {
4104#if EV_USE_TIMERFD
4105 /* sleep a lot longer when we can reliably detect timejumps */
4106 if (ecb_expect_true (timerfd != -1))
4107 waittime = EV_TS_CONST (MAX_BLOCKTIME2);
4108#endif
4109#if !EV_PERIODIC_ENABLE
4110 /* without periodics but with monotonic clock there is no need */
4111 /* for any time jump detection, so sleep longer */
4112 waittime = EV_TS_CONST (MAX_BLOCKTIME2);
4113#endif
4114 }
4115#endif
3796 4116
3797 if (timercnt) 4117 if (timercnt)
3798 { 4118 {
3799 ev_tstamp to = ANHE_at (timers [HEAP0]) - mn_now; 4119 ev_tstamp to = ANHE_at (timers [HEAP0]) - mn_now;
3800 if (waittime > to) waittime = to; 4120 if (waittime > to) waittime = to;
3810 4130
3811 /* don't let timeouts decrease the waittime below timeout_blocktime */ 4131 /* don't let timeouts decrease the waittime below timeout_blocktime */
3812 if (ecb_expect_false (waittime < timeout_blocktime)) 4132 if (ecb_expect_false (waittime < timeout_blocktime))
3813 waittime = timeout_blocktime; 4133 waittime = timeout_blocktime;
3814 4134
3815 /* at this point, we NEED to wait, so we have to ensure */ 4135 /* now there are two more special cases left, either we have
3816 /* to pass a minimum nonzero value to the backend */ 4136 * already-expired timers, so we should not sleep, or we have timers
4137 * that expire very soon, in which case we need to wait for a minimum
4138 * amount of time for some event loop backends.
4139 */
3817 if (ecb_expect_false (waittime < backend_mintime)) 4140 if (ecb_expect_false (waittime < backend_mintime))
4141 waittime = waittime <= EV_TS_CONST (0.)
4142 ? EV_TS_CONST (0.)
3818 waittime = backend_mintime; 4143 : backend_mintime;
3819 4144
3820 /* extra check because io_blocktime is commonly 0 */ 4145 /* extra check because io_blocktime is commonly 0 */
3821 if (ecb_expect_false (io_blocktime)) 4146 if (ecb_expect_false (io_blocktime))
3822 { 4147 {
3823 sleeptime = io_blocktime - (mn_now - prev_mn_now); 4148 sleeptime = io_blocktime - (mn_now - prev_mn_now);
3824 4149
3825 if (sleeptime > waittime - backend_mintime) 4150 if (sleeptime > waittime - backend_mintime)
3826 sleeptime = waittime - backend_mintime; 4151 sleeptime = waittime - backend_mintime;
3827 4152
3828 if (ecb_expect_true (sleeptime > 0.)) 4153 if (ecb_expect_true (sleeptime > EV_TS_CONST (0.)))
3829 { 4154 {
3830 ev_sleep (sleeptime); 4155 ev_sleep (sleeptime);
3831 waittime -= sleeptime; 4156 waittime -= sleeptime;
3832 } 4157 }
3833 } 4158 }
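The io_blocktime and timeout_blocktime values clamped above are the knobs exposed publicly as ev_set_io_collect_interval and ev_set_timeout_collect_interval; raising them trades latency for fewer loop iterations. Illustrative values only:

#include "ev.h"

int main (void)
{
  struct ev_loop *loop = ev_default_loop (0);

  /* allow the loop to sleep up to 50ms extra so more I/O events are collected per iteration */
  ev_set_io_collect_interval (loop, 0.05);

  /* allow timers to fire up to 10ms late in exchange for fewer wakeups */
  ev_set_timeout_collect_interval (loop, 0.01);

  ev_run (loop, 0);
  return 0;
}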
3907} 4232}
3908 4233
3909void 4234void
3910ev_now_update (EV_P) EV_NOEXCEPT 4235ev_now_update (EV_P) EV_NOEXCEPT
3911{ 4236{
3912 time_update (EV_A_ 1e100); 4237 time_update (EV_A_ EV_TSTAMP_HUGE);
3913} 4238}
3914 4239
3915void 4240void
3916ev_suspend (EV_P) EV_NOEXCEPT 4241ev_suspend (EV_P) EV_NOEXCEPT
3917{ 4242{
4148} 4473}
4149 4474
4150ev_tstamp 4475ev_tstamp
4151ev_timer_remaining (EV_P_ ev_timer *w) EV_NOEXCEPT 4476ev_timer_remaining (EV_P_ ev_timer *w) EV_NOEXCEPT
4152{ 4477{
4153 return ev_at (w) - (ev_is_active (w) ? mn_now : 0.); 4478 return ev_at (w) - (ev_is_active (w) ? mn_now : EV_TS_CONST (0.));
4154} 4479}
4155 4480
4156#if EV_PERIODIC_ENABLE 4481#if EV_PERIODIC_ENABLE
4157ecb_noinline 4482ecb_noinline
4158void 4483void
4159ev_periodic_start (EV_P_ ev_periodic *w) EV_NOEXCEPT 4484ev_periodic_start (EV_P_ ev_periodic *w) EV_NOEXCEPT
4160{ 4485{
4161 if (ecb_expect_false (ev_is_active (w))) 4486 if (ecb_expect_false (ev_is_active (w)))
4162 return; 4487 return;
4488
4489#if EV_USE_TIMERFD
4490 if (timerfd == -2)
4491 evtimerfd_init (EV_A);
4492#endif
4163 4493
4164 if (w->reschedule_cb) 4494 if (w->reschedule_cb)
4165 ev_at (w) = w->reschedule_cb (w, ev_rt_now); 4495 ev_at (w) = w->reschedule_cb (w, ev_rt_now);
4166 else if (w->interval) 4496 else if (w->interval)
4167 { 4497 {
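The hunk above ends inside ev_periodic_start; the new lazy evtimerfd_init call there matters precisely for watchers like the following, which fire on wall-clock boundaries and therefore need realtime-jump detection. Typical usage, roughly as in the libev documentation:

#include <stdio.h>
#include "ev.h"

static void
minute_cb (EV_P_ ev_periodic *w, int revents)
{
  printf ("wall clock hit a full minute: %f\n", ev_now (EV_A));
}

int main (void)
{
  struct ev_loop *loop = ev_default_loop (0);
  ev_periodic tick;

  ev_periodic_init (&tick, minute_cb, 0., 60., 0); /* offset 0, interval 60s, no reschedule callback */
  ev_periodic_start (loop, &tick);

  ev_run (loop, 0);
  return 0;
}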
4909 ev_run (EV_A_ EVRUN_NOWAIT); 5239 ev_run (EV_A_ EVRUN_NOWAIT);
4910 } 5240 }
4911 } 5241 }
4912} 5242}
4913 5243
5244#if EV_FORK_ENABLE
4914static void 5245static void
4915embed_fork_cb (EV_P_ ev_fork *fork_w, int revents) 5246embed_fork_cb (EV_P_ ev_fork *fork_w, int revents)
4916{ 5247{
4917 ev_embed *w = (ev_embed *)(((char *)fork_w) - offsetof (ev_embed, fork)); 5248 ev_embed *w = (ev_embed *)(((char *)fork_w) - offsetof (ev_embed, fork));
4918 5249
4925 ev_run (EV_A_ EVRUN_NOWAIT); 5256 ev_run (EV_A_ EVRUN_NOWAIT);
4926 } 5257 }
4927 5258
4928 ev_embed_start (EV_A_ w); 5259 ev_embed_start (EV_A_ w);
4929} 5260}
5261#endif
4930 5262
4931#if 0 5263#if 0
4932static void 5264static void
4933embed_idle_cb (EV_P_ ev_idle *idle, int revents) 5265embed_idle_cb (EV_P_ ev_idle *idle, int revents)
4934{ 5266{
4955 5287
4956 ev_prepare_init (&w->prepare, embed_prepare_cb); 5288 ev_prepare_init (&w->prepare, embed_prepare_cb);
4957 ev_set_priority (&w->prepare, EV_MINPRI); 5289 ev_set_priority (&w->prepare, EV_MINPRI);
4958 ev_prepare_start (EV_A_ &w->prepare); 5290 ev_prepare_start (EV_A_ &w->prepare);
4959 5291
5292#if EV_FORK_ENABLE
4960 ev_fork_init (&w->fork, embed_fork_cb); 5293 ev_fork_init (&w->fork, embed_fork_cb);
4961 ev_fork_start (EV_A_ &w->fork); 5294 ev_fork_start (EV_A_ &w->fork);
5295#endif
4962 5296
4963 /*ev_idle_init (&w->idle, e,bed_idle_cb);*/ 5297 /*ev_idle_init (&w->idle, e,bed_idle_cb);*/
4964 5298
4965 ev_start (EV_A_ (W)w, 1); 5299 ev_start (EV_A_ (W)w, 1);
4966 5300
4976 5310
4977 EV_FREQUENT_CHECK; 5311 EV_FREQUENT_CHECK;
4978 5312
4979 ev_io_stop (EV_A_ &w->io); 5313 ev_io_stop (EV_A_ &w->io);
4980 ev_prepare_stop (EV_A_ &w->prepare); 5314 ev_prepare_stop (EV_A_ &w->prepare);
5315#if EV_FORK_ENABLE
4981 ev_fork_stop (EV_A_ &w->fork); 5316 ev_fork_stop (EV_A_ &w->fork);
5317#endif
4982 5318
4983 ev_stop (EV_A_ (W)w); 5319 ev_stop (EV_A_ (W)w);
4984 5320
4985 EV_FREQUENT_CHECK; 5321 EV_FREQUENT_CHECK;
4986} 5322}
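For context on the embed watcher being adjusted above, the usual ev_embed pattern (close to the example in the libev documentation): embed a kqueue sub-loop into the main loop when the platform can embed it, otherwise fall back to the main loop. The fallback policy here is illustrative:

#include "ev.h"

static struct ev_loop *loop_hi; /* main loop */
static struct ev_loop *loop_lo; /* embedded loop, stays 0 if unavailable */
static ev_embed embed;

int main (void)
{
  loop_hi = ev_default_loop (0);

  /* only embed a kqueue sub-loop if this platform supports embedding it */
  if (ev_supported_backends () & ev_embeddable_backends () & EVBACKEND_KQUEUE)
    loop_lo = ev_loop_new (EVBACKEND_KQUEUE);

  if (loop_lo)
    {
      ev_embed_init (&embed, 0, loop_lo); /* 0 callback: libev sweeps the sub-loop automatically */
      ev_embed_start (loop_hi, &embed);
    }
  else
    loop_lo = loop_hi; /* fall back: register watchers on the main loop instead */

  ev_run (loop_hi, 0);
  return 0;
}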
