1 | /* |
1 | /* |
2 | * libev event processing core, watcher management |
2 | * libev event processing core, watcher management |
3 | * |
3 | * |
4 | * Copyright (c) 2007-2019 Marc Alexander Lehmann <libev@schmorp.de> |
4 | * Copyright (c) 2007-2020 Marc Alexander Lehmann <libev@schmorp.de> |
5 | * All rights reserved. |
5 | * All rights reserved. |
6 | * |
6 | * |
7 | * Redistribution and use in source and binary forms, with or without modifica- |
7 | * Redistribution and use in source and binary forms, with or without modifica- |
8 | * tion, are permitted provided that the following conditions are met: |
8 | * tion, are permitted provided that the following conditions are met: |
9 | * |
9 | * |
… | |
… | |
117 | # define EV_USE_EPOLL 0 |
117 | # define EV_USE_EPOLL 0 |
118 | # endif |
118 | # endif |
119 | |
119 | |
120 | # if HAVE_LINUX_AIO_ABI_H |
120 | # if HAVE_LINUX_AIO_ABI_H |
121 | # ifndef EV_USE_LINUXAIO |
121 | # ifndef EV_USE_LINUXAIO |
122 | # define EV_USE_LINUXAIO EV_FEATURE_BACKENDS |
122 | # define EV_USE_LINUXAIO 0 /* was: EV_FEATURE_BACKENDS, always off by default */ |
123 | # endif |
123 | # endif |
124 | # else |
124 | # else |
125 | # undef EV_USE_LINUXAIO |
125 | # undef EV_USE_LINUXAIO |
126 | # define EV_USE_LINUXAIO 0 |
126 | # define EV_USE_LINUXAIO 0 |
127 | # endif |
127 | # endif |
… | |
… | |
344 | # define EV_USE_PORT 0 |
344 | # define EV_USE_PORT 0 |
345 | #endif |
345 | #endif |
346 | |
346 | |
347 | #ifndef EV_USE_LINUXAIO |
347 | #ifndef EV_USE_LINUXAIO |
348 | # if __linux /* libev currently assumes linux/aio_abi.h is always available on linux */ |
348 | # if __linux /* libev currently assumes linux/aio_abi.h is always available on linux */ |
349 | # define EV_USE_LINUXAIO 1 |
349 | # define EV_USE_LINUXAIO 0 /* was: 1, always off by default */ |
350 | # else |
350 | # else |
351 | # define EV_USE_LINUXAIO 0 |
351 | # define EV_USE_LINUXAIO 0 |
352 | # endif |
352 | # endif |
353 | #endif |
353 | #endif |
354 | |
354 | |
… | |
… | |
491 | # endif |
491 | # endif |
492 | #endif |
492 | #endif |
493 | |
493 | |
494 | #if EV_USE_IOURING |
494 | #if EV_USE_IOURING |
495 | # include <sys/syscall.h> |
495 | # include <sys/syscall.h> |
496 | # if !SYS_io_uring_setup && __linux && !__alpha |
496 | # if !SYS_io_uring_register && __linux && !__alpha |
497 | # define SYS_io_uring_setup 425 |
497 | # define SYS_io_uring_setup 425 |
498 | # define SYS_io_uring_enter 426 |
498 | # define SYS_io_uring_enter 426 |
499 | # define SYS_io_uring_wregister 427 |
499 | # define SYS_io_uring_register 427 |
500 | # endif |
500 | # endif |
501 | # if SYS_io_uring_setup && EV_USE_EPOLL /* iouring backend requires epoll backend */ |
501 | # if SYS_io_uring_setup && EV_USE_EPOLL /* iouring backend requires epoll backend */ |
502 | # define EV_NEED_SYSCALL 1 |
502 | # define EV_NEED_SYSCALL 1 |
503 | # else |
503 | # else |
504 | # undef EV_USE_IOURING |
504 | # undef EV_USE_IOURING |
… | |
… | |
579 | #define MIN_INTERVAL 0.0001220703125 /* 1/2**13, good till 4000 */ |
579 | #define MIN_INTERVAL 0.0001220703125 /* 1/2**13, good till 4000 */ |
580 | /*#define MIN_INTERVAL 0.00000095367431640625 /* 1/2**20, good till 2200 */ |
580 | /*#define MIN_INTERVAL 0.00000095367431640625 /* 1/2**20, good till 2200 */ |
581 | |
581 | |
582 | #define MIN_TIMEJUMP 1. /* minimum timejump that gets detected (if monotonic clock available) */ |
582 | #define MIN_TIMEJUMP 1. /* minimum timejump that gets detected (if monotonic clock available) */ |
583 | #define MAX_BLOCKTIME 59.743 /* never wait longer than this time (to detect time jumps) */ |
583 | #define MAX_BLOCKTIME 59.743 /* never wait longer than this time (to detect time jumps) */ |
|
|
584 | #define MAX_BLOCKTIME2 1500001.07 /* same, but when timerfd is used to detect jumps, also safe delay to not overflow */ |
584 | |
585 | |
585 | /* find a portable timestamp that is "always" in the future but fits into time_t. |
586 | /* find a portable timestamp that is "always" in the future but fits into time_t. |
586 | * this is quite hard, and we are mostly guessing - we handle 32 bit signed/unsigned time_t, |
587 | * this is quite hard, and we are mostly guessing - we handle 32 bit signed/unsigned time_t, |
587 | * and sizes larger than 32 bit, and maybe the unlikely floating point time_t */ |
588 | * and sizes larger than 32 bit, and maybe the unlikely floating point time_t */ |
588 | #define EV_TSTAMP_HUGE \ |
589 | #define EV_TSTAMP_HUGE \ |
… | |
… | |
603 | /* the following is ecb.h embedded into libev - use update_ev_c to update from an external copy */ |
604 | /* the following is ecb.h embedded into libev - use update_ev_c to update from an external copy */ |
604 | /* ECB.H BEGIN */ |
605 | /* ECB.H BEGIN */ |
605 | /* |
606 | /* |
606 | * libecb - http://software.schmorp.de/pkg/libecb |
607 | * libecb - http://software.schmorp.de/pkg/libecb |
607 | * |
608 | * |
608 | * Copyright (©) 2009-2015 Marc Alexander Lehmann <libecb@schmorp.de> |
609 | * Copyright (©) 2009-2015,2018-2020 Marc Alexander Lehmann <libecb@schmorp.de> |
609 | * Copyright (©) 2011 Emanuele Giaquinta |
610 | * Copyright (©) 2011 Emanuele Giaquinta |
610 | * All rights reserved. |
611 | * All rights reserved. |
611 | * |
612 | * |
612 | * Redistribution and use in source and binary forms, with or without modifica- |
613 | * Redistribution and use in source and binary forms, with or without modifica- |
613 | * tion, are permitted provided that the following conditions are met: |
614 | * tion, are permitted provided that the following conditions are met: |
… | |
… | |
644 | |
645 | |
645 | #ifndef ECB_H |
646 | #ifndef ECB_H |
646 | #define ECB_H |
647 | #define ECB_H |
647 | |
648 | |
648 | /* 16 bits major, 16 bits minor */ |
649 | /* 16 bits major, 16 bits minor */ |
649 | #define ECB_VERSION 0x00010006 |
650 | #define ECB_VERSION 0x00010008 |
650 | |
651 | |
651 | #ifdef _WIN32 |
652 | #include <string.h> /* for memcpy */ |
|
|
653 | |
|
|
654 | #if defined (_WIN32) && !defined (__MINGW32__) |
652 | typedef signed char int8_t; |
655 | typedef signed char int8_t; |
653 | typedef unsigned char uint8_t; |
656 | typedef unsigned char uint8_t; |
|
|
657 | typedef signed char int_fast8_t; |
|
|
658 | typedef unsigned char uint_fast8_t; |
654 | typedef signed short int16_t; |
659 | typedef signed short int16_t; |
655 | typedef unsigned short uint16_t; |
660 | typedef unsigned short uint16_t; |
|
|
661 | typedef signed int int_fast16_t; |
|
|
662 | typedef unsigned int uint_fast16_t; |
656 | typedef signed int int32_t; |
663 | typedef signed int int32_t; |
657 | typedef unsigned int uint32_t; |
664 | typedef unsigned int uint32_t; |
|
|
665 | typedef signed int int_fast32_t; |
|
|
666 | typedef unsigned int uint_fast32_t; |
658 | #if __GNUC__ |
667 | #if __GNUC__ |
659 | typedef signed long long int64_t; |
668 | typedef signed long long int64_t; |
660 | typedef unsigned long long uint64_t; |
669 | typedef unsigned long long uint64_t; |
661 | #else /* _MSC_VER || __BORLANDC__ */ |
670 | #else /* _MSC_VER || __BORLANDC__ */ |
662 | typedef signed __int64 int64_t; |
671 | typedef signed __int64 int64_t; |
663 | typedef unsigned __int64 uint64_t; |
672 | typedef unsigned __int64 uint64_t; |
664 | #endif |
673 | #endif |
|
|
674 | typedef int64_t int_fast64_t; |
|
|
675 | typedef uint64_t uint_fast64_t; |
665 | #ifdef _WIN64 |
676 | #ifdef _WIN64 |
666 | #define ECB_PTRSIZE 8 |
677 | #define ECB_PTRSIZE 8 |
667 | typedef uint64_t uintptr_t; |
678 | typedef uint64_t uintptr_t; |
668 | typedef int64_t intptr_t; |
679 | typedef int64_t intptr_t; |
669 | #else |
680 | #else |
… | |
… | |
680 | #endif |
691 | #endif |
681 | #endif |
692 | #endif |
682 | |
693 | |
683 | #define ECB_GCC_AMD64 (__amd64 || __amd64__ || __x86_64 || __x86_64__) |
694 | #define ECB_GCC_AMD64 (__amd64 || __amd64__ || __x86_64 || __x86_64__) |
684 | #define ECB_MSVC_AMD64 (_M_AMD64 || _M_X64) |
695 | #define ECB_MSVC_AMD64 (_M_AMD64 || _M_X64) |
|
|
696 | |
|
|
697 | #ifndef ECB_OPTIMIZE_SIZE |
|
|
698 | #if __OPTIMIZE_SIZE__ |
|
|
699 | #define ECB_OPTIMIZE_SIZE 1 |
|
|
700 | #else |
|
|
701 | #define ECB_OPTIMIZE_SIZE 0 |
|
|
702 | #endif |
|
|
703 | #endif |
685 | |
704 | |
686 | /* work around x32 idiocy by defining proper macros */ |
705 | /* work around x32 idiocy by defining proper macros */ |
687 | #if ECB_GCC_AMD64 || ECB_MSVC_AMD64 |
706 | #if ECB_GCC_AMD64 || ECB_MSVC_AMD64 |
688 | #if _ILP32 |
707 | #if _ILP32 |
689 | #define ECB_AMD64_X32 1 |
708 | #define ECB_AMD64_X32 1 |
… | |
… | |
1196 | ecb_inline ecb_const uint32_t ecb_rotl32 (uint32_t x, unsigned int count) { return (x >> (32 - count)) | (x << count); } |
1215 | ecb_inline ecb_const uint32_t ecb_rotl32 (uint32_t x, unsigned int count) { return (x >> (32 - count)) | (x << count); } |
1197 | ecb_inline ecb_const uint32_t ecb_rotr32 (uint32_t x, unsigned int count) { return (x << (32 - count)) | (x >> count); } |
1216 | ecb_inline ecb_const uint32_t ecb_rotr32 (uint32_t x, unsigned int count) { return (x << (32 - count)) | (x >> count); } |
1198 | ecb_inline ecb_const uint64_t ecb_rotl64 (uint64_t x, unsigned int count) { return (x >> (64 - count)) | (x << count); } |
1217 | ecb_inline ecb_const uint64_t ecb_rotl64 (uint64_t x, unsigned int count) { return (x >> (64 - count)) | (x << count); } |
1199 | ecb_inline ecb_const uint64_t ecb_rotr64 (uint64_t x, unsigned int count) { return (x << (64 - count)) | (x >> count); } |
1218 | ecb_inline ecb_const uint64_t ecb_rotr64 (uint64_t x, unsigned int count) { return (x << (64 - count)) | (x >> count); } |
1200 | |
1219 | |
|
|
1220 | #if ECB_CPP |
|
|
1221 | |
|
|
1222 | inline uint8_t ecb_ctz (uint8_t v) { return ecb_ctz32 (v); } |
|
|
1223 | inline uint16_t ecb_ctz (uint16_t v) { return ecb_ctz32 (v); } |
|
|
1224 | inline uint32_t ecb_ctz (uint32_t v) { return ecb_ctz32 (v); } |
|
|
1225 | inline uint64_t ecb_ctz (uint64_t v) { return ecb_ctz64 (v); } |
|
|
1226 | |
|
|
1227 | inline bool ecb_is_pot (uint8_t v) { return ecb_is_pot32 (v); } |
|
|
1228 | inline bool ecb_is_pot (uint16_t v) { return ecb_is_pot32 (v); } |
|
|
1229 | inline bool ecb_is_pot (uint32_t v) { return ecb_is_pot32 (v); } |
|
|
1230 | inline bool ecb_is_pot (uint64_t v) { return ecb_is_pot64 (v); } |
|
|
1231 | |
|
|
1232 | inline int ecb_ld (uint8_t v) { return ecb_ld32 (v); } |
|
|
1233 | inline int ecb_ld (uint16_t v) { return ecb_ld32 (v); } |
|
|
1234 | inline int ecb_ld (uint32_t v) { return ecb_ld32 (v); } |
|
|
1235 | inline int ecb_ld (uint64_t v) { return ecb_ld64 (v); } |
|
|
1236 | |
|
|
1237 | inline int ecb_popcount (uint8_t v) { return ecb_popcount32 (v); } |
|
|
1238 | inline int ecb_popcount (uint16_t v) { return ecb_popcount32 (v); } |
|
|
1239 | inline int ecb_popcount (uint32_t v) { return ecb_popcount32 (v); } |
|
|
1240 | inline int ecb_popcount (uint64_t v) { return ecb_popcount64 (v); } |
|
|
1241 | |
|
|
1242 | inline uint8_t ecb_bitrev (uint8_t v) { return ecb_bitrev8 (v); } |
|
|
1243 | inline uint16_t ecb_bitrev (uint16_t v) { return ecb_bitrev16 (v); } |
|
|
1244 | inline uint32_t ecb_bitrev (uint32_t v) { return ecb_bitrev32 (v); } |
|
|
1245 | |
|
|
1246 | inline uint8_t ecb_rotl (uint8_t v, unsigned int count) { return ecb_rotl8 (v, count); } |
|
|
1247 | inline uint16_t ecb_rotl (uint16_t v, unsigned int count) { return ecb_rotl16 (v, count); } |
|
|
1248 | inline uint32_t ecb_rotl (uint32_t v, unsigned int count) { return ecb_rotl32 (v, count); } |
|
|
1249 | inline uint64_t ecb_rotl (uint64_t v, unsigned int count) { return ecb_rotl64 (v, count); } |
|
|
1250 | |
|
|
1251 | inline uint8_t ecb_rotr (uint8_t v, unsigned int count) { return ecb_rotr8 (v, count); } |
|
|
1252 | inline uint16_t ecb_rotr (uint16_t v, unsigned int count) { return ecb_rotr16 (v, count); } |
|
|
1253 | inline uint32_t ecb_rotr (uint32_t v, unsigned int count) { return ecb_rotr32 (v, count); } |
|
|
1254 | inline uint64_t ecb_rotr (uint64_t v, unsigned int count) { return ecb_rotr64 (v, count); } |
|
|
1255 | |
|
|
1256 | #endif |
|
|
1257 | |
1201 | #if ECB_GCC_VERSION(4,3) || (ECB_CLANG_BUILTIN(__builtin_bswap32) && ECB_CLANG_BUILTIN(__builtin_bswap64)) |
1258 | #if ECB_GCC_VERSION(4,3) || (ECB_CLANG_BUILTIN(__builtin_bswap32) && ECB_CLANG_BUILTIN(__builtin_bswap64)) |
1202 | #if ECB_GCC_VERSION(4,8) || ECB_CLANG_BUILTIN(__builtin_bswap16) |
1259 | #if ECB_GCC_VERSION(4,8) || ECB_CLANG_BUILTIN(__builtin_bswap16) |
1203 | #define ecb_bswap16(x) __builtin_bswap16 (x) |
1260 | #define ecb_bswap16(x) __builtin_bswap16 (x) |
1204 | #else |
1261 | #else |
1205 | #define ecb_bswap16(x) (__builtin_bswap32 (x) >> 16) |
1262 | #define ecb_bswap16(x) (__builtin_bswap32 (x) >> 16) |
… | |
… | |
1276 | ecb_inline ecb_const ecb_bool ecb_big_endian (void); |
1333 | ecb_inline ecb_const ecb_bool ecb_big_endian (void); |
1277 | ecb_inline ecb_const ecb_bool ecb_big_endian (void) { return ecb_byteorder_helper () == 0x11223344; } |
1334 | ecb_inline ecb_const ecb_bool ecb_big_endian (void) { return ecb_byteorder_helper () == 0x11223344; } |
1278 | ecb_inline ecb_const ecb_bool ecb_little_endian (void); |
1335 | ecb_inline ecb_const ecb_bool ecb_little_endian (void); |
1279 | ecb_inline ecb_const ecb_bool ecb_little_endian (void) { return ecb_byteorder_helper () == 0x44332211; } |
1336 | ecb_inline ecb_const ecb_bool ecb_little_endian (void) { return ecb_byteorder_helper () == 0x44332211; } |
1280 | |
1337 | |
|
|
1338 | /*****************************************************************************/ |
|
|
1339 | /* unaligned load/store */ |
|
|
1340 | |
|
|
1341 | ecb_inline uint_fast16_t ecb_be_u16_to_host (uint_fast16_t v) { return ecb_little_endian () ? ecb_bswap16 (v) : v; } |
|
|
1342 | ecb_inline uint_fast32_t ecb_be_u32_to_host (uint_fast32_t v) { return ecb_little_endian () ? ecb_bswap32 (v) : v; } |
|
|
1343 | ecb_inline uint_fast64_t ecb_be_u64_to_host (uint_fast64_t v) { return ecb_little_endian () ? ecb_bswap64 (v) : v; } |
|
|
1344 | |
|
|
1345 | ecb_inline uint_fast16_t ecb_le_u16_to_host (uint_fast16_t v) { return ecb_big_endian () ? ecb_bswap16 (v) : v; } |
|
|
1346 | ecb_inline uint_fast32_t ecb_le_u32_to_host (uint_fast32_t v) { return ecb_big_endian () ? ecb_bswap32 (v) : v; } |
|
|
1347 | ecb_inline uint_fast64_t ecb_le_u64_to_host (uint_fast64_t v) { return ecb_big_endian () ? ecb_bswap64 (v) : v; } |
|
|
1348 | |
|
|
1349 | ecb_inline uint_fast16_t ecb_peek_u16_u (const void *ptr) { uint16_t v; memcpy (&v, ptr, sizeof (v)); return v; } |
|
|
1350 | ecb_inline uint_fast32_t ecb_peek_u32_u (const void *ptr) { uint32_t v; memcpy (&v, ptr, sizeof (v)); return v; } |
|
|
1351 | ecb_inline uint_fast64_t ecb_peek_u64_u (const void *ptr) { uint64_t v; memcpy (&v, ptr, sizeof (v)); return v; } |
|
|
1352 | |
|
|
1353 | ecb_inline uint_fast16_t ecb_peek_be_u16_u (const void *ptr) { return ecb_be_u16_to_host (ecb_peek_u16_u (ptr)); } |
|
|
1354 | ecb_inline uint_fast32_t ecb_peek_be_u32_u (const void *ptr) { return ecb_be_u32_to_host (ecb_peek_u32_u (ptr)); } |
|
|
1355 | ecb_inline uint_fast64_t ecb_peek_be_u64_u (const void *ptr) { return ecb_be_u64_to_host (ecb_peek_u64_u (ptr)); } |
|
|
1356 | |
|
|
1357 | ecb_inline uint_fast16_t ecb_peek_le_u16_u (const void *ptr) { return ecb_le_u16_to_host (ecb_peek_u16_u (ptr)); } |
|
|
1358 | ecb_inline uint_fast32_t ecb_peek_le_u32_u (const void *ptr) { return ecb_le_u32_to_host (ecb_peek_u32_u (ptr)); } |
|
|
1359 | ecb_inline uint_fast64_t ecb_peek_le_u64_u (const void *ptr) { return ecb_le_u64_to_host (ecb_peek_u64_u (ptr)); } |
|
|
1360 | |
|
|
1361 | ecb_inline uint_fast16_t ecb_host_to_be_u16 (uint_fast16_t v) { return ecb_little_endian () ? ecb_bswap16 (v) : v; } |
|
|
1362 | ecb_inline uint_fast32_t ecb_host_to_be_u32 (uint_fast32_t v) { return ecb_little_endian () ? ecb_bswap32 (v) : v; } |
|
|
1363 | ecb_inline uint_fast64_t ecb_host_to_be_u64 (uint_fast64_t v) { return ecb_little_endian () ? ecb_bswap64 (v) : v; } |
|
|
1364 | |
|
|
1365 | ecb_inline uint_fast16_t ecb_host_to_le_u16 (uint_fast16_t v) { return ecb_big_endian () ? ecb_bswap16 (v) : v; } |
|
|
1366 | ecb_inline uint_fast32_t ecb_host_to_le_u32 (uint_fast32_t v) { return ecb_big_endian () ? ecb_bswap32 (v) : v; } |
|
|
1367 | ecb_inline uint_fast64_t ecb_host_to_le_u64 (uint_fast64_t v) { return ecb_big_endian () ? ecb_bswap64 (v) : v; } |
|
|
1368 | |
|
|
1369 | ecb_inline void ecb_poke_u16_u (void *ptr, uint16_t v) { memcpy (ptr, &v, sizeof (v)); } |
|
|
1370 | ecb_inline void ecb_poke_u32_u (void *ptr, uint32_t v) { memcpy (ptr, &v, sizeof (v)); } |
|
|
1371 | ecb_inline void ecb_poke_u64_u (void *ptr, uint64_t v) { memcpy (ptr, &v, sizeof (v)); } |
|
|
1372 | |
|
|
1373 | ecb_inline void ecb_poke_be_u16_u (void *ptr, uint_fast16_t v) { ecb_poke_u16_u (ptr, ecb_host_to_be_u16 (v)); } |
|
|
1374 | ecb_inline void ecb_poke_be_u32_u (void *ptr, uint_fast32_t v) { ecb_poke_u32_u (ptr, ecb_host_to_be_u32 (v)); } |
|
|
1375 | ecb_inline void ecb_poke_be_u64_u (void *ptr, uint_fast64_t v) { ecb_poke_u64_u (ptr, ecb_host_to_be_u64 (v)); } |
|
|
1376 | |
|
|
1377 | ecb_inline void ecb_poke_le_u16_u (void *ptr, uint_fast16_t v) { ecb_poke_u16_u (ptr, ecb_host_to_le_u16 (v)); } |
|
|
1378 | ecb_inline void ecb_poke_le_u32_u (void *ptr, uint_fast32_t v) { ecb_poke_u32_u (ptr, ecb_host_to_le_u32 (v)); } |
|
|
1379 | ecb_inline void ecb_poke_le_u64_u (void *ptr, uint_fast64_t v) { ecb_poke_u64_u (ptr, ecb_host_to_le_u64 (v)); } |
|
|
1380 | |
|
|
1381 | #if ECB_CPP |
|
|
1382 | |
|
|
1383 | inline uint8_t ecb_bswap (uint8_t v) { return v; } |
|
|
1384 | inline uint16_t ecb_bswap (uint16_t v) { return ecb_bswap16 (v); } |
|
|
1385 | inline uint32_t ecb_bswap (uint32_t v) { return ecb_bswap32 (v); } |
|
|
1386 | inline uint64_t ecb_bswap (uint64_t v) { return ecb_bswap64 (v); } |
|
|
1387 | |
|
|
1388 | template<typename T> inline T ecb_be_to_host (T v) { return ecb_little_endian () ? ecb_bswap (v) : v; } |
|
|
1389 | template<typename T> inline T ecb_le_to_host (T v) { return ecb_big_endian () ? ecb_bswap (v) : v; } |
|
|
1390 | template<typename T> inline T ecb_peek (const void *ptr) { return *(const T *)ptr; } |
|
|
1391 | template<typename T> inline T ecb_peek_be (const void *ptr) { return ecb_be_to_host (ecb_peek <T> (ptr)); } |
|
|
1392 | template<typename T> inline T ecb_peek_le (const void *ptr) { return ecb_le_to_host (ecb_peek <T> (ptr)); } |
|
|
1393 | template<typename T> inline T ecb_peek_u (const void *ptr) { T v; memcpy (&v, ptr, sizeof (v)); return v; } |
|
|
1394 | template<typename T> inline T ecb_peek_be_u (const void *ptr) { return ecb_be_to_host (ecb_peek_u<T> (ptr)); } |
|
|
1395 | template<typename T> inline T ecb_peek_le_u (const void *ptr) { return ecb_le_to_host (ecb_peek_u<T> (ptr)); } |
|
|
1396 | |
|
|
1397 | template<typename T> inline T ecb_host_to_be (T v) { return ecb_little_endian () ? ecb_bswap (v) : v; } |
|
|
1398 | template<typename T> inline T ecb_host_to_le (T v) { return ecb_big_endian () ? ecb_bswap (v) : v; } |
|
|
1399 | template<typename T> inline void ecb_poke (void *ptr, T v) { *(T *)ptr = v; } |
|
|
1400 | template<typename T> inline void ecb_poke_be (void *ptr, T v) { return ecb_poke <T> (ptr, ecb_host_to_be (v)); } |
|
|
1401 | template<typename T> inline void ecb_poke_le (void *ptr, T v) { return ecb_poke <T> (ptr, ecb_host_to_le (v)); } |
|
|
1402 | template<typename T> inline void ecb_poke_u (void *ptr, T v) { memcpy (ptr, &v, sizeof (v)); } |
|
|
1403 | template<typename T> inline void ecb_poke_be_u (void *ptr, T v) { return ecb_poke_u<T> (ptr, ecb_host_to_be (v)); } |
|
|
1404 | template<typename T> inline void ecb_poke_le_u (void *ptr, T v) { return ecb_poke_u<T> (ptr, ecb_host_to_le (v)); } |
|
|
1405 | |
|
|
1406 | #endif |
|
|
1407 | |
|
|
1408 | /*****************************************************************************/ |
|
|
1409 | |
1281 | #if ECB_GCC_VERSION(3,0) || ECB_C99 |
1410 | #if ECB_GCC_VERSION(3,0) || ECB_C99 |
1282 | #define ecb_mod(m,n) ((m) % (n) + ((m) % (n) < 0 ? (n) : 0)) |
1411 | #define ecb_mod(m,n) ((m) % (n) + ((m) % (n) < 0 ? (n) : 0)) |
1283 | #else |
1412 | #else |
1284 | #define ecb_mod(m,n) ((m) < 0 ? ((n) - 1 - ((-1 - (m)) % (n))) : ((m) % (n))) |
1413 | #define ecb_mod(m,n) ((m) < 0 ? ((n) - 1 - ((-1 - (m)) % (n))) : ((m) % (n))) |
1285 | #endif |
1414 | #endif |
… | |
… | |
1308 | return N; |
1437 | return N; |
1309 | } |
1438 | } |
1310 | #else |
1439 | #else |
1311 | #define ecb_array_length(name) (sizeof (name) / sizeof (name [0])) |
1440 | #define ecb_array_length(name) (sizeof (name) / sizeof (name [0])) |
1312 | #endif |
1441 | #endif |
|
|
1442 | |
|
|
1443 | /*****************************************************************************/ |
1313 | |
1444 | |
1314 | ecb_function_ ecb_const uint32_t ecb_binary16_to_binary32 (uint32_t x); |
1445 | ecb_function_ ecb_const uint32_t ecb_binary16_to_binary32 (uint32_t x); |
1315 | ecb_function_ ecb_const uint32_t |
1446 | ecb_function_ ecb_const uint32_t |
1316 | ecb_binary16_to_binary32 (uint32_t x) |
1447 | ecb_binary16_to_binary32 (uint32_t x) |
1317 | { |
1448 | { |
… | |
… | |
1426 | || defined __sh__ \ |
1557 | || defined __sh__ \ |
1427 | || defined _M_IX86 || defined ECB_MSVC_AMD64 || defined _M_IA64 \ |
1558 | || defined _M_IX86 || defined ECB_MSVC_AMD64 || defined _M_IA64 \ |
1428 | || (defined __arm__ && (defined __ARM_EABI__ || defined __EABI__ || defined __VFP_FP__ || defined _WIN32_WCE || defined __ANDROID__)) \ |
1559 | || (defined __arm__ && (defined __ARM_EABI__ || defined __EABI__ || defined __VFP_FP__ || defined _WIN32_WCE || defined __ANDROID__)) \ |
1429 | || defined __aarch64__ |
1560 | || defined __aarch64__ |
1430 | #define ECB_STDFP 1 |
1561 | #define ECB_STDFP 1 |
1431 | #include <string.h> /* for memcpy */ |
|
|
1432 | #else |
1562 | #else |
1433 | #define ECB_STDFP 0 |
1563 | #define ECB_STDFP 0 |
1434 | #endif |
1564 | #endif |
1435 | |
1565 | |
1436 | #ifndef ECB_NO_LIBM |
1566 | #ifndef ECB_NO_LIBM |
… | |
… | |
1658 | * our syscalls return < 0, not == -1, on error. which is good |
1788 | * our syscalls return < 0, not == -1, on error. which is good |
1659 | * enough for linux aio. |
1789 | * enough for linux aio. |
1660 | * TODO: arm is also common nowadays, maybe even mips and x86 |
1790 | * TODO: arm is also common nowadays, maybe even mips and x86 |
1661 | * TODO: after implementing this, it suddenly looks like overkill, but its hard to remove... |
1791 | * TODO: after implementing this, it suddenly looks like overkill, but its hard to remove... |
1662 | */ |
1792 | */ |
1663 | #if __GNUC__ && __linux && ECB_AMD64 && !defined __OPTIMIZE_SIZE__ |
1793 | #if __GNUC__ && __linux && ECB_AMD64 && !EV_FEATURE_CODE |
1664 | /* the costly errno access probably kills this for size optimisation */ |
1794 | /* the costly errno access probably kills this for size optimisation */ |
1665 | |
1795 | |
1666 | #define ev_syscall(nr,narg,arg1,arg2,arg3,arg4,arg5,arg6) \ |
1796 | #define ev_syscall(nr,narg,arg1,arg2,arg3,arg4,arg5,arg6) \ |
1667 | ({ \ |
1797 | ({ \ |
1668 | long res; \ |
1798 | long res; \ |
… | |
… | |
2262 | inline_size void |
2392 | inline_size void |
2263 | fd_reify (EV_P) |
2393 | fd_reify (EV_P) |
2264 | { |
2394 | { |
2265 | int i; |
2395 | int i; |
2266 | |
2396 | |
|
|
2397 | /* most backends do not modify the fdchanges list in backend_modfiy. |
|
|
2398 | * except io_uring, which has fixed-size buffers which might force us |
|
|
2399 | * to handle events in backend_modify, causing fdchanges to be amended, |
|
|
2400 | * which could result in an endless loop. |
|
|
2401 | * to avoid this, we do not dynamically handle fds that were added |
|
|
2402 | * during fd_reify. that means that for those backends, fdchangecnt |
|
|
2403 | * might be non-zero during poll, which must cause them to not block. |
|
|
2404 | * to not put too much of a burden on other backends, this detail |
|
|
2405 | * needs to be handled in the backend. |
|
|
2406 | */ |
|
|
2407 | int changecnt = fdchangecnt; |
|
|
2408 | |
2267 | #if EV_SELECT_IS_WINSOCKET || EV_USE_IOCP |
2409 | #if EV_SELECT_IS_WINSOCKET || EV_USE_IOCP |
2268 | for (i = 0; i < fdchangecnt; ++i) |
2410 | for (i = 0; i < changecnt; ++i) |
2269 | { |
2411 | { |
2270 | int fd = fdchanges [i]; |
2412 | int fd = fdchanges [i]; |
2271 | ANFD *anfd = anfds + fd; |
2413 | ANFD *anfd = anfds + fd; |
2272 | |
2414 | |
2273 | if (anfd->reify & EV__IOFDSET && anfd->head) |
2415 | if (anfd->reify & EV__IOFDSET && anfd->head) |
… | |
… | |
2287 | } |
2429 | } |
2288 | } |
2430 | } |
2289 | } |
2431 | } |
2290 | #endif |
2432 | #endif |
2291 | |
2433 | |
2292 | for (i = 0; i < fdchangecnt; ++i) |
2434 | for (i = 0; i < changecnt; ++i) |
2293 | { |
2435 | { |
2294 | int fd = fdchanges [i]; |
2436 | int fd = fdchanges [i]; |
2295 | ANFD *anfd = anfds + fd; |
2437 | ANFD *anfd = anfds + fd; |
2296 | ev_io *w; |
2438 | ev_io *w; |
2297 | |
2439 | |
… | |
… | |
2313 | |
2455 | |
2314 | if (o_reify & EV__IOFDSET) |
2456 | if (o_reify & EV__IOFDSET) |
2315 | backend_modify (EV_A_ fd, o_events, anfd->events); |
2457 | backend_modify (EV_A_ fd, o_events, anfd->events); |
2316 | } |
2458 | } |
2317 | |
2459 | |
|
|
2460 | /* normally, fdchangecnt hasn't changed. if it has, then new fds have been added. |
|
|
2461 | * this is a rare case (see beginning comment in this function), so we copy them to the |
|
|
2462 | * front and hope the backend handles this case. |
|
|
2463 | */ |
|
|
2464 | if (ecb_expect_false (fdchangecnt != changecnt)) |
|
|
2465 | memmove (fdchanges, fdchanges + changecnt, (fdchangecnt - changecnt) * sizeof (*fdchanges)); |
|
|
2466 | |
2318 | fdchangecnt = 0; |
2467 | fdchangecnt -= changecnt; |
2319 | } |
2468 | } |
2320 | |
2469 | |
2321 | /* something about the given fd changed */ |
2470 | /* something about the given fd changed */ |
2322 | inline_size |
2471 | inline_size |
2323 | void |
2472 | void |
2324 | fd_change (EV_P_ int fd, int flags) |
2473 | fd_change (EV_P_ int fd, int flags) |
2325 | { |
2474 | { |
2326 | unsigned char reify = anfds [fd].reify; |
2475 | unsigned char reify = anfds [fd].reify; |
2327 | anfds [fd].reify |= flags; |
2476 | anfds [fd].reify = reify | flags; |
2328 | |
2477 | |
2329 | if (ecb_expect_true (!reify)) |
2478 | if (ecb_expect_true (!reify)) |
2330 | { |
2479 | { |
2331 | ++fdchangecnt; |
2480 | ++fdchangecnt; |
2332 | array_needsize (int, fdchanges, fdchangemax, fdchangecnt, array_needsize_noinit); |
2481 | array_needsize (int, fdchanges, fdchangemax, fdchangecnt, array_needsize_noinit); |
… | |
… | |
2561 | upheap (heap, i + HEAP0); |
2710 | upheap (heap, i + HEAP0); |
2562 | } |
2711 | } |
2563 | |
2712 | |
2564 | /*****************************************************************************/ |
2713 | /*****************************************************************************/ |
2565 | |
2714 | |
2566 | /* associate signal watchers to a signal |
2715 | /* associate signal watchers to a signal */ |
2567 | typedef struct |
2716 | typedef struct |
2568 | { |
2717 | { |
2569 | EV_ATOMIC_T pending; |
2718 | EV_ATOMIC_T pending; |
2570 | #if EV_MULTIPLICITY |
2719 | #if EV_MULTIPLICITY |
2571 | EV_P; |
2720 | EV_P; |
… | |
… | |
2884 | static void |
3033 | static void |
2885 | timerfdcb (EV_P_ ev_io *iow, int revents) |
3034 | timerfdcb (EV_P_ ev_io *iow, int revents) |
2886 | { |
3035 | { |
2887 | struct itimerspec its = { 0 }; |
3036 | struct itimerspec its = { 0 }; |
2888 | |
3037 | |
2889 | /* since we can't easily come zup with a (portable) maximum value of time_t, |
3038 | its.it_value.tv_sec = ev_rt_now + (int)MAX_BLOCKTIME2; |
2890 | * we wake up once per month, which hopefully is rare enough to not |
|
|
2891 | * be a problem. */ |
|
|
2892 | its.it_value.tv_sec = ev_rt_now + 86400 * 30; |
|
|
2893 | timerfd_settime (timerfd, TFD_TIMER_ABSTIME | TFD_TIMER_CANCEL_ON_SET, &its, 0); |
3039 | timerfd_settime (timerfd, TFD_TIMER_ABSTIME | TFD_TIMER_CANCEL_ON_SET, &its, 0); |
2894 | |
3040 | |
2895 | ev_rt_now = ev_time (); |
3041 | ev_rt_now = ev_time (); |
2896 | /* periodics_reschedule only needs ev_rt_now */ |
3042 | /* periodics_reschedule only needs ev_rt_now */ |
2897 | /* but maybe in the future we want the full treatment. */ |
3043 | /* but maybe in the future we want the full treatment. */ |
2898 | /* |
3044 | /* |
2899 | now_floor = EV_TS_CONST (0.); |
3045 | now_floor = EV_TS_CONST (0.); |
2900 | time_update (EV_A_ EV_TSTAMP_HUGE); |
3046 | time_update (EV_A_ EV_TSTAMP_HUGE); |
2901 | */ |
3047 | */ |
|
|
3048 | #if EV_PERIODIC_ENABLE |
2902 | periodics_reschedule (EV_A); |
3049 | periodics_reschedule (EV_A); |
|
|
3050 | #endif |
2903 | } |
3051 | } |
2904 | |
3052 | |
2905 | ecb_noinline ecb_cold |
3053 | ecb_noinline ecb_cold |
2906 | static void |
3054 | static void |
2907 | evtimerfd_init (EV_P) |
3055 | evtimerfd_init (EV_P) |
… | |
… | |
2913 | if (timerfd >= 0) |
3061 | if (timerfd >= 0) |
2914 | { |
3062 | { |
2915 | fd_intern (timerfd); /* just to be sure */ |
3063 | fd_intern (timerfd); /* just to be sure */ |
2916 | |
3064 | |
2917 | ev_io_init (&timerfd_w, timerfdcb, timerfd, EV_READ); |
3065 | ev_io_init (&timerfd_w, timerfdcb, timerfd, EV_READ); |
2918 | ev_set_priority (&sigfd_w, EV_MINPRI); |
3066 | ev_set_priority (&timerfd_w, EV_MINPRI); |
2919 | ev_io_start (EV_A_ &timerfd_w); |
3067 | ev_io_start (EV_A_ &timerfd_w); |
2920 | ev_unref (EV_A); /* watcher should not keep loop alive */ |
3068 | ev_unref (EV_A); /* watcher should not keep loop alive */ |
2921 | |
3069 | |
2922 | /* (re-) arm timer */ |
3070 | /* (re-) arm timer */ |
2923 | timerfdcb (EV_A_ 0, 0); |
3071 | timerfdcb (EV_A_ 0, 0); |
… | |
… | |
2982 | unsigned int |
3130 | unsigned int |
2983 | ev_supported_backends (void) EV_NOEXCEPT |
3131 | ev_supported_backends (void) EV_NOEXCEPT |
2984 | { |
3132 | { |
2985 | unsigned int flags = 0; |
3133 | unsigned int flags = 0; |
2986 | |
3134 | |
2987 | if (EV_USE_PORT ) flags |= EVBACKEND_PORT; |
3135 | if (EV_USE_PORT ) flags |= EVBACKEND_PORT; |
2988 | if (EV_USE_KQUEUE ) flags |= EVBACKEND_KQUEUE; |
3136 | if (EV_USE_KQUEUE ) flags |= EVBACKEND_KQUEUE; |
2989 | if (EV_USE_EPOLL ) flags |= EVBACKEND_EPOLL; |
3137 | if (EV_USE_EPOLL ) flags |= EVBACKEND_EPOLL; |
2990 | if (EV_USE_LINUXAIO) flags |= EVBACKEND_LINUXAIO; |
3138 | if (EV_USE_LINUXAIO ) flags |= EVBACKEND_LINUXAIO; |
2991 | if (EV_USE_IOURING ) flags |= EVBACKEND_IOURING; |
3139 | if (EV_USE_IOURING && ev_linux_version () >= 0x050601) flags |= EVBACKEND_IOURING; /* 5.6.1+ */ |
2992 | if (EV_USE_POLL ) flags |= EVBACKEND_POLL; |
3140 | if (EV_USE_POLL ) flags |= EVBACKEND_POLL; |
2993 | if (EV_USE_SELECT ) flags |= EVBACKEND_SELECT; |
3141 | if (EV_USE_SELECT ) flags |= EVBACKEND_SELECT; |
2994 | |
3142 | |
2995 | return flags; |
3143 | return flags; |
2996 | } |
3144 | } |
2997 | |
3145 | |
2998 | ecb_cold |
3146 | ecb_cold |
2999 | unsigned int |
3147 | unsigned int |
… | |
… | |
3017 | |
3165 | |
3018 | /* TODO: linuxaio is very experimental */ |
3166 | /* TODO: linuxaio is very experimental */ |
3019 | #if !EV_RECOMMEND_LINUXAIO |
3167 | #if !EV_RECOMMEND_LINUXAIO |
3020 | flags &= ~EVBACKEND_LINUXAIO; |
3168 | flags &= ~EVBACKEND_LINUXAIO; |
3021 | #endif |
3169 | #endif |
3022 | /* TODO: linuxaio is super experimental */ |
3170 | /* TODO: iouring is super experimental */ |
3023 | #if !EV_RECOMMEND_IOURING |
3171 | #if !EV_RECOMMEND_IOURING |
3024 | flags &= ~EVBACKEND_IOURING; |
3172 | flags &= ~EVBACKEND_IOURING; |
3025 | #endif |
3173 | #endif |
3026 | |
3174 | |
3027 | return flags; |
3175 | return flags; |
… | |
… | |
3029 | |
3177 | |
3030 | ecb_cold |
3178 | ecb_cold |
3031 | unsigned int |
3179 | unsigned int |
3032 | ev_embeddable_backends (void) EV_NOEXCEPT |
3180 | ev_embeddable_backends (void) EV_NOEXCEPT |
3033 | { |
3181 | { |
3034 | int flags = EVBACKEND_EPOLL | EVBACKEND_KQUEUE | EVBACKEND_PORT; |
3182 | int flags = EVBACKEND_EPOLL | EVBACKEND_KQUEUE | EVBACKEND_PORT | EVBACKEND_IOURING; |
3035 | |
3183 | |
3036 | /* epoll embeddability broken on all linux versions up to at least 2.6.23 */ |
3184 | /* epoll embeddability broken on all linux versions up to at least 2.6.23 */ |
3037 | if (ev_linux_version () < 0x020620) /* disable it on linux < 2.6.32 */ |
3185 | if (ev_linux_version () < 0x020620) /* disable it on linux < 2.6.32 */ |
3038 | flags &= ~EVBACKEND_EPOLL; |
3186 | flags &= ~EVBACKEND_EPOLL; |
3039 | |
3187 | |
3040 | /* EVBACKEND_LINUXAIO is theoretically embeddable, but suffers from a performance overhead */ |
3188 | /* EVBACKEND_LINUXAIO is theoretically embeddable, but suffers from a performance overhead */ |
3041 | |
|
|
3042 | /* EVBACKEND_IOURING is practically embeddable, but the current implementation is not |
|
|
3043 | * because our backend_fd is the epoll fd we need as fallback. |
|
|
3044 | * if the kernel ever is fixed, this might change... |
|
|
3045 | */ |
|
|
3046 | |
3189 | |
3047 | return flags; |
3190 | return flags; |
3048 | } |
3191 | } |
3049 | |
3192 | |
3050 | unsigned int |
3193 | unsigned int |
… | |
… | |
3949 | |
4092 | |
3950 | if (ecb_expect_true (!(flags & EVRUN_NOWAIT || idleall || !activecnt || pipe_write_skipped))) |
4093 | if (ecb_expect_true (!(flags & EVRUN_NOWAIT || idleall || !activecnt || pipe_write_skipped))) |
3951 | { |
4094 | { |
3952 | waittime = EV_TS_CONST (MAX_BLOCKTIME); |
4095 | waittime = EV_TS_CONST (MAX_BLOCKTIME); |
3953 | |
4096 | |
|
|
4097 | #if EV_USE_TIMERFD |
|
|
4098 | /* sleep a lot longer when we can reliably detect timejumps */ |
|
|
4099 | if (ecb_expect_true (timerfd >= 0)) |
|
|
4100 | waittime = EV_TS_CONST (MAX_BLOCKTIME2); |
|
|
4101 | #endif |
|
|
4102 | #if !EV_PERIODIC_ENABLE |
|
|
4103 | /* without periodics but with monotonic clock there is no need */ |
|
|
4104 | /* for any time jump detection, so sleep longer */ |
|
|
4105 | if (ecb_expect_true (have_monotonic)) |
|
|
4106 | waittime = EV_TS_CONST (MAX_BLOCKTIME2); |
|
|
4107 | #endif |
|
|
4108 | |
3954 | if (timercnt) |
4109 | if (timercnt) |
3955 | { |
4110 | { |
3956 | ev_tstamp to = ANHE_at (timers [HEAP0]) - mn_now; |
4111 | ev_tstamp to = ANHE_at (timers [HEAP0]) - mn_now; |
3957 | if (waittime > to) waittime = to; |
4112 | if (waittime > to) waittime = to; |
3958 | } |
4113 | } |
… | |
… | |
5076 | ev_run (EV_A_ EVRUN_NOWAIT); |
5231 | ev_run (EV_A_ EVRUN_NOWAIT); |
5077 | } |
5232 | } |
5078 | } |
5233 | } |
5079 | } |
5234 | } |
5080 | |
5235 | |
|
|
5236 | #if EV_FORK_ENABLE |
5081 | static void |
5237 | static void |
5082 | embed_fork_cb (EV_P_ ev_fork *fork_w, int revents) |
5238 | embed_fork_cb (EV_P_ ev_fork *fork_w, int revents) |
5083 | { |
5239 | { |
5084 | ev_embed *w = (ev_embed *)(((char *)fork_w) - offsetof (ev_embed, fork)); |
5240 | ev_embed *w = (ev_embed *)(((char *)fork_w) - offsetof (ev_embed, fork)); |
5085 | |
5241 | |
… | |
… | |
5092 | ev_run (EV_A_ EVRUN_NOWAIT); |
5248 | ev_run (EV_A_ EVRUN_NOWAIT); |
5093 | } |
5249 | } |
5094 | |
5250 | |
5095 | ev_embed_start (EV_A_ w); |
5251 | ev_embed_start (EV_A_ w); |
5096 | } |
5252 | } |
|
|
5253 | #endif |
5097 | |
5254 | |
5098 | #if 0 |
5255 | #if 0 |
5099 | static void |
5256 | static void |
5100 | embed_idle_cb (EV_P_ ev_idle *idle, int revents) |
5257 | embed_idle_cb (EV_P_ ev_idle *idle, int revents) |
5101 | { |
5258 | { |
… | |
… | |
5122 | |
5279 | |
5123 | ev_prepare_init (&w->prepare, embed_prepare_cb); |
5280 | ev_prepare_init (&w->prepare, embed_prepare_cb); |
5124 | ev_set_priority (&w->prepare, EV_MINPRI); |
5281 | ev_set_priority (&w->prepare, EV_MINPRI); |
5125 | ev_prepare_start (EV_A_ &w->prepare); |
5282 | ev_prepare_start (EV_A_ &w->prepare); |
5126 | |
5283 | |
|
|
5284 | #if EV_FORK_ENABLE |
5127 | ev_fork_init (&w->fork, embed_fork_cb); |
5285 | ev_fork_init (&w->fork, embed_fork_cb); |
5128 | ev_fork_start (EV_A_ &w->fork); |
5286 | ev_fork_start (EV_A_ &w->fork); |
|
|
5287 | #endif |
5129 | |
5288 | |
5130 | /*ev_idle_init (&w->idle, e,bed_idle_cb);*/ |
5289 | /*ev_idle_init (&w->idle, e,bed_idle_cb);*/ |
5131 | |
5290 | |
5132 | ev_start (EV_A_ (W)w, 1); |
5291 | ev_start (EV_A_ (W)w, 1); |
5133 | |
5292 | |
… | |
… | |
5143 | |
5302 | |
5144 | EV_FREQUENT_CHECK; |
5303 | EV_FREQUENT_CHECK; |
5145 | |
5304 | |
5146 | ev_io_stop (EV_A_ &w->io); |
5305 | ev_io_stop (EV_A_ &w->io); |
5147 | ev_prepare_stop (EV_A_ &w->prepare); |
5306 | ev_prepare_stop (EV_A_ &w->prepare); |
|
|
5307 | #if EV_FORK_ENABLE |
5148 | ev_fork_stop (EV_A_ &w->fork); |
5308 | ev_fork_stop (EV_A_ &w->fork); |
|
|
5309 | #endif |
5149 | |
5310 | |
5150 | ev_stop (EV_A_ (W)w); |
5311 | ev_stop (EV_A_ (W)w); |
5151 | |
5312 | |
5152 | EV_FREQUENT_CHECK; |
5313 | EV_FREQUENT_CHECK; |
5153 | } |
5314 | } |