… | |
… | |
#define MAX_BLOCKTIME 59.743 /* never wait longer than this time (to detect time jumps) */

/* store the ev_tstamp "t" (a double, seconds) into a struct timeval/timespec; */
/* the integral part goes into tv_sec, the fraction is scaled to micro-/nanoseconds. */
/* NOTE(review): "t" and "tv"/"ts" are expanded unparenthesised - callers must pass */
/* simple lvalues/identifiers, which is how the rest of this file uses them. */
#define EV_TV_SET(tv,t) do { tv.tv_sec = (long)t; tv.tv_usec = (long)((t - tv.tv_sec) * 1e6); } while (0)
#define EV_TS_SET(ts,t) do { ts.tv_sec = (long)t; ts.tv_nsec = (long)((t - ts.tv_sec) * 1e9); } while (0)
468 | |
468 | |
/* the following are taken from libecb */
/* ecb.h start */

/* many compilers define _GNUC_ to some versions but then only implement
 * what their idiot authors think are the "more important" extensions,
 * causing enourmous grief in return for some better fake benchmark numbers.
 * or so.
 * we try to detect these and simply assume they are not gcc - if they have
 * an issue with that they should have done it right in the first place.
 */
#ifndef ECB_GCC_VERSION
#if !defined(__GNUC_MINOR__) || defined(__INTEL_COMPILER) || defined(__SUNPRO_C) || defined(__SUNPRO_CC) || defined(__llvm__) || defined(__clang__)
#define ECB_GCC_VERSION(major,minor) 0
#else
#define ECB_GCC_VERSION(major,minor) (__GNUC__ > (major) || (__GNUC__ == (major) && __GNUC_MINOR__ >= (minor)))
#endif
#endif

/* portable "static inline" - plain "static" as a last resort */
#if __cplusplus
#define ecb_inline static inline
#elif ECB_GCC_VERSION(2,5)
#define ecb_inline static __inline__
#elif ECB_C99
#define ecb_inline static inline
#else
#define ecb_inline static
#endif

/* memory fences: first choice is inline assembly on gcc for x86/amd64 */
#ifndef ECB_MEMORY_FENCE
#if ECB_GCC_VERSION(2,5)
#if __x86
#define ECB_MEMORY_FENCE __asm__ __volatile__ ("lock; orb $0, -1(%%esp)" : : : "memory")
#define ECB_MEMORY_FENCE_ACQUIRE ECB_MEMORY_FENCE
#define ECB_MEMORY_FENCE_RELEASE ECB_MEMORY_FENCE /* better be safe than sorry */
#elif __amd64
#define ECB_MEMORY_FENCE __asm__ __volatile__ ("mfence" : : : "memory")
#define ECB_MEMORY_FENCE_ACQUIRE __asm__ __volatile__ ("lfence" : : : "memory")
#define ECB_MEMORY_FENCE_RELEASE __asm__ __volatile__ ("sfence")
#endif
#endif
#endif

/* second choice: gcc __sync builtins, msvc, or the win32 MemoryBarrier */
#ifndef ECB_MEMORY_FENCE
#if ECB_GCC_VERSION(4,4)
#define ECB_MEMORY_FENCE __sync_synchronize ()
#define ECB_MEMORY_FENCE_ACQUIRE ({ char dummy = 0; __sync_lock_test_and_set (&dummy, 1); })
#define ECB_MEMORY_FENCE_RELEASE ({ char dummy = 1; __sync_lock_release (&dummy ); })
#elif _MSC_VER >= 1400 && 0 /* TODO: only true when using volatiles */
#define ECB_MEMORY_FENCE do { } while (0)
#define ECB_MEMORY_FENCE_ACQUIRE ECB_MEMORY_FENCE
#define ECB_MEMORY_FENCE_RELEASE ECB_MEMORY_FENCE
#elif defined(_WIN32)
#include <WinNT.h>
#define ECB_MEMORY_FENCE MemoryBarrier ()
#define ECB_MEMORY_FENCE_ACQUIRE ECB_MEMORY_FENCE
#define ECB_MEMORY_FENCE_RELEASE ECB_MEMORY_FENCE
#endif
#endif

/* last resort: a pthread mutex lock/unlock pair acts as a full fence */
#ifndef ECB_MEMORY_FENCE
#include <pthread.h>

static pthread_mutex_t ecb_mf_lock = PTHREAD_MUTEX_INITIALIZER;
#define ECB_MEMORY_FENCE do { pthread_mutex_lock (&ecb_mf_lock); pthread_mutex_unlock (&ecb_mf_lock); } while (0)
#define ECB_MEMORY_FENCE_ACQUIRE ECB_MEMORY_FENCE
#define ECB_MEMORY_FENCE_RELEASE ECB_MEMORY_FENCE
#endif

/* gcc attribute/builtin wrappers, expanding to nothing elsewhere */
#if ECB_GCC_VERSION(3,1)
#define ecb_attribute(attrlist) __attribute__(attrlist)
#define ecb_is_constant(expr) __builtin_constant_p (expr)
#define ecb_expect(expr,value) __builtin_expect ((expr),(value))
#define ecb_prefetch(addr,rw,locality) __builtin_prefetch (addr, rw, locality)
#else
#define ecb_attribute(attrlist)
#define ecb_is_constant(expr) 0
#define ecb_expect(expr,value) (expr)
#define ecb_prefetch(addr,rw,locality)
#endif

#define ecb_noinline ecb_attribute ((__noinline__))
#define ecb_noreturn ecb_attribute ((__noreturn__))
#define ecb_unused ecb_attribute ((__unused__))
#define ecb_const ecb_attribute ((__const__))
#define ecb_pure ecb_attribute ((__pure__))

#if ECB_GCC_VERSION(4,3)
#define ecb_artificial ecb_attribute ((__artificial__))
#define ecb_hot ecb_attribute ((__hot__))
#define ecb_cold ecb_attribute ((__cold__))
#else
#define ecb_artificial
#define ecb_hot
#define ecb_cold
#endif

/* put around conditional expressions if you are very sure that the */
/* expression is mostly true or mostly false. note that these return */
/* booleans, not the expression. */
#define ecb_expect_false(expr) ecb_expect (!!(expr), 0)
#define ecb_expect_true(expr)  ecb_expect (!!(expr), 1)
/* ecb.h end */

/* libev-local spellings of the ecb helpers */
#define expect_false(cond) ecb_expect_false (cond)
#define expect_true(cond)  ecb_expect_true  (cond)
#define noinline ecb_noinline

#define inline_size ecb_inline

#if EV_FEATURE_CODE
# define inline_speed ecb_inline
#else
# define inline_speed static noinline
#endif

#define NUMPRI (EV_MAXPRI - EV_MINPRI + 1)
… | |
… | |
581 | |
675 | |
582 | #ifdef __linux |
676 | #ifdef __linux |
583 | # include <sys/utsname.h> |
677 | # include <sys/utsname.h> |
584 | #endif |
678 | #endif |
585 | |
679 | |
586 | static unsigned int noinline |
680 | static unsigned int noinline ecb_cold |
587 | ev_linux_version (void) |
681 | ev_linux_version (void) |
588 | { |
682 | { |
589 | #ifdef __linux |
683 | #ifdef __linux |
590 | unsigned int v = 0; |
684 | unsigned int v = 0; |
591 | struct utsname buf; |
685 | struct utsname buf; |
… | |
… | |
620 | } |
714 | } |
621 | |
715 | |
622 | /*****************************************************************************/ |
716 | /*****************************************************************************/ |
623 | |
717 | |
#if EV_AVOID_STDIO
/* emit "msg" on stderr using a raw write, bypassing stdio entirely */
static void noinline ecb_cold
ev_printerr (const char *msg)
{
  write (STDERR_FILENO, msg, strlen (msg));
}
#endif
631 | |
725 | |
632 | static void (*syserr_cb)(const char *msg); |
726 | static void (*syserr_cb)(const char *msg); |
633 | |
727 | |
634 | void |
728 | void ecb_cold |
635 | ev_set_syserr_cb (void (*cb)(const char *msg)) |
729 | ev_set_syserr_cb (void (*cb)(const char *msg)) |
636 | { |
730 | { |
637 | syserr_cb = cb; |
731 | syserr_cb = cb; |
638 | } |
732 | } |
639 | |
733 | |
640 | static void noinline |
734 | static void noinline ecb_cold |
641 | ev_syserr (const char *msg) |
735 | ev_syserr (const char *msg) |
642 | { |
736 | { |
643 | if (!msg) |
737 | if (!msg) |
644 | msg = "(libev) system error"; |
738 | msg = "(libev) system error"; |
645 | |
739 | |
… | |
… | |
678 | #endif |
772 | #endif |
679 | } |
773 | } |
680 | |
774 | |
681 | static void *(*alloc)(void *ptr, long size) = ev_realloc_emul; |
775 | static void *(*alloc)(void *ptr, long size) = ev_realloc_emul; |
682 | |
776 | |
683 | void |
777 | void ecb_cold |
684 | ev_set_allocator (void *(*cb)(void *ptr, long size)) |
778 | ev_set_allocator (void *(*cb)(void *ptr, long size)) |
685 | { |
779 | { |
686 | alloc = cb; |
780 | alloc = cb; |
687 | } |
781 | } |
688 | |
782 | |
… | |
… | |
870 | select (0, 0, 0, 0, &tv); |
964 | select (0, 0, 0, 0, &tv); |
871 | #endif |
965 | #endif |
872 | } |
966 | } |
873 | } |
967 | } |
874 | |
968 | |
875 | inline_speed int |
|
|
876 | ev_timeout_to_ms (ev_tstamp timeout) |
|
|
877 | { |
|
|
878 | int ms = timeout * 1000. + .999999; |
|
|
879 | |
|
|
880 | return expect_true (ms) ? ms : timeout < 1e-6 ? 0 : 1; |
|
|
881 | } |
|
|
882 | |
|
|
883 | /*****************************************************************************/ |
969 | /*****************************************************************************/ |
884 | |
970 | |
885 | #define MALLOC_ROUND 4096 /* prefer to allocate in chunks of this size, must be 2**n and >> 4 longs */ |
971 | #define MALLOC_ROUND 4096 /* prefer to allocate in chunks of this size, must be 2**n and >> 4 longs */ |
886 | |
972 | |
887 | /* find a suitable new size for the given array, */ |
973 | /* find a suitable new size for the given array, */ |
… | |
… | |
905 | } |
991 | } |
906 | |
992 | |
907 | return ncur; |
993 | return ncur; |
908 | } |
994 | } |
909 | |
995 | |
910 | static noinline void * |
996 | static void * noinline ecb_cold |
911 | array_realloc (int elem, void *base, int *cur, int cnt) |
997 | array_realloc (int elem, void *base, int *cur, int cnt) |
912 | { |
998 | { |
913 | *cur = array_nextsize (elem, *cur, cnt); |
999 | *cur = array_nextsize (elem, *cur, cnt); |
914 | return ev_realloc (base, elem * *cur); |
1000 | return ev_realloc (base, elem * *cur); |
915 | } |
1001 | } |
… | |
… | |
918 | memset ((void *)(base), 0, sizeof (*(base)) * (count)) |
1004 | memset ((void *)(base), 0, sizeof (*(base)) * (count)) |
919 | |
1005 | |
/* ensure capacity "cur" of array "base" covers "cnt" members, growing via */
/* array_realloc when needed and running "init" over the newly added range */
#define array_needsize(type,base,cur,cnt,init)			\
  if (expect_false ((cnt) > (cur)))				\
    {								\
      int ecb_unused ocur_ = (cur);				\
      (base) = (type *)array_realloc				\
         (sizeof (type), (base), &(cur), (cnt));		\
      init ((base) + (ocur_), (cur) - ocur_);			\
    }
928 | |
1014 | |
… | |
… | |
1034 | for (i = 0; i < fdchangecnt; ++i) |
1120 | for (i = 0; i < fdchangecnt; ++i) |
1035 | { |
1121 | { |
1036 | int fd = fdchanges [i]; |
1122 | int fd = fdchanges [i]; |
1037 | ANFD *anfd = anfds + fd; |
1123 | ANFD *anfd = anfds + fd; |
1038 | |
1124 | |
1039 | if (anfd->reify & EV__IOFDSET) |
1125 | if (anfd->reify & EV__IOFDSET && anfd->head) |
1040 | { |
1126 | { |
1041 | SOCKET handle = EV_FD_TO_WIN32_HANDLE (fd); |
1127 | SOCKET handle = EV_FD_TO_WIN32_HANDLE (fd); |
1042 | |
1128 | |
1043 | if (handle != anfd->handle) |
1129 | if (handle != anfd->handle) |
1044 | { |
1130 | { |
… | |
… | |
1098 | fdchanges [fdchangecnt - 1] = fd; |
1184 | fdchanges [fdchangecnt - 1] = fd; |
1099 | } |
1185 | } |
1100 | } |
1186 | } |
1101 | |
1187 | |
1102 | /* the given fd is invalid/unusable, so make sure it doesn't hurt us anymore */ |
1188 | /* the given fd is invalid/unusable, so make sure it doesn't hurt us anymore */ |
1103 | inline_speed void |
1189 | inline_speed void ecb_cold |
1104 | fd_kill (EV_P_ int fd) |
1190 | fd_kill (EV_P_ int fd) |
1105 | { |
1191 | { |
1106 | ev_io *w; |
1192 | ev_io *w; |
1107 | |
1193 | |
1108 | while ((w = (ev_io *)anfds [fd].head)) |
1194 | while ((w = (ev_io *)anfds [fd].head)) |
… | |
… | |
1111 | ev_feed_event (EV_A_ (W)w, EV_ERROR | EV_READ | EV_WRITE); |
1197 | ev_feed_event (EV_A_ (W)w, EV_ERROR | EV_READ | EV_WRITE); |
1112 | } |
1198 | } |
1113 | } |
1199 | } |
1114 | |
1200 | |
1115 | /* check whether the given fd is actually valid, for error recovery */ |
1201 | /* check whether the given fd is actually valid, for error recovery */ |
1116 | inline_size int |
1202 | inline_size int ecb_cold |
1117 | fd_valid (int fd) |
1203 | fd_valid (int fd) |
1118 | { |
1204 | { |
1119 | #ifdef _WIN32 |
1205 | #ifdef _WIN32 |
1120 | return EV_FD_TO_WIN32_HANDLE (fd) != -1; |
1206 | return EV_FD_TO_WIN32_HANDLE (fd) != -1; |
1121 | #else |
1207 | #else |
1122 | return fcntl (fd, F_GETFD) != -1; |
1208 | return fcntl (fd, F_GETFD) != -1; |
1123 | #endif |
1209 | #endif |
1124 | } |
1210 | } |
1125 | |
1211 | |
1126 | /* called on EBADF to verify fds */ |
1212 | /* called on EBADF to verify fds */ |
1127 | static void noinline |
1213 | static void noinline ecb_cold |
1128 | fd_ebadf (EV_P) |
1214 | fd_ebadf (EV_P) |
1129 | { |
1215 | { |
1130 | int fd; |
1216 | int fd; |
1131 | |
1217 | |
1132 | for (fd = 0; fd < anfdmax; ++fd) |
1218 | for (fd = 0; fd < anfdmax; ++fd) |
… | |
… | |
1134 | if (!fd_valid (fd) && errno == EBADF) |
1220 | if (!fd_valid (fd) && errno == EBADF) |
1135 | fd_kill (EV_A_ fd); |
1221 | fd_kill (EV_A_ fd); |
1136 | } |
1222 | } |
1137 | |
1223 | |
1138 | /* called on ENOMEM in select/poll to kill some fds and retry */ |
1224 | /* called on ENOMEM in select/poll to kill some fds and retry */ |
1139 | static void noinline |
1225 | static void noinline ecb_cold |
1140 | fd_enomem (EV_P) |
1226 | fd_enomem (EV_P) |
1141 | { |
1227 | { |
1142 | int fd; |
1228 | int fd; |
1143 | |
1229 | |
1144 | for (fd = anfdmax; fd--; ) |
1230 | for (fd = anfdmax; fd--; ) |
… | |
… | |
1339 | |
1425 | |
1340 | /*****************************************************************************/ |
1426 | /*****************************************************************************/ |
1341 | |
1427 | |
1342 | #if EV_SIGNAL_ENABLE || EV_ASYNC_ENABLE |
1428 | #if EV_SIGNAL_ENABLE || EV_ASYNC_ENABLE |
1343 | |
1429 | |
1344 | static void noinline |
1430 | static void noinline ecb_cold |
1345 | evpipe_init (EV_P) |
1431 | evpipe_init (EV_P) |
1346 | { |
1432 | { |
1347 | if (!ev_is_active (&pipe_w)) |
1433 | if (!ev_is_active (&pipe_w)) |
1348 | { |
1434 | { |
1349 | # if EV_USE_EVENTFD |
1435 | # if EV_USE_EVENTFD |
… | |
… | |
1371 | ev_io_start (EV_A_ &pipe_w); |
1457 | ev_io_start (EV_A_ &pipe_w); |
1372 | ev_unref (EV_A); /* watcher should not keep loop alive */ |
1458 | ev_unref (EV_A); /* watcher should not keep loop alive */ |
1373 | } |
1459 | } |
1374 | } |
1460 | } |
1375 | |
1461 | |
1376 | inline_size void |
1462 | inline_speed void |
1377 | evpipe_write (EV_P_ EV_ATOMIC_T *flag) |
1463 | evpipe_write (EV_P_ EV_ATOMIC_T *flag) |
1378 | { |
1464 | { |
1379 | if (!*flag) |
1465 | if (expect_true (*flag)) |
|
|
1466 | return; |
|
|
1467 | |
|
|
1468 | *flag = 1; |
|
|
1469 | |
|
|
1470 | ECB_MEMORY_FENCE_RELEASE; /* make sure flag is visible before the wakeup */ |
|
|
1471 | |
|
|
1472 | pipe_write_skipped = 1; |
|
|
1473 | |
|
|
1474 | ECB_MEMORY_FENCE; /* make sure pipe_write_skipped is visible before we check pipe_write_wanted */ |
|
|
1475 | |
|
|
1476 | if (pipe_write_wanted) |
1380 | { |
1477 | { |
|
|
1478 | int old_errno; |
|
|
1479 | |
|
|
1480 | pipe_write_skipped = 0; /* just an optimsiation, no fence needed */ |
|
|
1481 | |
1381 | int old_errno = errno; /* save errno because write might clobber it */ |
1482 | old_errno = errno; /* save errno because write will clobber it */ |
1382 | char dummy; |
|
|
1383 | |
|
|
1384 | *flag = 1; |
|
|
1385 | |
1483 | |
1386 | #if EV_USE_EVENTFD |
1484 | #if EV_USE_EVENTFD |
1387 | if (evfd >= 0) |
1485 | if (evfd >= 0) |
1388 | { |
1486 | { |
1389 | uint64_t counter = 1; |
1487 | uint64_t counter = 1; |
1390 | write (evfd, &counter, sizeof (uint64_t)); |
1488 | write (evfd, &counter, sizeof (uint64_t)); |
1391 | } |
1489 | } |
1392 | else |
1490 | else |
1393 | #endif |
1491 | #endif |
|
|
1492 | { |
1394 | /* win32 people keep sending patches that change this write() to send() */ |
1493 | /* win32 people keep sending patches that change this write() to send() */ |
1395 | /* and then run away. but send() is wrong, it wants a socket handle on win32 */ |
1494 | /* and then run away. but send() is wrong, it wants a socket handle on win32 */ |
1396 | /* so when you think this write should be a send instead, please find out */ |
1495 | /* so when you think this write should be a send instead, please find out */ |
1397 | /* where your send() is from - it's definitely not the microsoft send, and */ |
1496 | /* where your send() is from - it's definitely not the microsoft send, and */ |
1398 | /* tell me. thank you. */ |
1497 | /* tell me. thank you. */ |
1399 | write (evpipe [1], &dummy, 1); |
1498 | write (evpipe [1], &(evpipe [1]), 1); |
|
|
1499 | } |
1400 | |
1500 | |
1401 | errno = old_errno; |
1501 | errno = old_errno; |
1402 | } |
1502 | } |
1403 | } |
1503 | } |
1404 | |
1504 | |
… | |
… | |
1407 | static void |
1507 | static void |
1408 | pipecb (EV_P_ ev_io *iow, int revents) |
1508 | pipecb (EV_P_ ev_io *iow, int revents) |
1409 | { |
1509 | { |
1410 | int i; |
1510 | int i; |
1411 | |
1511 | |
|
|
1512 | if (revents & EV_READ) |
|
|
1513 | { |
1412 | #if EV_USE_EVENTFD |
1514 | #if EV_USE_EVENTFD |
1413 | if (evfd >= 0) |
1515 | if (evfd >= 0) |
1414 | { |
1516 | { |
1415 | uint64_t counter; |
1517 | uint64_t counter; |
1416 | read (evfd, &counter, sizeof (uint64_t)); |
1518 | read (evfd, &counter, sizeof (uint64_t)); |
1417 | } |
1519 | } |
1418 | else |
1520 | else |
1419 | #endif |
1521 | #endif |
1420 | { |
1522 | { |
1421 | char dummy; |
1523 | char dummy; |
1422 | /* see discussion in evpipe_write when you think this read should be recv in win32 */ |
1524 | /* see discussion in evpipe_write when you think this read should be recv in win32 */ |
1423 | read (evpipe [0], &dummy, 1); |
1525 | read (evpipe [0], &dummy, 1); |
|
|
1526 | } |
1424 | } |
1527 | } |
|
|
1528 | |
|
|
1529 | pipe_write_skipped = 0; |
1425 | |
1530 | |
1426 | #if EV_SIGNAL_ENABLE |
1531 | #if EV_SIGNAL_ENABLE |
1427 | if (sig_pending) |
1532 | if (sig_pending) |
1428 | { |
1533 | { |
1429 | sig_pending = 0; |
1534 | sig_pending = 0; |
… | |
… | |
1458 | EV_P = signals [signum - 1].loop; |
1563 | EV_P = signals [signum - 1].loop; |
1459 | |
1564 | |
1460 | if (!EV_A) |
1565 | if (!EV_A) |
1461 | return; |
1566 | return; |
1462 | #endif |
1567 | #endif |
|
|
1568 | |
|
|
1569 | if (!ev_active (&pipe_w)) |
|
|
1570 | return; |
1463 | |
1571 | |
1464 | signals [signum - 1].pending = 1; |
1572 | signals [signum - 1].pending = 1; |
1465 | evpipe_write (EV_A_ &sig_pending); |
1573 | evpipe_write (EV_A_ &sig_pending); |
1466 | } |
1574 | } |
1467 | |
1575 | |
… | |
… | |
1599 | #endif |
1707 | #endif |
1600 | #if EV_USE_SELECT |
1708 | #if EV_USE_SELECT |
1601 | # include "ev_select.c" |
1709 | # include "ev_select.c" |
1602 | #endif |
1710 | #endif |
1603 | |
1711 | |
1604 | int |
1712 | int ecb_cold |
1605 | ev_version_major (void) |
1713 | ev_version_major (void) |
1606 | { |
1714 | { |
1607 | return EV_VERSION_MAJOR; |
1715 | return EV_VERSION_MAJOR; |
1608 | } |
1716 | } |
1609 | |
1717 | |
1610 | int |
1718 | int ecb_cold |
1611 | ev_version_minor (void) |
1719 | ev_version_minor (void) |
1612 | { |
1720 | { |
1613 | return EV_VERSION_MINOR; |
1721 | return EV_VERSION_MINOR; |
1614 | } |
1722 | } |
1615 | |
1723 | |
1616 | /* return true if we are running with elevated privileges and should ignore env variables */ |
1724 | /* return true if we are running with elevated privileges and should ignore env variables */ |
1617 | int inline_size |
1725 | int inline_size ecb_cold |
1618 | enable_secure (void) |
1726 | enable_secure (void) |
1619 | { |
1727 | { |
1620 | #ifdef _WIN32 |
1728 | #ifdef _WIN32 |
1621 | return 0; |
1729 | return 0; |
1622 | #else |
1730 | #else |
1623 | return getuid () != geteuid () |
1731 | return getuid () != geteuid () |
1624 | || getgid () != getegid (); |
1732 | || getgid () != getegid (); |
1625 | #endif |
1733 | #endif |
1626 | } |
1734 | } |
1627 | |
1735 | |
1628 | unsigned int |
1736 | unsigned int ecb_cold |
1629 | ev_supported_backends (void) |
1737 | ev_supported_backends (void) |
1630 | { |
1738 | { |
1631 | unsigned int flags = 0; |
1739 | unsigned int flags = 0; |
1632 | |
1740 | |
1633 | if (EV_USE_PORT ) flags |= EVBACKEND_PORT; |
1741 | if (EV_USE_PORT ) flags |= EVBACKEND_PORT; |
… | |
… | |
1637 | if (EV_USE_SELECT) flags |= EVBACKEND_SELECT; |
1745 | if (EV_USE_SELECT) flags |= EVBACKEND_SELECT; |
1638 | |
1746 | |
1639 | return flags; |
1747 | return flags; |
1640 | } |
1748 | } |
1641 | |
1749 | |
1642 | unsigned int |
1750 | unsigned int ecb_cold |
1643 | ev_recommended_backends (void) |
1751 | ev_recommended_backends (void) |
1644 | { |
1752 | { |
1645 | unsigned int flags = ev_supported_backends (); |
1753 | unsigned int flags = ev_supported_backends (); |
1646 | |
1754 | |
1647 | #ifndef __NetBSD__ |
1755 | #ifndef __NetBSD__ |
… | |
… | |
1659 | #endif |
1767 | #endif |
1660 | |
1768 | |
1661 | return flags; |
1769 | return flags; |
1662 | } |
1770 | } |
1663 | |
1771 | |
1664 | unsigned int |
1772 | unsigned int ecb_cold |
1665 | ev_embeddable_backends (void) |
1773 | ev_embeddable_backends (void) |
1666 | { |
1774 | { |
1667 | int flags = EVBACKEND_EPOLL | EVBACKEND_KQUEUE | EVBACKEND_PORT; |
1775 | int flags = EVBACKEND_EPOLL | EVBACKEND_KQUEUE | EVBACKEND_PORT; |
1668 | |
1776 | |
1669 | /* epoll embeddability broken on all linux versions up to at least 2.6.23 */ |
1777 | /* epoll embeddability broken on all linux versions up to at least 2.6.23 */ |
… | |
… | |
1714 | ev_userdata (EV_P) |
1822 | ev_userdata (EV_P) |
1715 | { |
1823 | { |
1716 | return userdata; |
1824 | return userdata; |
1717 | } |
1825 | } |
1718 | |
1826 | |
|
|
1827 | void |
1719 | void ev_set_invoke_pending_cb (EV_P_ void (*invoke_pending_cb)(EV_P)) |
1828 | ev_set_invoke_pending_cb (EV_P_ void (*invoke_pending_cb)(EV_P)) |
1720 | { |
1829 | { |
1721 | invoke_cb = invoke_pending_cb; |
1830 | invoke_cb = invoke_pending_cb; |
1722 | } |
1831 | } |
1723 | |
1832 | |
|
|
1833 | void |
1724 | void ev_set_loop_release_cb (EV_P_ void (*release)(EV_P), void (*acquire)(EV_P)) |
1834 | ev_set_loop_release_cb (EV_P_ void (*release)(EV_P), void (*acquire)(EV_P)) |
1725 | { |
1835 | { |
1726 | release_cb = release; |
1836 | release_cb = release; |
1727 | acquire_cb = acquire; |
1837 | acquire_cb = acquire; |
1728 | } |
1838 | } |
1729 | #endif |
1839 | #endif |
1730 | |
1840 | |
1731 | /* initialise a loop structure, must be zero-initialised */ |
1841 | /* initialise a loop structure, must be zero-initialised */ |
1732 | static void noinline |
1842 | static void noinline ecb_cold |
1733 | loop_init (EV_P_ unsigned int flags) |
1843 | loop_init (EV_P_ unsigned int flags) |
1734 | { |
1844 | { |
1735 | if (!backend) |
1845 | if (!backend) |
1736 | { |
1846 | { |
1737 | origflags = flags; |
1847 | origflags = flags; |
… | |
… | |
1765 | if (!(flags & EVFLAG_NOENV) |
1875 | if (!(flags & EVFLAG_NOENV) |
1766 | && !enable_secure () |
1876 | && !enable_secure () |
1767 | && getenv ("LIBEV_FLAGS")) |
1877 | && getenv ("LIBEV_FLAGS")) |
1768 | flags = atoi (getenv ("LIBEV_FLAGS")); |
1878 | flags = atoi (getenv ("LIBEV_FLAGS")); |
1769 | |
1879 | |
1770 | ev_rt_now = ev_time (); |
1880 | ev_rt_now = ev_time (); |
1771 | mn_now = get_clock (); |
1881 | mn_now = get_clock (); |
1772 | now_floor = mn_now; |
1882 | now_floor = mn_now; |
1773 | rtmn_diff = ev_rt_now - mn_now; |
1883 | rtmn_diff = ev_rt_now - mn_now; |
1774 | #if EV_FEATURE_API |
1884 | #if EV_FEATURE_API |
1775 | invoke_cb = ev_invoke_pending; |
1885 | invoke_cb = ev_invoke_pending; |
1776 | #endif |
1886 | #endif |
1777 | |
1887 | |
1778 | io_blocktime = 0.; |
1888 | io_blocktime = 0.; |
1779 | timeout_blocktime = 0.; |
1889 | timeout_blocktime = 0.; |
1780 | backend = 0; |
1890 | backend = 0; |
1781 | backend_fd = -1; |
1891 | backend_fd = -1; |
1782 | sig_pending = 0; |
1892 | sig_pending = 0; |
1783 | #if EV_ASYNC_ENABLE |
1893 | #if EV_ASYNC_ENABLE |
1784 | async_pending = 0; |
1894 | async_pending = 0; |
1785 | #endif |
1895 | #endif |
|
|
1896 | pipe_write_skipped = 0; |
|
|
1897 | pipe_write_wanted = 0; |
1786 | #if EV_USE_INOTIFY |
1898 | #if EV_USE_INOTIFY |
1787 | fs_fd = flags & EVFLAG_NOINOTIFY ? -1 : -2; |
1899 | fs_fd = flags & EVFLAG_NOINOTIFY ? -1 : -2; |
1788 | #endif |
1900 | #endif |
1789 | #if EV_USE_SIGNALFD |
1901 | #if EV_USE_SIGNALFD |
1790 | sigfd = flags & EVFLAG_SIGNALFD ? -2 : -1; |
1902 | sigfd = flags & EVFLAG_SIGNALFD ? -2 : -1; |
1791 | #endif |
1903 | #endif |
1792 | |
1904 | |
1793 | if (!(flags & EVBACKEND_MASK)) |
1905 | if (!(flags & EVBACKEND_MASK)) |
1794 | flags |= ev_recommended_backends (); |
1906 | flags |= ev_recommended_backends (); |
1795 | |
1907 | |
… | |
… | |
1820 | #endif |
1932 | #endif |
1821 | } |
1933 | } |
1822 | } |
1934 | } |
1823 | |
1935 | |
1824 | /* free up a loop structure */ |
1936 | /* free up a loop structure */ |
1825 | void |
1937 | void ecb_cold |
1826 | ev_loop_destroy (EV_P) |
1938 | ev_loop_destroy (EV_P) |
1827 | { |
1939 | { |
1828 | int i; |
1940 | int i; |
1829 | |
1941 | |
1830 | #if EV_MULTIPLICITY |
1942 | #if EV_MULTIPLICITY |
… | |
… | |
1960 | infy_fork (EV_A); |
2072 | infy_fork (EV_A); |
1961 | #endif |
2073 | #endif |
1962 | |
2074 | |
1963 | if (ev_is_active (&pipe_w)) |
2075 | if (ev_is_active (&pipe_w)) |
1964 | { |
2076 | { |
1965 | /* this "locks" the handlers against writing to the pipe */ |
2077 | /* pipe_write_wanted must be false now, so modifying fd vars should be safe */ |
1966 | /* while we modify the fd vars */ |
|
|
1967 | sig_pending = 1; |
|
|
1968 | #if EV_ASYNC_ENABLE |
|
|
1969 | async_pending = 1; |
|
|
1970 | #endif |
|
|
1971 | |
2078 | |
1972 | ev_ref (EV_A); |
2079 | ev_ref (EV_A); |
1973 | ev_io_stop (EV_A_ &pipe_w); |
2080 | ev_io_stop (EV_A_ &pipe_w); |
1974 | |
2081 | |
1975 | #if EV_USE_EVENTFD |
2082 | #if EV_USE_EVENTFD |
… | |
… | |
1993 | postfork = 0; |
2100 | postfork = 0; |
1994 | } |
2101 | } |
1995 | |
2102 | |
1996 | #if EV_MULTIPLICITY |
2103 | #if EV_MULTIPLICITY |
1997 | |
2104 | |
1998 | struct ev_loop * |
2105 | struct ev_loop * ecb_cold |
1999 | ev_loop_new (unsigned int flags) |
2106 | ev_loop_new (unsigned int flags) |
2000 | { |
2107 | { |
2001 | EV_P = (struct ev_loop *)ev_malloc (sizeof (struct ev_loop)); |
2108 | EV_P = (struct ev_loop *)ev_malloc (sizeof (struct ev_loop)); |
2002 | |
2109 | |
2003 | memset (EV_A, 0, sizeof (struct ev_loop)); |
2110 | memset (EV_A, 0, sizeof (struct ev_loop)); |
… | |
… | |
2011 | } |
2118 | } |
2012 | |
2119 | |
2013 | #endif /* multiplicity */ |
2120 | #endif /* multiplicity */ |
2014 | |
2121 | |
2015 | #if EV_VERIFY |
2122 | #if EV_VERIFY |
2016 | static void noinline |
2123 | static void noinline ecb_cold |
2017 | verify_watcher (EV_P_ W w) |
2124 | verify_watcher (EV_P_ W w) |
2018 | { |
2125 | { |
2019 | assert (("libev: watcher has invalid priority", ABSPRI (w) >= 0 && ABSPRI (w) < NUMPRI)); |
2126 | assert (("libev: watcher has invalid priority", ABSPRI (w) >= 0 && ABSPRI (w) < NUMPRI)); |
2020 | |
2127 | |
2021 | if (w->pending) |
2128 | if (w->pending) |
2022 | assert (("libev: pending watcher not on pending queue", pendings [ABSPRI (w)][w->pending - 1].w == w)); |
2129 | assert (("libev: pending watcher not on pending queue", pendings [ABSPRI (w)][w->pending - 1].w == w)); |
2023 | } |
2130 | } |
2024 | |
2131 | |
2025 | static void noinline |
2132 | static void noinline ecb_cold |
2026 | verify_heap (EV_P_ ANHE *heap, int N) |
2133 | verify_heap (EV_P_ ANHE *heap, int N) |
2027 | { |
2134 | { |
2028 | int i; |
2135 | int i; |
2029 | |
2136 | |
2030 | for (i = HEAP0; i < N + HEAP0; ++i) |
2137 | for (i = HEAP0; i < N + HEAP0; ++i) |
… | |
… | |
2035 | |
2142 | |
2036 | verify_watcher (EV_A_ (W)ANHE_w (heap [i])); |
2143 | verify_watcher (EV_A_ (W)ANHE_w (heap [i])); |
2037 | } |
2144 | } |
2038 | } |
2145 | } |
2039 | |
2146 | |
2040 | static void noinline |
2147 | static void noinline ecb_cold |
2041 | array_verify (EV_P_ W *ws, int cnt) |
2148 | array_verify (EV_P_ W *ws, int cnt) |
2042 | { |
2149 | { |
2043 | while (cnt--) |
2150 | while (cnt--) |
2044 | { |
2151 | { |
2045 | assert (("libev: active index mismatch", ev_active (ws [cnt]) == cnt + 1)); |
2152 | assert (("libev: active index mismatch", ev_active (ws [cnt]) == cnt + 1)); |
… | |
… | |
2047 | } |
2154 | } |
2048 | } |
2155 | } |
2049 | #endif |
2156 | #endif |
2050 | |
2157 | |
2051 | #if EV_FEATURE_API |
2158 | #if EV_FEATURE_API |
2052 | void |
2159 | void ecb_cold |
2053 | ev_verify (EV_P) |
2160 | ev_verify (EV_P) |
2054 | { |
2161 | { |
2055 | #if EV_VERIFY |
2162 | #if EV_VERIFY |
2056 | int i; |
2163 | int i; |
2057 | WL w; |
2164 | WL w; |
… | |
… | |
2123 | #endif |
2230 | #endif |
2124 | } |
2231 | } |
2125 | #endif |
2232 | #endif |
2126 | |
2233 | |
2127 | #if EV_MULTIPLICITY |
2234 | #if EV_MULTIPLICITY |
2128 | struct ev_loop * |
2235 | struct ev_loop * ecb_cold |
2129 | #else |
2236 | #else |
2130 | int |
2237 | int |
2131 | #endif |
2238 | #endif |
2132 | ev_default_loop (unsigned int flags) |
2239 | ev_default_loop (unsigned int flags) |
2133 | { |
2240 | { |
… | |
… | |
2332 | } |
2439 | } |
2333 | } |
2440 | } |
2334 | |
2441 | |
2335 | /* simply recalculate all periodics */ |
2442 | /* simply recalculate all periodics */ |
2336 | /* TODO: maybe ensure that at least one event happens when jumping forward? */ |
2443 | /* TODO: maybe ensure that at least one event happens when jumping forward? */ |
2337 | static void noinline |
2444 | static void noinline ecb_cold |
2338 | periodics_reschedule (EV_P) |
2445 | periodics_reschedule (EV_P) |
2339 | { |
2446 | { |
2340 | int i; |
2447 | int i; |
2341 | |
2448 | |
2342 | /* adjust periodics after time jump */ |
2449 | /* adjust periodics after time jump */ |
… | |
… | |
2355 | reheap (periodics, periodiccnt); |
2462 | reheap (periodics, periodiccnt); |
2356 | } |
2463 | } |
2357 | #endif |
2464 | #endif |
2358 | |
2465 | |
2359 | /* adjust all timers by a given offset */ |
2466 | /* adjust all timers by a given offset */ |
2360 | static void noinline |
2467 | static void noinline ecb_cold |
2361 | timers_reschedule (EV_P_ ev_tstamp adjust) |
2468 | timers_reschedule (EV_P_ ev_tstamp adjust) |
2362 | { |
2469 | { |
2363 | int i; |
2470 | int i; |
2364 | |
2471 | |
2365 | for (i = 0; i < timercnt; ++i) |
2472 | for (i = 0; i < timercnt; ++i) |
… | |
… | |
2507 | ev_tstamp prev_mn_now = mn_now; |
2614 | ev_tstamp prev_mn_now = mn_now; |
2508 | |
2615 | |
2509 | /* update time to cancel out callback processing overhead */ |
2616 | /* update time to cancel out callback processing overhead */ |
2510 | time_update (EV_A_ 1e100); |
2617 | time_update (EV_A_ 1e100); |
2511 | |
2618 | |
|
|
2619 | /* from now on, we want a pipe-wake-up */ |
|
|
2620 | pipe_write_wanted = 1; |
|
|
2621 | |
|
|
2622 | ECB_MEMORY_FENCE; /* amke sure pipe_write_wanted is visible before we check for potential skips */ |
|
|
2623 | |
2512 | if (expect_true (!(flags & EVRUN_NOWAIT || idleall || !activecnt))) |
2624 | if (expect_true (!(flags & EVRUN_NOWAIT || idleall || !activecnt || pipe_write_skipped))) |
2513 | { |
2625 | { |
2514 | waittime = MAX_BLOCKTIME; |
2626 | waittime = MAX_BLOCKTIME; |
2515 | |
2627 | |
2516 | if (timercnt) |
2628 | if (timercnt) |
2517 | { |
2629 | { |
2518 | ev_tstamp to = ANHE_at (timers [HEAP0]) - mn_now + backend_fudge; |
2630 | ev_tstamp to = ANHE_at (timers [HEAP0]) - mn_now; |
2519 | if (waittime > to) waittime = to; |
2631 | if (waittime > to) waittime = to; |
2520 | } |
2632 | } |
2521 | |
2633 | |
2522 | #if EV_PERIODIC_ENABLE |
2634 | #if EV_PERIODIC_ENABLE |
2523 | if (periodiccnt) |
2635 | if (periodiccnt) |
2524 | { |
2636 | { |
2525 | ev_tstamp to = ANHE_at (periodics [HEAP0]) - ev_rt_now + backend_fudge; |
2637 | ev_tstamp to = ANHE_at (periodics [HEAP0]) - ev_rt_now; |
2526 | if (waittime > to) waittime = to; |
2638 | if (waittime > to) waittime = to; |
2527 | } |
2639 | } |
2528 | #endif |
2640 | #endif |
2529 | |
2641 | |
2530 | /* don't let timeouts decrease the waittime below timeout_blocktime */ |
2642 | /* don't let timeouts decrease the waittime below timeout_blocktime */ |
2531 | if (expect_false (waittime < timeout_blocktime)) |
2643 | if (expect_false (waittime < timeout_blocktime)) |
2532 | waittime = timeout_blocktime; |
2644 | waittime = timeout_blocktime; |
|
|
2645 | |
|
|
2646 | /* at this point, we NEED to wait, so we have to ensure */ |
|
|
2647 | /* to pass a minimum nonzero value to the backend */ |
|
|
2648 | if (expect_false (waittime < backend_mintime)) |
|
|
2649 | waittime = backend_mintime; |
2533 | |
2650 | |
2534 | /* extra check because io_blocktime is commonly 0 */ |
2651 | /* extra check because io_blocktime is commonly 0 */ |
2535 | if (expect_false (io_blocktime)) |
2652 | if (expect_false (io_blocktime)) |
2536 | { |
2653 | { |
2537 | sleeptime = io_blocktime - (mn_now - prev_mn_now); |
2654 | sleeptime = io_blocktime - (mn_now - prev_mn_now); |
2538 | |
2655 | |
2539 | if (sleeptime > waittime - backend_fudge) |
2656 | if (sleeptime > waittime - backend_mintime) |
2540 | sleeptime = waittime - backend_fudge; |
2657 | sleeptime = waittime - backend_mintime; |
2541 | |
2658 | |
2542 | if (expect_true (sleeptime > 0.)) |
2659 | if (expect_true (sleeptime > 0.)) |
2543 | { |
2660 | { |
2544 | ev_sleep (sleeptime); |
2661 | ev_sleep (sleeptime); |
2545 | waittime -= sleeptime; |
2662 | waittime -= sleeptime; |
… | |
… | |
2551 | ++loop_count; |
2668 | ++loop_count; |
2552 | #endif |
2669 | #endif |
2553 | assert ((loop_done = EVBREAK_RECURSE, 1)); /* assert for side effect */ |
2670 | assert ((loop_done = EVBREAK_RECURSE, 1)); /* assert for side effect */ |
2554 | backend_poll (EV_A_ waittime); |
2671 | backend_poll (EV_A_ waittime); |
2555 | assert ((loop_done = EVBREAK_CANCEL, 1)); /* assert for side effect */ |
2672 | assert ((loop_done = EVBREAK_CANCEL, 1)); /* assert for side effect */ |
|
|
2673 | |
|
|
2674 | pipe_write_wanted = 0; /* just an optimsiation, no fence needed */ |
|
|
2675 | |
|
|
2676 | if (pipe_write_skipped) |
|
|
2677 | { |
|
|
2678 | assert (("libev: pipe_w not active, but pipe not written", ev_is_active (&pipe_w))); |
|
|
2679 | ev_feed_event (EV_A_ &pipe_w, EV_CUSTOM); |
|
|
2680 | } |
|
|
2681 | |
2556 | |
2682 | |
2557 | /* update ev_rt_now, do magic */ |
2683 | /* update ev_rt_now, do magic */ |
2558 | time_update (EV_A_ waittime + sleeptime); |
2684 | time_update (EV_A_ waittime + sleeptime); |
2559 | } |
2685 | } |
2560 | |
2686 | |
… | |
… | |
3214 | infy_wd (EV_A_ ev->wd, ev->wd, ev); |
3340 | infy_wd (EV_A_ ev->wd, ev->wd, ev); |
3215 | ofs += sizeof (struct inotify_event) + ev->len; |
3341 | ofs += sizeof (struct inotify_event) + ev->len; |
3216 | } |
3342 | } |
3217 | } |
3343 | } |
3218 | |
3344 | |
3219 | inline_size void |
3345 | inline_size void ecb_cold |
3220 | ev_check_2625 (EV_P) |
3346 | ev_check_2625 (EV_P) |
3221 | { |
3347 | { |
3222 | /* kernels < 2.6.25 are borked |
3348 | /* kernels < 2.6.25 are borked |
3223 | * http://www.ussg.indiana.edu/hypermail/linux/kernel/0711.3/1208.html |
3349 | * http://www.ussg.indiana.edu/hypermail/linux/kernel/0711.3/1208.html |
3224 | */ |
3350 | */ |
… | |
… | |
3849 | } |
3975 | } |
3850 | |
3976 | |
3851 | /*****************************************************************************/ |
3977 | /*****************************************************************************/ |
3852 | |
3978 | |
3853 | #if EV_WALK_ENABLE |
3979 | #if EV_WALK_ENABLE |
3854 | void |
3980 | void ecb_cold |
3855 | ev_walk (EV_P_ int types, void (*cb)(EV_P_ int type, void *w)) |
3981 | ev_walk (EV_P_ int types, void (*cb)(EV_P_ int type, void *w)) |
3856 | { |
3982 | { |
3857 | int i, j; |
3983 | int i, j; |
3858 | ev_watcher_list *wl, *wn; |
3984 | ev_watcher_list *wl, *wn; |
3859 | |
3985 | |