
Comparing libev/ev.c (file contents):
Revision 1.371 by root, Mon Feb 7 21:45:32 2011 UTC vs.
Revision 1.388 by root, Fri Jul 29 12:17:26 2011 UTC

4 * Copyright (c) 2007,2008,2009,2010,2011 Marc Alexander Lehmann <libev@schmorp.de> 4 * Copyright (c) 2007,2008,2009,2010,2011 Marc Alexander Lehmann <libev@schmorp.de>
5 * All rights reserved. 5 * All rights reserved.
6 * 6 *
7 * Redistribution and use in source and binary forms, with or without modifica- 7 * Redistribution and use in source and binary forms, with or without modifica-
8 * tion, are permitted provided that the following conditions are met: 8 * tion, are permitted provided that the following conditions are met:
9 * 9 *
10 * 1. Redistributions of source code must retain the above copyright notice, 10 * 1. Redistributions of source code must retain the above copyright notice,
11 * this list of conditions and the following disclaimer. 11 * this list of conditions and the following disclaimer.
12 * 12 *
13 * 2. Redistributions in binary form must reproduce the above copyright 13 * 2. Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in the 14 * notice, this list of conditions and the following disclaimer in the
15 * documentation and/or other materials provided with the distribution. 15 * documentation and/or other materials provided with the distribution.
16 * 16 *
17 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED 17 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
18 * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MER- 18 * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MER-
19 * CHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO 19 * CHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
20 * EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPE- 20 * EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPE-
21 * CIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, 21 * CIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
42# ifdef EV_CONFIG_H 42# ifdef EV_CONFIG_H
43# include EV_CONFIG_H 43# include EV_CONFIG_H
44# else 44# else
45# include "config.h" 45# include "config.h"
46# endif 46# endif
47
48#if HAVE_FLOOR
49# ifndef EV_USE_FLOOR
50# define EV_USE_FLOOR 1
51# endif
52#endif
47 53
48# if HAVE_CLOCK_SYSCALL 54# if HAVE_CLOCK_SYSCALL
49# ifndef EV_USE_CLOCK_SYSCALL 55# ifndef EV_USE_CLOCK_SYSCALL
50# define EV_USE_CLOCK_SYSCALL 1 56# define EV_USE_CLOCK_SYSCALL 1
51# ifndef EV_USE_REALTIME 57# ifndef EV_USE_REALTIME
156# define EV_USE_EVENTFD 0 162# define EV_USE_EVENTFD 0
157# endif 163# endif
158 164
159#endif 165#endif
160 166
161#include <math.h>
162#include <stdlib.h> 167#include <stdlib.h>
163#include <string.h> 168#include <string.h>
164#include <fcntl.h> 169#include <fcntl.h>
165#include <stddef.h> 170#include <stddef.h>
166 171
232/* to make it compile regardless, just remove the above line, */ 237/* to make it compile regardless, just remove the above line, */
233/* but consider reporting it, too! :) */ 238/* but consider reporting it, too! :) */
234# define EV_NSIG 65 239# define EV_NSIG 65
235#endif 240#endif
236 241
242#ifndef EV_USE_FLOOR
243# define EV_USE_FLOOR 0
244#endif
245
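The HAVE_FLOOR detection above and the EV_USE_FLOOR default here form a new knob: it selects between the libm floor() and the integer-based ev_floor() fallback defined later in this revision. A minimal sketch of an embedding unit that forces the fallback; EV_STANDALONE and EV_USE_FLOOR are real libev configuration macros, but the file itself is hypothetical:

    /* myapp_ev.c - illustrative embedder unit, not part of this diff */
    #define EV_STANDALONE 1   /* do not consult config.h              */
    #define EV_USE_FLOOR  0   /* use ev_floor () instead of floor (3) */
    #include "ev.c"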
237#ifndef EV_USE_CLOCK_SYSCALL 246#ifndef EV_USE_CLOCK_SYSCALL
238# if __linux && __GLIBC__ >= 2 247# if __linux && __GLIBC__ >= 2
239# define EV_USE_CLOCK_SYSCALL EV_FEATURE_OS 248# define EV_USE_CLOCK_SYSCALL EV_FEATURE_OS
240# else 249# else
241# define EV_USE_CLOCK_SYSCALL 0 250# define EV_USE_CLOCK_SYSCALL 0
443#else 452#else
444# define EV_FREQUENT_CHECK do { } while (0) 453# define EV_FREQUENT_CHECK do { } while (0)
445#endif 454#endif
446 455
447/* 456/*
448 * This is used to avoid floating point rounding problems. 457 * This is used to work around floating point rounding problems.
449 * It is added to ev_rt_now when scheduling periodics
450 * to ensure progress, time-wise, even when rounding
451 * errors are against us.
452 * This value is good at least till the year 4000. 458 * This value is good at least till the year 4000.
453 * Better solutions welcome.
454 */ 459 */
455#define TIME_EPSILON 0.0001220703125 /* 1/8192 */ 460#define MIN_INTERVAL 0.0001220703125 /* 1/2**13, good till 4000 */
461/*#define MIN_INTERVAL 0.00000095367431640625 /* 1/2**20, good till 2200 */
456 462
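MIN_INTERVAL is the renamed TIME_EPSILON, reused below by periodic_recalc as the smallest interval it will schedule with. The year figures in the two comments follow from double precision, roughly as follows: timestamps near the year 4000 are about 2^36 seconds past the epoch, where adjacent doubles are spaced about 2^-16 s apart, still much finer than 2^-13, so adding MIN_INTERVAL always moves time forward; the commented-out 2^-20 variant stops making progress once the spacing reaches it, around 2^33 s, i.e. somewhere in the 2200s.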
457#define MIN_TIMEJUMP 1. /* minimum timejump that gets detected (if monotonic clock available) */ 463#define MIN_TIMEJUMP 1. /* minimum timejump that gets detected (if monotonic clock available) */
458#define MAX_BLOCKTIME 59.743 /* never wait longer than this time (to detect time jumps) */ 464#define MAX_BLOCKTIME 59.743 /* never wait longer than this time (to detect time jumps) */
459 465
460#define EV_TV_SET(tv,t) do { tv.tv_sec = (long)t; tv.tv_usec = (long)((t - tv.tv_sec) * 1e6); } while (0) 466#define EV_TV_SET(tv,t) do { tv.tv_sec = (long)t; tv.tv_usec = (long)((t - tv.tv_sec) * 1e6); } while (0)
461#define EV_TS_SET(ts,t) do { ts.tv_sec = (long)t; ts.tv_nsec = (long)((t - ts.tv_sec) * 1e9); } while (0) 467#define EV_TS_SET(ts,t) do { ts.tv_sec = (long)t; ts.tv_nsec = (long)((t - ts.tv_sec) * 1e9); } while (0)
462 468
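EV_TV_SET and EV_TS_SET split an ev_tstamp into the seconds/sub-seconds fields of a struct timeval or timespec. An illustration of the pattern, mirroring how ev_sleep and the backends use it; sleep_for itself is a made-up helper, not code from this file:

    #include <time.h>

    static void sleep_for (double delay)   /* an ev_tstamp-style relative time */
    {
      struct timespec ts;
      EV_TS_SET (ts, delay);               /* tv_sec = whole seconds, tv_nsec = fraction * 1e9 */
      nanosleep (&ts, 0);
    }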
463#if __GNUC__ >= 4 469/* the following are taken from libecb */
464# define expect(expr,value) __builtin_expect ((expr),(value)) 470/* ecb.h start */
465# define noinline __attribute__ ((noinline)) 471
 472/* many compilers define __GNUC__ to some versions but then only implement
 473 * what their idiot authors think are the "more important" extensions,
 474 * causing enormous grief in return for some better fake benchmark numbers.
475 * or so.
476 * we try to detect these and simply assume they are not gcc - if they have
477 * an issue with that they should have done it right in the first place.
478 */
479#ifndef ECB_GCC_VERSION
480 #if !defined(__GNUC_MINOR__) || defined(__INTEL_COMPILER) || defined(__SUNPRO_C) || defined(__SUNPRO_CC) || defined(__llvm__) || defined(__clang__)
481 #define ECB_GCC_VERSION(major,minor) 0
482 #else
483 #define ECB_GCC_VERSION(major,minor) (__GNUC__ > (major) || (__GNUC__ == (major) && __GNUC_MINOR__ >= (minor)))
484 #endif
485#endif
486
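ECB_GCC_VERSION(major,minor) is true only for a genuine gcc of at least that version and evaluates to 0 for compilers that merely define __GNUC__ (icc, Sun CC, clang/llvm). Typical use, mirroring the attribute blocks further down; my_pure is an illustrative macro name:

    #if ECB_GCC_VERSION(3,1)
    # define my_pure __attribute__ ((__pure__))   /* only trust the attribute on real gcc >= 3.1 */
    #else
    # define my_pure
    #endif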
487#if __cplusplus
488 #define ecb_inline static inline
489#elif ECB_GCC_VERSION(2,5)
490 #define ecb_inline static __inline__
491#elif ECB_C99
492 #define ecb_inline static inline
466#else 493#else
467# define expect(expr,value) (expr) 494 #define ecb_inline static
468# define noinline
469# if __STDC_VERSION__ < 199901L && __GNUC__ < 2
470# define inline
471# endif 495#endif
496
497#ifndef ECB_MEMORY_FENCE
498 #if ECB_GCC_VERSION(2,5)
499 #if __x86
500 #define ECB_MEMORY_FENCE __asm__ __volatile__ ("lock; orb $0, -1(%%esp)" : : : "memory")
501 #define ECB_MEMORY_FENCE_ACQUIRE ECB_MEMORY_FENCE
502 #define ECB_MEMORY_FENCE_RELEASE ECB_MEMORY_FENCE /* better be safe than sorry */
503 #elif __amd64
504 #define ECB_MEMORY_FENCE __asm__ __volatile__ ("mfence" : : : "memory")
505 #define ECB_MEMORY_FENCE_ACQUIRE __asm__ __volatile__ ("lfence" : : : "memory")
506 #define ECB_MEMORY_FENCE_RELEASE __asm__ __volatile__ ("sfence")
507 #endif
472#endif 508 #endif
509#endif
473 510
511#ifndef ECB_MEMORY_FENCE
512 #if ECB_GCC_VERSION(4,4)
513 #define ECB_MEMORY_FENCE __sync_synchronize ()
514 #define ECB_MEMORY_FENCE_ACQUIRE ({ char dummy = 0; __sync_lock_test_and_set (&dummy, 1); })
515 #define ECB_MEMORY_FENCE_RELEASE ({ char dummy = 1; __sync_lock_release (&dummy ); })
516 #elif _MSC_VER >= 1400 && 0 /* TODO: only true when using volatiles */
517 #define ECB_MEMORY_FENCE do { } while (0)
518 #define ECB_MEMORY_FENCE_ACQUIRE ECB_MEMORY_FENCE
519 #define ECB_MEMORY_FENCE_RELEASE ECB_MEMORY_FENCE
520 #elif defined(_WIN32)
521 #include <WinNT.h>
522 #define ECB_MEMORY_FENCE MemoryBarrier ()
523 #define ECB_MEMORY_FENCE_ACQUIRE ECB_MEMORY_FENCE
524 #define ECB_MEMORY_FENCE_RELEASE ECB_MEMORY_FENCE
525 #endif
526#endif
527
528#ifndef ECB_MEMORY_FENCE
529 #include <pthread.h>
530
531 static pthread_mutex_t ecb_mf_lock = PTHREAD_MUTEX_INITIALIZER;
532 #define ECB_MEMORY_FENCE do { pthread_mutex_lock (&ecb_mf_lock); pthread_mutex_unlock (&ecb_mf_lock); } while (0)
533 #define ECB_MEMORY_FENCE_ACQUIRE ECB_MEMORY_FENCE
534 #define ECB_MEMORY_FENCE_RELEASE ECB_MEMORY_FENCE
535#endif
536
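These fences let one context publish data and then raise a flag that another context polls, which is exactly the ordering evpipe_write below relies on. A minimal sketch of that store-release pattern; payload and ready are illustrative variables, not libev state:

    static int          payload;   /* data being handed over             */
    static volatile int ready;     /* flag the consumer polls            */

    static void publish (void)
    {
      payload = 42;                /* write the payload first            */
      ECB_MEMORY_FENCE_RELEASE;    /* keep that store ahead of ...       */
      ready = 1;                   /* ... the flag that makes it visible */
    }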
537#if ECB_GCC_VERSION(3,1)
538 #define ecb_attribute(attrlist) __attribute__(attrlist)
539 #define ecb_is_constant(expr) __builtin_constant_p (expr)
540 #define ecb_expect(expr,value) __builtin_expect ((expr),(value))
541 #define ecb_prefetch(addr,rw,locality) __builtin_prefetch (addr, rw, locality)
542#else
543 #define ecb_attribute(attrlist)
544 #define ecb_is_constant(expr) 0
545 #define ecb_expect(expr,value) (expr)
546 #define ecb_prefetch(addr,rw,locality)
547#endif
548
549#define ecb_noinline ecb_attribute ((__noinline__))
550#define ecb_noreturn ecb_attribute ((__noreturn__))
551#define ecb_unused ecb_attribute ((__unused__))
552#define ecb_const ecb_attribute ((__const__))
553#define ecb_pure ecb_attribute ((__pure__))
554
555#if ECB_GCC_VERSION(4,3)
556 #define ecb_artificial ecb_attribute ((__artificial__))
557 #define ecb_hot ecb_attribute ((__hot__))
558 #define ecb_cold ecb_attribute ((__cold__))
559#else
560 #define ecb_artificial
561 #define ecb_hot
562 #define ecb_cold
563#endif
564
565/* put around conditional expressions if you are very sure that the */
566/* expression is mostly true or mostly false. note that these return */
567/* booleans, not the expression. */
474#define expect_false(expr) expect ((expr) != 0, 0) 568#define ecb_expect_false(expr) ecb_expect (!!(expr), 0)
475#define expect_true(expr) expect ((expr) != 0, 1) 569#define ecb_expect_true(expr) ecb_expect (!!(expr), 1)
570/* ecb.h end */
571
572#define expect_false(cond) ecb_expect_false (cond)
573#define expect_true(cond) ecb_expect_true (cond)
574#define noinline ecb_noinline
575
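As the comment above notes, the ecb_expect_* wrappers yield a 0/1 boolean, not the expression, so they belong only inside conditions. Illustrative use; handle_rare_error is a made-up function:

    if (expect_false (fd < 0))   /* hint to the compiler: the error path is cold */
      handle_rare_error ();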
476#define inline_size static inline 576#define inline_size ecb_inline
477 577
478#if EV_FEATURE_CODE 578#if EV_FEATURE_CODE
479# define inline_speed static inline 579# define inline_speed ecb_inline
480#else 580#else
481# define inline_speed static noinline 581# define inline_speed static noinline
482#endif 582#endif
483 583
484#define NUMPRI (EV_MAXPRI - EV_MINPRI + 1) 584#define NUMPRI (EV_MAXPRI - EV_MINPRI + 1)
523# include "ev_win32.c" 623# include "ev_win32.c"
524#endif 624#endif
525 625
526/*****************************************************************************/ 626/*****************************************************************************/
527 627
628/* define a suitable floor function (only used by periodics atm) */
629
630#if EV_USE_FLOOR
631# include <math.h>
632# define ev_floor(v) floor (v)
633#else
634
635#include <float.h>
636
637/* a floor() replacement function, should be independent of ev_tstamp type */
638static ev_tstamp noinline
639ev_floor (ev_tstamp v)
640{
641 /* the choice of shift factor is not terribly important */
642#if FLT_RADIX != 2 /* assume FLT_RADIX == 10 */
643 const ev_tstamp shift = sizeof (unsigned long) >= 8 ? 10000000000000000000. : 1000000000.;
644#else
645 const ev_tstamp shift = sizeof (unsigned long) >= 8 ? 18446744073709551616. : 4294967296.;
646#endif
647
648 /* argument too large for an unsigned long? */
649 if (expect_false (v >= shift))
650 {
651 ev_tstamp f;
652
653 if (v == v - 1.)
654 return v; /* very large number */
655
656 f = shift * ev_floor (v * (1. / shift));
657 return f + ev_floor (v - f);
658 }
659
660 /* special treatment for negative args? */
661 if (expect_false (v < 0.))
662 {
663 ev_tstamp f = -ev_floor (-v);
664
665 return f - (f == v ? 0 : 1);
666 }
667
668 /* fits into an unsigned long */
669 return (unsigned long)v;
670}
671
672#endif
673
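The fallback ev_floor needs no libm: values that fit are floored via a cast to unsigned long, negatives are mirrored around zero, and anything larger than the chosen power-of-two shift is handled by recursing on a scaled-down copy. A few informal spot checks of the intended behaviour; this helper is illustrative and only meaningful with EV_USE_FLOOR 0:

    #include <assert.h>

    static void ev_floor_spot_checks (void)
    {
      assert (ev_floor ( 2.75) ==  2.);
      assert (ev_floor ( 5.00) ==  5.);
      assert (ev_floor (-2.25) == -3.);   /* rounds toward -infinity, unlike a plain cast */
    }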
674/*****************************************************************************/
675
528#ifdef __linux 676#ifdef __linux
529# include <sys/utsname.h> 677# include <sys/utsname.h>
530#endif 678#endif
531 679
532static unsigned int noinline 680static unsigned int noinline ecb_cold
533ev_linux_version (void) 681ev_linux_version (void)
534{ 682{
535#ifdef __linux 683#ifdef __linux
536 unsigned int v = 0; 684 unsigned int v = 0;
537 struct utsname buf; 685 struct utsname buf;
566} 714}
567 715
568/*****************************************************************************/ 716/*****************************************************************************/
569 717
570#if EV_AVOID_STDIO 718#if EV_AVOID_STDIO
571static void noinline 719static void noinline ecb_cold
572ev_printerr (const char *msg) 720ev_printerr (const char *msg)
573{ 721{
574 write (STDERR_FILENO, msg, strlen (msg)); 722 write (STDERR_FILENO, msg, strlen (msg));
575} 723}
576#endif 724#endif
577 725
578static void (*syserr_cb)(const char *msg); 726static void (*syserr_cb)(const char *msg);
579 727
580void 728void ecb_cold
581ev_set_syserr_cb (void (*cb)(const char *msg)) 729ev_set_syserr_cb (void (*cb)(const char *msg))
582{ 730{
583 syserr_cb = cb; 731 syserr_cb = cb;
584} 732}
585 733
586static void noinline 734static void noinline ecb_cold
587ev_syserr (const char *msg) 735ev_syserr (const char *msg)
588{ 736{
589 if (!msg) 737 if (!msg)
590 msg = "(libev) system error"; 738 msg = "(libev) system error";
591 739
624#endif 772#endif
625} 773}
626 774
627static void *(*alloc)(void *ptr, long size) = ev_realloc_emul; 775static void *(*alloc)(void *ptr, long size) = ev_realloc_emul;
628 776
629void 777void ecb_cold
630ev_set_allocator (void *(*cb)(void *ptr, long size)) 778ev_set_allocator (void *(*cb)(void *ptr, long size))
631{ 779{
632 alloc = cb; 780 alloc = cb;
633} 781}
634 782
816 select (0, 0, 0, 0, &tv); 964 select (0, 0, 0, 0, &tv);
817#endif 965#endif
818 } 966 }
819} 967}
820 968
821inline_speed int
822ev_timeout_to_ms (ev_tstamp timeout)
823{
824 int ms = timeout * 1000. + .999999;
825
826 return expect_true (ms) ? ms : timeout < 1e-6 ? 0 : 1;
827}
828
829/*****************************************************************************/ 969/*****************************************************************************/
830 970
831#define MALLOC_ROUND 4096 /* prefer to allocate in chunks of this size, must be 2**n and >> 4 longs */ 971#define MALLOC_ROUND 4096 /* prefer to allocate in chunks of this size, must be 2**n and >> 4 longs */
832 972
833/* find a suitable new size for the given array, */ 973/* find a suitable new size for the given array, */
851 } 991 }
852 992
853 return ncur; 993 return ncur;
854} 994}
855 995
856static noinline void * 996static void * noinline ecb_cold
857array_realloc (int elem, void *base, int *cur, int cnt) 997array_realloc (int elem, void *base, int *cur, int cnt)
858{ 998{
859 *cur = array_nextsize (elem, *cur, cnt); 999 *cur = array_nextsize (elem, *cur, cnt);
860 return ev_realloc (base, elem * *cur); 1000 return ev_realloc (base, elem * *cur);
861} 1001}
864 memset ((void *)(base), 0, sizeof (*(base)) * (count)) 1004 memset ((void *)(base), 0, sizeof (*(base)) * (count))
865 1005
866#define array_needsize(type,base,cur,cnt,init) \ 1006#define array_needsize(type,base,cur,cnt,init) \
867 if (expect_false ((cnt) > (cur))) \ 1007 if (expect_false ((cnt) > (cur))) \
868 { \ 1008 { \
869 int ocur_ = (cur); \ 1009 int ecb_unused ocur_ = (cur); \
870 (base) = (type *)array_realloc \ 1010 (base) = (type *)array_realloc \
871 (sizeof (type), (base), &(cur), (cnt)); \ 1011 (sizeof (type), (base), &(cur), (cnt)); \
872 init ((base) + (ocur_), (cur) - ocur_); \ 1012 init ((base) + (ocur_), (cur) - ocur_); \
873 } 1013 }
874 1014
980 for (i = 0; i < fdchangecnt; ++i) 1120 for (i = 0; i < fdchangecnt; ++i)
981 { 1121 {
982 int fd = fdchanges [i]; 1122 int fd = fdchanges [i];
983 ANFD *anfd = anfds + fd; 1123 ANFD *anfd = anfds + fd;
984 1124
985 if (anfd->reify & EV__IOFDSET) 1125 if (anfd->reify & EV__IOFDSET && anfd->head)
986 { 1126 {
987 SOCKET handle = EV_FD_TO_WIN32_HANDLE (fd); 1127 SOCKET handle = EV_FD_TO_WIN32_HANDLE (fd);
988 1128
989 if (handle != anfd->handle) 1129 if (handle != anfd->handle)
990 { 1130 {
1044 fdchanges [fdchangecnt - 1] = fd; 1184 fdchanges [fdchangecnt - 1] = fd;
1045 } 1185 }
1046} 1186}
1047 1187
1048/* the given fd is invalid/unusable, so make sure it doesn't hurt us anymore */ 1188/* the given fd is invalid/unusable, so make sure it doesn't hurt us anymore */
1049inline_speed void 1189inline_speed void ecb_cold
1050fd_kill (EV_P_ int fd) 1190fd_kill (EV_P_ int fd)
1051{ 1191{
1052 ev_io *w; 1192 ev_io *w;
1053 1193
1054 while ((w = (ev_io *)anfds [fd].head)) 1194 while ((w = (ev_io *)anfds [fd].head))
1057 ev_feed_event (EV_A_ (W)w, EV_ERROR | EV_READ | EV_WRITE); 1197 ev_feed_event (EV_A_ (W)w, EV_ERROR | EV_READ | EV_WRITE);
1058 } 1198 }
1059} 1199}
1060 1200
1061/* check whether the given fd is actually valid, for error recovery */ 1201/* check whether the given fd is actually valid, for error recovery */
1062inline_size int 1202inline_size int ecb_cold
1063fd_valid (int fd) 1203fd_valid (int fd)
1064{ 1204{
1065#ifdef _WIN32 1205#ifdef _WIN32
1066 return EV_FD_TO_WIN32_HANDLE (fd) != -1; 1206 return EV_FD_TO_WIN32_HANDLE (fd) != -1;
1067#else 1207#else
1068 return fcntl (fd, F_GETFD) != -1; 1208 return fcntl (fd, F_GETFD) != -1;
1069#endif 1209#endif
1070} 1210}
1071 1211
1072/* called on EBADF to verify fds */ 1212/* called on EBADF to verify fds */
1073static void noinline 1213static void noinline ecb_cold
1074fd_ebadf (EV_P) 1214fd_ebadf (EV_P)
1075{ 1215{
1076 int fd; 1216 int fd;
1077 1217
1078 for (fd = 0; fd < anfdmax; ++fd) 1218 for (fd = 0; fd < anfdmax; ++fd)
1080 if (!fd_valid (fd) && errno == EBADF) 1220 if (!fd_valid (fd) && errno == EBADF)
1081 fd_kill (EV_A_ fd); 1221 fd_kill (EV_A_ fd);
1082} 1222}
1083 1223
1084/* called on ENOMEM in select/poll to kill some fds and retry */ 1224/* called on ENOMEM in select/poll to kill some fds and retry */
1085static void noinline 1225static void noinline ecb_cold
1086fd_enomem (EV_P) 1226fd_enomem (EV_P)
1087{ 1227{
1088 int fd; 1228 int fd;
1089 1229
1090 for (fd = anfdmax; fd--; ) 1230 for (fd = anfdmax; fd--; )
1285 1425
1286/*****************************************************************************/ 1426/*****************************************************************************/
1287 1427
1288#if EV_SIGNAL_ENABLE || EV_ASYNC_ENABLE 1428#if EV_SIGNAL_ENABLE || EV_ASYNC_ENABLE
1289 1429
1290static void noinline 1430static void noinline ecb_cold
1291evpipe_init (EV_P) 1431evpipe_init (EV_P)
1292{ 1432{
1293 if (!ev_is_active (&pipe_w)) 1433 if (!ev_is_active (&pipe_w))
1294 { 1434 {
1295# if EV_USE_EVENTFD 1435# if EV_USE_EVENTFD
1317 ev_io_start (EV_A_ &pipe_w); 1457 ev_io_start (EV_A_ &pipe_w);
1318 ev_unref (EV_A); /* watcher should not keep loop alive */ 1458 ev_unref (EV_A); /* watcher should not keep loop alive */
1319 } 1459 }
1320} 1460}
1321 1461
1322inline_size void 1462inline_speed void
1323evpipe_write (EV_P_ EV_ATOMIC_T *flag) 1463evpipe_write (EV_P_ EV_ATOMIC_T *flag)
1324{ 1464{
1325 if (!*flag) 1465 if (expect_true (*flag))
1466 return;
1467
1468 *flag = 1;
1469
1470 ECB_MEMORY_FENCE_RELEASE; /* make sure flag is visible before the wakeup */
1471
1472 pipe_write_skipped = 1;
1473
1474 ECB_MEMORY_FENCE; /* make sure pipe_write_skipped is visible before we check pipe_write_wanted */
1475
1476 if (pipe_write_wanted)
1326 { 1477 {
1478 int old_errno;
1479
 1480 pipe_write_skipped = 0; /* just an optimisation, no fence needed */
1481
1327 int old_errno = errno; /* save errno because write might clobber it */ 1482 old_errno = errno; /* save errno because write will clobber it */
1328 char dummy;
1329
1330 *flag = 1;
1331 1483
1332#if EV_USE_EVENTFD 1484#if EV_USE_EVENTFD
1333 if (evfd >= 0) 1485 if (evfd >= 0)
1334 { 1486 {
1335 uint64_t counter = 1; 1487 uint64_t counter = 1;
1336 write (evfd, &counter, sizeof (uint64_t)); 1488 write (evfd, &counter, sizeof (uint64_t));
1337 } 1489 }
1338 else 1490 else
1339#endif 1491#endif
1492 {
1340 /* win32 people keep sending patches that change this write() to send() */ 1493 /* win32 people keep sending patches that change this write() to send() */
1341 /* and then run away. but send() is wrong, it wants a socket handle on win32 */ 1494 /* and then run away. but send() is wrong, it wants a socket handle on win32 */
1342 /* so when you think this write should be a send instead, please find out */ 1495 /* so when you think this write should be a send instead, please find out */
1343 /* where your send() is from - it's definitely not the microsoft send, and */ 1496 /* where your send() is from - it's definitely not the microsoft send, and */
1344 /* tell me. thank you. */ 1497 /* tell me. thank you. */
1345 write (evpipe [1], &dummy, 1); 1498 write (evpipe [1], &(evpipe [1]), 1);
1499 }
1346 1500
1347 errno = old_errno; 1501 errno = old_errno;
1348 } 1502 }
1349} 1503}
1350 1504
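The rewritten evpipe_write no longer writes unconditionally: together with the ev_run changes further down it forms a handshake on pipe_write_wanted/pipe_write_skipped, so wake-ups raised while the loop is not blocked in the backend skip the write () entirely. A condensed paraphrase of the two sides, for orientation only, not a drop-in for either function:

    /* signal/async side (evpipe_write):
         *flag = 1;                ECB_MEMORY_FENCE_RELEASE;
         pipe_write_skipped = 1;   ECB_MEMORY_FENCE;
         if (pipe_write_wanted) { pipe_write_skipped = 0; write (evpipe [1], ...); }

       loop side (ev_run, around backend_poll):
         pipe_write_wanted = 1;    ECB_MEMORY_FENCE;
         if (pipe_write_skipped)   waittime stays 0, so the poll cannot block
         backend_poll (EV_A_ waittime);
         pipe_write_wanted = 0;
         if (pipe_write_skipped)   ev_feed_event (EV_A_ &pipe_w, EV_CUSTOM);   */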
1353static void 1507static void
1354pipecb (EV_P_ ev_io *iow, int revents) 1508pipecb (EV_P_ ev_io *iow, int revents)
1355{ 1509{
1356 int i; 1510 int i;
1357 1511
1512 if (revents & EV_READ)
1513 {
1358#if EV_USE_EVENTFD 1514#if EV_USE_EVENTFD
1359 if (evfd >= 0) 1515 if (evfd >= 0)
1360 { 1516 {
1361 uint64_t counter; 1517 uint64_t counter;
1362 read (evfd, &counter, sizeof (uint64_t)); 1518 read (evfd, &counter, sizeof (uint64_t));
1363 } 1519 }
1364 else 1520 else
1365#endif 1521#endif
1366 { 1522 {
1367 char dummy; 1523 char dummy;
1368 /* see discussion in evpipe_write when you think this read should be recv in win32 */ 1524 /* see discussion in evpipe_write when you think this read should be recv in win32 */
1369 read (evpipe [0], &dummy, 1); 1525 read (evpipe [0], &dummy, 1);
1526 }
1370 } 1527 }
1528
1529 pipe_write_skipped = 0;
1371 1530
1372#if EV_SIGNAL_ENABLE 1531#if EV_SIGNAL_ENABLE
1373 if (sig_pending) 1532 if (sig_pending)
1374 { 1533 {
1375 sig_pending = 0; 1534 sig_pending = 0;
1376 1535
1377 for (i = EV_NSIG - 1; i--; ) 1536 for (i = EV_NSIG - 1; i--; )
1378 if (expect_false (signals [i].pending)) 1537 if (expect_false (signals [i].pending))
1379 ev_feed_signal_event (EV_A_ i + 1); 1538 ev_feed_signal_event (EV_A_ i + 1);
1404 EV_P = signals [signum - 1].loop; 1563 EV_P = signals [signum - 1].loop;
1405 1564
1406 if (!EV_A) 1565 if (!EV_A)
1407 return; 1566 return;
1408#endif 1567#endif
1568
1569 if (!ev_active (&pipe_w))
1570 return;
1409 1571
1410 signals [signum - 1].pending = 1; 1572 signals [signum - 1].pending = 1;
1411 evpipe_write (EV_A_ &sig_pending); 1573 evpipe_write (EV_A_ &sig_pending);
1412} 1574}
1413 1575
1545#endif 1707#endif
1546#if EV_USE_SELECT 1708#if EV_USE_SELECT
1547# include "ev_select.c" 1709# include "ev_select.c"
1548#endif 1710#endif
1549 1711
1550int 1712int ecb_cold
1551ev_version_major (void) 1713ev_version_major (void)
1552{ 1714{
1553 return EV_VERSION_MAJOR; 1715 return EV_VERSION_MAJOR;
1554} 1716}
1555 1717
1556int 1718int ecb_cold
1557ev_version_minor (void) 1719ev_version_minor (void)
1558{ 1720{
1559 return EV_VERSION_MINOR; 1721 return EV_VERSION_MINOR;
1560} 1722}
1561 1723
1562/* return true if we are running with elevated privileges and should ignore env variables */ 1724/* return true if we are running with elevated privileges and should ignore env variables */
1563int inline_size 1725int inline_size ecb_cold
1564enable_secure (void) 1726enable_secure (void)
1565{ 1727{
1566#ifdef _WIN32 1728#ifdef _WIN32
1567 return 0; 1729 return 0;
1568#else 1730#else
1569 return getuid () != geteuid () 1731 return getuid () != geteuid ()
1570 || getgid () != getegid (); 1732 || getgid () != getegid ();
1571#endif 1733#endif
1572} 1734}
1573 1735
1574unsigned int 1736unsigned int ecb_cold
1575ev_supported_backends (void) 1737ev_supported_backends (void)
1576{ 1738{
1577 unsigned int flags = 0; 1739 unsigned int flags = 0;
1578 1740
1579 if (EV_USE_PORT ) flags |= EVBACKEND_PORT; 1741 if (EV_USE_PORT ) flags |= EVBACKEND_PORT;
1583 if (EV_USE_SELECT) flags |= EVBACKEND_SELECT; 1745 if (EV_USE_SELECT) flags |= EVBACKEND_SELECT;
1584 1746
1585 return flags; 1747 return flags;
1586} 1748}
1587 1749
1588unsigned int 1750unsigned int ecb_cold
1589ev_recommended_backends (void) 1751ev_recommended_backends (void)
1590{ 1752{
1591 unsigned int flags = ev_supported_backends (); 1753 unsigned int flags = ev_supported_backends ();
1592 1754
1593#ifndef __NetBSD__ 1755#ifndef __NetBSD__
1605#endif 1767#endif
1606 1768
1607 return flags; 1769 return flags;
1608} 1770}
1609 1771
1610unsigned int 1772unsigned int ecb_cold
1611ev_embeddable_backends (void) 1773ev_embeddable_backends (void)
1612{ 1774{
1613 int flags = EVBACKEND_EPOLL | EVBACKEND_KQUEUE | EVBACKEND_PORT; 1775 int flags = EVBACKEND_EPOLL | EVBACKEND_KQUEUE | EVBACKEND_PORT;
1614 1776
1615 /* epoll embeddability broken on all linux versions up to at least 2.6.23 */ 1777 /* epoll embeddability broken on all linux versions up to at least 2.6.23 */
1660ev_userdata (EV_P) 1822ev_userdata (EV_P)
1661{ 1823{
1662 return userdata; 1824 return userdata;
1663} 1825}
1664 1826
1827void
1665void ev_set_invoke_pending_cb (EV_P_ void (*invoke_pending_cb)(EV_P)) 1828ev_set_invoke_pending_cb (EV_P_ void (*invoke_pending_cb)(EV_P))
1666{ 1829{
1667 invoke_cb = invoke_pending_cb; 1830 invoke_cb = invoke_pending_cb;
1668} 1831}
1669 1832
1833void
1670void ev_set_loop_release_cb (EV_P_ void (*release)(EV_P), void (*acquire)(EV_P)) 1834ev_set_loop_release_cb (EV_P_ void (*release)(EV_P), void (*acquire)(EV_P))
1671{ 1835{
1672 release_cb = release; 1836 release_cb = release;
1673 acquire_cb = acquire; 1837 acquire_cb = acquire;
1674} 1838}
1675#endif 1839#endif
1676 1840
1677/* initialise a loop structure, must be zero-initialised */ 1841/* initialise a loop structure, must be zero-initialised */
1678static void noinline 1842static void noinline ecb_cold
1679loop_init (EV_P_ unsigned int flags) 1843loop_init (EV_P_ unsigned int flags)
1680{ 1844{
1681 if (!backend) 1845 if (!backend)
1682 { 1846 {
1683 origflags = flags; 1847 origflags = flags;
1711 if (!(flags & EVFLAG_NOENV) 1875 if (!(flags & EVFLAG_NOENV)
1712 && !enable_secure () 1876 && !enable_secure ()
1713 && getenv ("LIBEV_FLAGS")) 1877 && getenv ("LIBEV_FLAGS"))
1714 flags = atoi (getenv ("LIBEV_FLAGS")); 1878 flags = atoi (getenv ("LIBEV_FLAGS"));
1715 1879
1716 ev_rt_now = ev_time (); 1880 ev_rt_now = ev_time ();
1717 mn_now = get_clock (); 1881 mn_now = get_clock ();
1718 now_floor = mn_now; 1882 now_floor = mn_now;
1719 rtmn_diff = ev_rt_now - mn_now; 1883 rtmn_diff = ev_rt_now - mn_now;
1720#if EV_FEATURE_API 1884#if EV_FEATURE_API
1721 invoke_cb = ev_invoke_pending; 1885 invoke_cb = ev_invoke_pending;
1722#endif 1886#endif
1723 1887
1724 io_blocktime = 0.; 1888 io_blocktime = 0.;
1725 timeout_blocktime = 0.; 1889 timeout_blocktime = 0.;
1726 backend = 0; 1890 backend = 0;
1727 backend_fd = -1; 1891 backend_fd = -1;
1728 sig_pending = 0; 1892 sig_pending = 0;
1729#if EV_ASYNC_ENABLE 1893#if EV_ASYNC_ENABLE
1730 async_pending = 0; 1894 async_pending = 0;
1731#endif 1895#endif
1896 pipe_write_skipped = 0;
1897 pipe_write_wanted = 0;
1732#if EV_USE_INOTIFY 1898#if EV_USE_INOTIFY
1733 fs_fd = flags & EVFLAG_NOINOTIFY ? -1 : -2; 1899 fs_fd = flags & EVFLAG_NOINOTIFY ? -1 : -2;
1734#endif 1900#endif
1735#if EV_USE_SIGNALFD 1901#if EV_USE_SIGNALFD
1736 sigfd = flags & EVFLAG_SIGNALFD ? -2 : -1; 1902 sigfd = flags & EVFLAG_SIGNALFD ? -2 : -1;
1737#endif 1903#endif
1738 1904
1739 if (!(flags & EVBACKEND_MASK)) 1905 if (!(flags & EVBACKEND_MASK))
1740 flags |= ev_recommended_backends (); 1906 flags |= ev_recommended_backends ();
1741 1907
1766#endif 1932#endif
1767 } 1933 }
1768} 1934}
1769 1935
1770/* free up a loop structure */ 1936/* free up a loop structure */
1771void 1937void ecb_cold
1772ev_loop_destroy (EV_P) 1938ev_loop_destroy (EV_P)
1773{ 1939{
1774 int i; 1940 int i;
1775 1941
1776#if EV_MULTIPLICITY 1942#if EV_MULTIPLICITY
1906 infy_fork (EV_A); 2072 infy_fork (EV_A);
1907#endif 2073#endif
1908 2074
1909 if (ev_is_active (&pipe_w)) 2075 if (ev_is_active (&pipe_w))
1910 { 2076 {
1911 /* this "locks" the handlers against writing to the pipe */ 2077 /* pipe_write_wanted must be false now, so modifying fd vars should be safe */
1912 /* while we modify the fd vars */
1913 sig_pending = 1;
1914#if EV_ASYNC_ENABLE
1915 async_pending = 1;
1916#endif
1917 2078
1918 ev_ref (EV_A); 2079 ev_ref (EV_A);
1919 ev_io_stop (EV_A_ &pipe_w); 2080 ev_io_stop (EV_A_ &pipe_w);
1920 2081
1921#if EV_USE_EVENTFD 2082#if EV_USE_EVENTFD
1939 postfork = 0; 2100 postfork = 0;
1940} 2101}
1941 2102
1942#if EV_MULTIPLICITY 2103#if EV_MULTIPLICITY
1943 2104
1944struct ev_loop * 2105struct ev_loop * ecb_cold
1945ev_loop_new (unsigned int flags) 2106ev_loop_new (unsigned int flags)
1946{ 2107{
1947 EV_P = (struct ev_loop *)ev_malloc (sizeof (struct ev_loop)); 2108 EV_P = (struct ev_loop *)ev_malloc (sizeof (struct ev_loop));
1948 2109
1949 memset (EV_A, 0, sizeof (struct ev_loop)); 2110 memset (EV_A, 0, sizeof (struct ev_loop));
1957} 2118}
1958 2119
1959#endif /* multiplicity */ 2120#endif /* multiplicity */
1960 2121
1961#if EV_VERIFY 2122#if EV_VERIFY
1962static void noinline 2123static void noinline ecb_cold
1963verify_watcher (EV_P_ W w) 2124verify_watcher (EV_P_ W w)
1964{ 2125{
1965 assert (("libev: watcher has invalid priority", ABSPRI (w) >= 0 && ABSPRI (w) < NUMPRI)); 2126 assert (("libev: watcher has invalid priority", ABSPRI (w) >= 0 && ABSPRI (w) < NUMPRI));
1966 2127
1967 if (w->pending) 2128 if (w->pending)
1968 assert (("libev: pending watcher not on pending queue", pendings [ABSPRI (w)][w->pending - 1].w == w)); 2129 assert (("libev: pending watcher not on pending queue", pendings [ABSPRI (w)][w->pending - 1].w == w));
1969} 2130}
1970 2131
1971static void noinline 2132static void noinline ecb_cold
1972verify_heap (EV_P_ ANHE *heap, int N) 2133verify_heap (EV_P_ ANHE *heap, int N)
1973{ 2134{
1974 int i; 2135 int i;
1975 2136
1976 for (i = HEAP0; i < N + HEAP0; ++i) 2137 for (i = HEAP0; i < N + HEAP0; ++i)
1981 2142
1982 verify_watcher (EV_A_ (W)ANHE_w (heap [i])); 2143 verify_watcher (EV_A_ (W)ANHE_w (heap [i]));
1983 } 2144 }
1984} 2145}
1985 2146
1986static void noinline 2147static void noinline ecb_cold
1987array_verify (EV_P_ W *ws, int cnt) 2148array_verify (EV_P_ W *ws, int cnt)
1988{ 2149{
1989 while (cnt--) 2150 while (cnt--)
1990 { 2151 {
1991 assert (("libev: active index mismatch", ev_active (ws [cnt]) == cnt + 1)); 2152 assert (("libev: active index mismatch", ev_active (ws [cnt]) == cnt + 1));
1993 } 2154 }
1994} 2155}
1995#endif 2156#endif
1996 2157
1997#if EV_FEATURE_API 2158#if EV_FEATURE_API
1998void 2159void ecb_cold
1999ev_verify (EV_P) 2160ev_verify (EV_P)
2000{ 2161{
2001#if EV_VERIFY 2162#if EV_VERIFY
2002 int i; 2163 int i;
2003 WL w; 2164 WL w;
2069#endif 2230#endif
2070} 2231}
2071#endif 2232#endif
2072 2233
2073#if EV_MULTIPLICITY 2234#if EV_MULTIPLICITY
2074struct ev_loop * 2235struct ev_loop * ecb_cold
2075#else 2236#else
2076int 2237int
2077#endif 2238#endif
2078ev_default_loop (unsigned int flags) 2239ev_default_loop (unsigned int flags)
2079{ 2240{
2208 } 2369 }
2209} 2370}
2210 2371
2211#if EV_PERIODIC_ENABLE 2372#if EV_PERIODIC_ENABLE
2212 2373
2213inline_speed void 2374static void noinline
2214periodic_recalc (EV_P_ ev_periodic *w) 2375periodic_recalc (EV_P_ ev_periodic *w)
2215{ 2376{
2216 /* TODO: use slow but potentially more correct incremental algo, */ 2377 ev_tstamp interval = w->interval > MIN_INTERVAL ? w->interval : MIN_INTERVAL;
2217 /* also do not rely on ceil */ 2378 ev_tstamp at = w->offset + interval * ev_floor ((ev_rt_now - w->offset) / interval);
2218 ev_at (w) = w->offset + ceil ((ev_rt_now - w->offset) / w->interval) * w->interval; 2379
2380 /* the above almost always errs on the low side */
2381 while (at <= ev_rt_now)
2382 {
2383 ev_tstamp nat = at + w->interval;
2384
2385 /* when resolution fails us, we use ev_rt_now */
2386 if (expect_false (nat == at))
2387 {
2388 at = ev_rt_now;
2389 break;
2390 }
2391
2392 at = nat;
2393 }
2394
2395 ev_at (w) = at;
2219} 2396}
2220 2397
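The new periodic_recalc clamps the interval to MIN_INTERVAL, uses ev_floor to land on the last multiple of the interval at or before ev_rt_now, and then steps forward until the result is strictly in the future, falling back to ev_rt_now when the interval is so small that adding it no longer changes the timestamp. With illustrative numbers, not taken from the source: offset = 0, interval = 3600 and ev_rt_now = 10000 give at = 3600 * ev_floor (10000 / 3600) = 7200; the loop then advances 7200 -> 10800, which lies beyond 10000, so ev_at (w) becomes 10800, the next full hour.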
2221/* make periodics pending */ 2398/* make periodics pending */
2222inline_size void 2399inline_size void
2223periodics_reify (EV_P) 2400periodics_reify (EV_P)
2245 downheap (periodics, periodiccnt, HEAP0); 2422 downheap (periodics, periodiccnt, HEAP0);
2246 } 2423 }
2247 else if (w->interval) 2424 else if (w->interval)
2248 { 2425 {
2249 periodic_recalc (EV_A_ w); 2426 periodic_recalc (EV_A_ w);
2250
2251 /* if next trigger time is not sufficiently in the future, put it there */
2252 /* this might happen because of floating point inexactness */
2253 if (ev_at (w) - ev_rt_now < TIME_EPSILON)
2254 {
2255 ev_at (w) += w->interval;
2256
2257 /* if interval is unreasonably low we might still have a time in the past */
2258 /* so correct this. this will make the periodic very inexact, but the user */
2259 /* has effectively asked to get triggered more often than possible */
2260 if (ev_at (w) < ev_rt_now)
2261 ev_at (w) = ev_rt_now;
2262 }
2263
2264 ANHE_at_cache (periodics [HEAP0]); 2427 ANHE_at_cache (periodics [HEAP0]);
2265 downheap (periodics, periodiccnt, HEAP0); 2428 downheap (periodics, periodiccnt, HEAP0);
2266 } 2429 }
2267 else 2430 else
2268 ev_periodic_stop (EV_A_ w); /* nonrepeating: stop timer */ 2431 ev_periodic_stop (EV_A_ w); /* nonrepeating: stop timer */
2276 } 2439 }
2277} 2440}
2278 2441
2279/* simply recalculate all periodics */ 2442/* simply recalculate all periodics */
2280/* TODO: maybe ensure that at least one event happens when jumping forward? */ 2443/* TODO: maybe ensure that at least one event happens when jumping forward? */
2281static void noinline 2444static void noinline ecb_cold
2282periodics_reschedule (EV_P) 2445periodics_reschedule (EV_P)
2283{ 2446{
2284 int i; 2447 int i;
2285 2448
2286 /* adjust periodics after time jump */ 2449 /* adjust periodics after time jump */
2299 reheap (periodics, periodiccnt); 2462 reheap (periodics, periodiccnt);
2300} 2463}
2301#endif 2464#endif
2302 2465
2303/* adjust all timers by a given offset */ 2466/* adjust all timers by a given offset */
2304static void noinline 2467static void noinline ecb_cold
2305timers_reschedule (EV_P_ ev_tstamp adjust) 2468timers_reschedule (EV_P_ ev_tstamp adjust)
2306{ 2469{
2307 int i; 2470 int i;
2308 2471
2309 for (i = 0; i < timercnt; ++i) 2472 for (i = 0; i < timercnt; ++i)
2346 * doesn't hurt either as we only do this on time-jumps or 2509 * doesn't hurt either as we only do this on time-jumps or
2347 * in the unlikely event of having been preempted here. 2510 * in the unlikely event of having been preempted here.
2348 */ 2511 */
2349 for (i = 4; --i; ) 2512 for (i = 4; --i; )
2350 { 2513 {
2514 ev_tstamp diff;
2351 rtmn_diff = ev_rt_now - mn_now; 2515 rtmn_diff = ev_rt_now - mn_now;
2352 2516
2517 diff = odiff - rtmn_diff;
2518
2353 if (expect_true (fabs (odiff - rtmn_diff) < MIN_TIMEJUMP)) 2519 if (expect_true ((diff < 0. ? -diff : diff) < MIN_TIMEJUMP))
2354 return; /* all is well */ 2520 return; /* all is well */
2355 2521
2356 ev_rt_now = ev_time (); 2522 ev_rt_now = ev_time ();
2357 mn_now = get_clock (); 2523 mn_now = get_clock ();
2358 now_floor = mn_now; 2524 now_floor = mn_now;
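The fabs () in the old time-jump check is replaced by an explicit sign test, which keeps this hot path free of libm now that <math.h> is only pulled in for EV_USE_FLOOR; the two forms test the same thing:

    old:  if (fabs (odiff - rtmn_diff) < MIN_TIMEJUMP)
    new:  diff = odiff - rtmn_diff;
          if ((diff < 0. ? -diff : diff) < MIN_TIMEJUMP)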
2448 ev_tstamp prev_mn_now = mn_now; 2614 ev_tstamp prev_mn_now = mn_now;
2449 2615
2450 /* update time to cancel out callback processing overhead */ 2616 /* update time to cancel out callback processing overhead */
2451 time_update (EV_A_ 1e100); 2617 time_update (EV_A_ 1e100);
2452 2618
2619 /* from now on, we want a pipe-wake-up */
2620 pipe_write_wanted = 1;
2621
 2622 ECB_MEMORY_FENCE; /* make sure pipe_write_wanted is visible before we check for potential skips */
2623
2453 if (expect_true (!(flags & EVRUN_NOWAIT || idleall || !activecnt))) 2624 if (expect_true (!(flags & EVRUN_NOWAIT || idleall || !activecnt || pipe_write_skipped)))
2454 { 2625 {
2455 waittime = MAX_BLOCKTIME; 2626 waittime = MAX_BLOCKTIME;
2456 2627
2457 if (timercnt) 2628 if (timercnt)
2458 { 2629 {
2459 ev_tstamp to = ANHE_at (timers [HEAP0]) - mn_now + backend_fudge; 2630 ev_tstamp to = ANHE_at (timers [HEAP0]) - mn_now;
2460 if (waittime > to) waittime = to; 2631 if (waittime > to) waittime = to;
2461 } 2632 }
2462 2633
2463#if EV_PERIODIC_ENABLE 2634#if EV_PERIODIC_ENABLE
2464 if (periodiccnt) 2635 if (periodiccnt)
2465 { 2636 {
2466 ev_tstamp to = ANHE_at (periodics [HEAP0]) - ev_rt_now + backend_fudge; 2637 ev_tstamp to = ANHE_at (periodics [HEAP0]) - ev_rt_now;
2467 if (waittime > to) waittime = to; 2638 if (waittime > to) waittime = to;
2468 } 2639 }
2469#endif 2640#endif
2470 2641
2471 /* don't let timeouts decrease the waittime below timeout_blocktime */ 2642 /* don't let timeouts decrease the waittime below timeout_blocktime */
2472 if (expect_false (waittime < timeout_blocktime)) 2643 if (expect_false (waittime < timeout_blocktime))
2473 waittime = timeout_blocktime; 2644 waittime = timeout_blocktime;
2645
2646 /* at this point, we NEED to wait, so we have to ensure */
2647 /* to pass a minimum nonzero value to the backend */
2648 if (expect_false (waittime < backend_mintime))
2649 waittime = backend_mintime;
2474 2650
2475 /* extra check because io_blocktime is commonly 0 */ 2651 /* extra check because io_blocktime is commonly 0 */
2476 if (expect_false (io_blocktime)) 2652 if (expect_false (io_blocktime))
2477 { 2653 {
2478 sleeptime = io_blocktime - (mn_now - prev_mn_now); 2654 sleeptime = io_blocktime - (mn_now - prev_mn_now);
2479 2655
2480 if (sleeptime > waittime - backend_fudge) 2656 if (sleeptime > waittime - backend_mintime)
2481 sleeptime = waittime - backend_fudge; 2657 sleeptime = waittime - backend_mintime;
2482 2658
2483 if (expect_true (sleeptime > 0.)) 2659 if (expect_true (sleeptime > 0.))
2484 { 2660 {
2485 ev_sleep (sleeptime); 2661 ev_sleep (sleeptime);
2486 waittime -= sleeptime; 2662 waittime -= sleeptime;
2492 ++loop_count; 2668 ++loop_count;
2493#endif 2669#endif
2494 assert ((loop_done = EVBREAK_RECURSE, 1)); /* assert for side effect */ 2670 assert ((loop_done = EVBREAK_RECURSE, 1)); /* assert for side effect */
2495 backend_poll (EV_A_ waittime); 2671 backend_poll (EV_A_ waittime);
2496 assert ((loop_done = EVBREAK_CANCEL, 1)); /* assert for side effect */ 2672 assert ((loop_done = EVBREAK_CANCEL, 1)); /* assert for side effect */
2673
 2674 pipe_write_wanted = 0; /* just an optimisation, no fence needed */
2675
2676 if (pipe_write_skipped)
2677 {
2678 assert (("libev: pipe_w not active, but pipe not written", ev_is_active (&pipe_w)));
2679 ev_feed_event (EV_A_ &pipe_w, EV_CUSTOM);
2680 }
2681
2497 2682
2498 /* update ev_rt_now, do magic */ 2683 /* update ev_rt_now, do magic */
2499 time_update (EV_A_ waittime + sleeptime); 2684 time_update (EV_A_ waittime + sleeptime);
2500 } 2685 }
2501 2686
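In the wait-time computation, the backend_fudge that used to be added to each deadline is replaced by a lower clamp to backend_mintime, the backend's timing granularity, so a deadline that is very close still results in a real, minimum-length sleep rather than an effectively zero timeout. A sketch with illustrative numbers, not taken from any particular backend:

    /* backend_mintime = 1e-3 (1 ms granularity), next timer due in 0.2 ms */
    waittime = 0.0002;
    if (expect_false (waittime < backend_mintime))
      waittime = backend_mintime;   /* waittime becomes 0.001 */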
3078 if (!pend || pend == path) 3263 if (!pend || pend == path)
3079 break; 3264 break;
3080 3265
3081 *pend = 0; 3266 *pend = 0;
3082 w->wd = inotify_add_watch (fs_fd, path, mask); 3267 w->wd = inotify_add_watch (fs_fd, path, mask);
3083 } 3268 }
3084 while (w->wd < 0 && (errno == ENOENT || errno == EACCES)); 3269 while (w->wd < 0 && (errno == ENOENT || errno == EACCES));
3085 } 3270 }
3086 } 3271 }
3087 3272
3088 if (w->wd >= 0) 3273 if (w->wd >= 0)
3155 infy_wd (EV_A_ ev->wd, ev->wd, ev); 3340 infy_wd (EV_A_ ev->wd, ev->wd, ev);
3156 ofs += sizeof (struct inotify_event) + ev->len; 3341 ofs += sizeof (struct inotify_event) + ev->len;
3157 } 3342 }
3158} 3343}
3159 3344
3160inline_size void 3345inline_size void ecb_cold
3161ev_check_2625 (EV_P) 3346ev_check_2625 (EV_P)
3162{ 3347{
3163 /* kernels < 2.6.25 are borked 3348 /* kernels < 2.6.25 are borked
3164 * http://www.ussg.indiana.edu/hypermail/linux/kernel/0711.3/1208.html 3349 * http://www.ussg.indiana.edu/hypermail/linux/kernel/0711.3/1208.html
3165 */ 3350 */
3790} 3975}
3791 3976
3792/*****************************************************************************/ 3977/*****************************************************************************/
3793 3978
3794#if EV_WALK_ENABLE 3979#if EV_WALK_ENABLE
3795void 3980void ecb_cold
3796ev_walk (EV_P_ int types, void (*cb)(EV_P_ int type, void *w)) 3981ev_walk (EV_P_ int types, void (*cb)(EV_P_ int type, void *w))
3797{ 3982{
3798 int i, j; 3983 int i, j;
3799 ev_watcher_list *wl, *wn; 3984 ev_watcher_list *wl, *wn;
3800 3985
