
Comparing libev/ev.c (file contents):
Revision 1.391 by root, Thu Aug 4 13:57:16 2011 UTC vs.
Revision 1.407 by root, Wed Jan 25 01:32:12 2012 UTC

183# include EV_H 183# include EV_H
184#else 184#else
185# include "ev.h" 185# include "ev.h"
186#endif 186#endif
187 187
188EV_CPP(extern "C" {)
189
190#ifndef _WIN32 188#ifndef _WIN32
191# include <sys/time.h> 189# include <sys/time.h>
192# include <sys/wait.h> 190# include <sys/wait.h>
193# include <unistd.h> 191# include <unistd.h>
194#else 192#else
469/* the following is ecb.h embedded into libev - use update_ev_c to update from an external copy */ 467/* the following is ecb.h embedded into libev - use update_ev_c to update from an external copy */
470/* ECB.H BEGIN */ 468/* ECB.H BEGIN */
471/* 469/*
472 * libecb - http://software.schmorp.de/pkg/libecb 470 * libecb - http://software.schmorp.de/pkg/libecb
473 * 471 *
474 * Copyright (©) 2009-2011 Marc Alexander Lehmann <libecb@schmorp.de> 472 * Copyright (©) 2009-2012 Marc Alexander Lehmann <libecb@schmorp.de>
475 * Copyright (©) 2011 Emanuele Giaquinta 473 * Copyright (©) 2011 Emanuele Giaquinta
476 * All rights reserved. 474 * All rights reserved.
477 * 475 *
478 * Redistribution and use in source and binary forms, with or without modifica- 476 * Redistribution and use in source and binary forms, with or without modifica-
479 * tion, are permitted provided that the following conditions are met: 477 * tion, are permitted provided that the following conditions are met:
537 535
538/* ECB_NO_THREADS - ecb is not used by multiple threads, ever */ 536/* ECB_NO_THREADS - ecb is not used by multiple threads, ever */
539/* ECB_NO_SMP - ecb might be used in multiple threads, but only on a single cpu */ 537/* ECB_NO_SMP - ecb might be used in multiple threads, but only on a single cpu */
540 538
541#if ECB_NO_THREADS || ECB_NO_SMP 539#if ECB_NO_THREADS || ECB_NO_SMP
542 #define ECB_MEMORY_FENCE do { } while (0) 540 #define ECB_MEMORY_FENCE do { } while (0)
543 #define ECB_MEMORY_FENCE_ACQUIRE do { } while (0)
544 #define ECB_MEMORY_FENCE_RELEASE do { } while (0)
545#endif 541#endif
546 542
547#ifndef ECB_MEMORY_FENCE 543#ifndef ECB_MEMORY_FENCE
548 #if ECB_GCC_VERSION(2,5) 544 #if ECB_GCC_VERSION(2,5) || defined(__INTEL_COMPILER) || defined(__clang__) || __SUNPRO_C >= 0x5110 || __SUNPRO_CC >= 0x5110
549 #if __x86 545 #if __i386 || __i386__
550 #define ECB_MEMORY_FENCE __asm__ __volatile__ ("lock; orb $0, -1(%%esp)" : : : "memory") 546 #define ECB_MEMORY_FENCE __asm__ __volatile__ ("lock; orb $0, -1(%%esp)" : : : "memory")
551 #define ECB_MEMORY_FENCE_ACQUIRE ECB_MEMORY_FENCE /* non-lock xchg might be enough */ 547 #define ECB_MEMORY_FENCE_ACQUIRE ECB_MEMORY_FENCE /* non-lock xchg might be enough */
552 #define ECB_MEMORY_FENCE_RELEASE do { } while (0) /* unlikely to change in future cpus */ 548 #define ECB_MEMORY_FENCE_RELEASE do { } while (0) /* unlikely to change in future cpus */
553 #elif __amd64 549 #elif __amd64 || __amd64__ || __x86_64 || __x86_64__
554 #define ECB_MEMORY_FENCE __asm__ __volatile__ ("mfence" : : : "memory") 550 #define ECB_MEMORY_FENCE __asm__ __volatile__ ("mfence" : : : "memory")
555 #define ECB_MEMORY_FENCE_ACQUIRE __asm__ __volatile__ ("lfence" : : : "memory") 551 #define ECB_MEMORY_FENCE_ACQUIRE __asm__ __volatile__ ("lfence" : : : "memory")
556 #define ECB_MEMORY_FENCE_RELEASE __asm__ __volatile__ ("sfence") /* play safe - not needed in any current cpu */ 552 #define ECB_MEMORY_FENCE_RELEASE __asm__ __volatile__ ("sfence") /* play safe - not needed in any current cpu */
553 #elif __powerpc__ || __ppc__ || __powerpc64__ || __ppc64__
554 #define ECB_MEMORY_FENCE __asm__ __volatile__ ("sync" : : : "memory")
555 #elif defined(__ARM_ARCH_6__ ) || defined(__ARM_ARCH_6J__ ) \
556 || defined(__ARM_ARCH_6K__) || defined(__ARM_ARCH_6ZK__)
557 #define ECB_MEMORY_FENCE __asm__ __volatile__ ("mcr p15,0,%0,c7,c10,5" : : "r" (0) : "memory")
558 #elif defined(__ARM_ARCH_7__ ) || defined(__ARM_ARCH_7A__ ) \
559 || defined(__ARM_ARCH_7M__) || defined(__ARM_ARCH_7R__ )
560 #define ECB_MEMORY_FENCE __asm__ __volatile__ ("dmb" : : : "memory")
561 #elif __sparc || __sparc__
562 #define ECB_MEMORY_FENCE __asm__ __volatile__ ("membar #LoadStore | #LoadLoad | #StoreStore | #StoreLoad | " : : : "memory")
563 #define ECB_MEMORY_FENCE_ACQUIRE __asm__ __volatile__ ("membar #LoadStore | #LoadLoad" : : : "memory")
564 #define ECB_MEMORY_FENCE_RELEASE __asm__ __volatile__ ("membar #LoadStore | #StoreStore")
557 #endif 565 #endif
558 #endif 566 #endif
559#endif 567#endif
560 568
561#ifndef ECB_MEMORY_FENCE 569#ifndef ECB_MEMORY_FENCE
562 #if ECB_GCC_VERSION(4,4) 570 #if ECB_GCC_VERSION(4,4) || defined(__INTEL_COMPILER) || defined(__clang__)
563 #define ECB_MEMORY_FENCE __sync_synchronize () 571 #define ECB_MEMORY_FENCE __sync_synchronize ()
564 #define ECB_MEMORY_FENCE_ACQUIRE ({ char dummy = 0; __sync_lock_test_and_set (&dummy, 1); }) 572 /*#define ECB_MEMORY_FENCE_ACQUIRE ({ char dummy = 0; __sync_lock_test_and_set (&dummy, 1); }) */
565 #define ECB_MEMORY_FENCE_RELEASE ({ char dummy = 1; __sync_lock_release (&dummy ); }) 573 /*#define ECB_MEMORY_FENCE_RELEASE ({ char dummy = 1; __sync_lock_release (&dummy ); }) */
566 #elif _MSC_VER >= 1400 /* VC++ 2005 */ 574 #elif _MSC_VER >= 1400 /* VC++ 2005 */
567 #pragma intrinsic(_ReadBarrier,_WriteBarrier,_ReadWriteBarrier) 575 #pragma intrinsic(_ReadBarrier,_WriteBarrier,_ReadWriteBarrier)
568 #define ECB_MEMORY_FENCE _ReadWriteBarrier () 576 #define ECB_MEMORY_FENCE _ReadWriteBarrier ()
569 #define ECB_MEMORY_FENCE_ACQUIRE _ReadWriteBarrier () /* according to msdn, _ReadBarrier is not a load fence */ 577 #define ECB_MEMORY_FENCE_ACQUIRE _ReadWriteBarrier () /* according to msdn, _ReadBarrier is not a load fence */
570 #define ECB_MEMORY_FENCE_RELEASE _WriteBarrier () 578 #define ECB_MEMORY_FENCE_RELEASE _WriteBarrier ()
571 #elif defined(_WIN32) 579 #elif defined(_WIN32)
572 #include <WinNT.h> 580 #include <WinNT.h>
573 #define ECB_MEMORY_FENCE MemoryBarrier () /* actually just xchg on x86... scary */ 581 #define ECB_MEMORY_FENCE MemoryBarrier () /* actually just xchg on x86... scary */
574 #define ECB_MEMORY_FENCE_ACQUIRE ECB_MEMORY_FENCE 582 #elif __SUNPRO_C >= 0x5110 || __SUNPRO_CC >= 0x5110
575 #define ECB_MEMORY_FENCE_RELEASE ECB_MEMORY_FENCE 583 #include <mbarrier.h>
584 #define ECB_MEMORY_FENCE __machine_rw_barrier ()
585 #define ECB_MEMORY_FENCE_ACQUIRE __machine_r_barrier ()
586 #define ECB_MEMORY_FENCE_RELEASE __machine_w_barrier ()
576 #endif 587 #endif
577#endif 588#endif
578 589
579#ifndef ECB_MEMORY_FENCE 590#ifndef ECB_MEMORY_FENCE
591 #if !ECB_AVOID_PTHREADS
580 /* 592 /*
581 * if you get undefined symbol references to pthread_mutex_lock, 593 * if you get undefined symbol references to pthread_mutex_lock,
582 * or failure to find pthread.h, then you should implement 594 * or failure to find pthread.h, then you should implement
583 * the ECB_MEMORY_FENCE operations for your cpu/compiler 595 * the ECB_MEMORY_FENCE operations for your cpu/compiler
584 * OR provide pthread.h and link against the posix thread library 596 * OR provide pthread.h and link against the posix thread library
585 * of your system. 597 * of your system.
586 */ 598 */
587 #include <pthread.h> 599 #include <pthread.h>
588 #define ECB_NEEDS_PTHREADS 1 600 #define ECB_NEEDS_PTHREADS 1
589 #define ECB_MEMORY_FENCE_NEEDS_PTHREADS 1 601 #define ECB_MEMORY_FENCE_NEEDS_PTHREADS 1
590 602
591 static pthread_mutex_t ecb_mf_lock = PTHREAD_MUTEX_INITIALIZER; 603 static pthread_mutex_t ecb_mf_lock = PTHREAD_MUTEX_INITIALIZER;
592 #define ECB_MEMORY_FENCE do { pthread_mutex_lock (&ecb_mf_lock); pthread_mutex_unlock (&ecb_mf_lock); } while (0) 604 #define ECB_MEMORY_FENCE do { pthread_mutex_lock (&ecb_mf_lock); pthread_mutex_unlock (&ecb_mf_lock); } while (0)
605 #endif
606#endif
607
608#if !defined(ECB_MEMORY_FENCE_ACQUIRE) && defined(ECB_MEMORY_FENCE)
593 #define ECB_MEMORY_FENCE_ACQUIRE ECB_MEMORY_FENCE 609 #define ECB_MEMORY_FENCE_ACQUIRE ECB_MEMORY_FENCE
610#endif
611
612#if !defined(ECB_MEMORY_FENCE_RELEASE) && defined(ECB_MEMORY_FENCE)
594 #define ECB_MEMORY_FENCE_RELEASE ECB_MEMORY_FENCE 613 #define ECB_MEMORY_FENCE_RELEASE ECB_MEMORY_FENCE
595#endif 614#endif
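
The acquire/release fallbacks above ensure that, once ECB_MEMORY_FENCE exists in any form, the weaker fences default to it. A minimal sketch of how such fences are typically paired around a shared flag; the variable and function names here are illustrative only, not from ev.c:

    /* sketch: producer/consumer pairing of the fences defined above */
    static int shared_data;
    static volatile int ready;

    static void
    producer (void)
    {
      shared_data = 42;           /* write the payload                        */
      ECB_MEMORY_FENCE_RELEASE;   /* make the write visible before the flag   */
      ready = 1;
    }

    static int
    consumer (void)
    {
      if (!ready)
        return -1;                /* nothing published yet                    */

      ECB_MEMORY_FENCE_ACQUIRE;   /* do not read the payload before the flag  */
      return shared_data;
    }
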
596 615
597/*****************************************************************************/ 616/*****************************************************************************/
598 617
747 766
748 return r + ecb_ld32 (x); 767 return r + ecb_ld32 (x);
749 } 768 }
750#endif 769#endif
751 770
771ecb_function_ uint8_t ecb_bitrev8 (uint8_t x) ecb_const;
772ecb_function_ uint8_t ecb_bitrev8 (uint8_t x)
773{
774 return ( (x * 0x0802U & 0x22110U)
775 | (x * 0x8020U & 0x88440U)) * 0x10101U >> 16;
776}
777
778ecb_function_ uint16_t ecb_bitrev16 (uint16_t x) ecb_const;
779ecb_function_ uint16_t ecb_bitrev16 (uint16_t x)
780{
781 x = ((x >> 1) & 0x5555) | ((x & 0x5555) << 1);
782 x = ((x >> 2) & 0x3333) | ((x & 0x3333) << 2);
783 x = ((x >> 4) & 0x0f0f) | ((x & 0x0f0f) << 4);
784 x = ( x >> 8 ) | ( x << 8);
785
786 return x;
787}
788
789ecb_function_ uint32_t ecb_bitrev32 (uint32_t x) ecb_const;
790ecb_function_ uint32_t ecb_bitrev32 (uint32_t x)
791{
792 x = ((x >> 1) & 0x55555555) | ((x & 0x55555555) << 1);
793 x = ((x >> 2) & 0x33333333) | ((x & 0x33333333) << 2);
794 x = ((x >> 4) & 0x0f0f0f0f) | ((x & 0x0f0f0f0f) << 4);
795 x = ((x >> 8) & 0x00ff00ff) | ((x & 0x00ff00ff) << 8);
796 x = ( x >> 16 ) | ( x << 16);
797
798 return x;
799}
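
For reference, the bit-reversal helpers added above mirror a value end for end; a few spot checks (illustrative, not part of the source):

    #include <assert.h>

    assert (ecb_bitrev8  (0x01)       == 0x80);        /* 00000001 -> 10000000 */
    assert (ecb_bitrev8  (0xb1)       == 0x8d);        /* 10110001 -> 10001101 */
    assert (ecb_bitrev16 (0x0001)     == 0x8000);
    assert (ecb_bitrev32 (0x00000001) == 0x80000000);
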
800
752/* popcount64 is only available on 64 bit cpus as gcc builtin */ 801/* popcount64 is only available on 64 bit cpus as gcc builtin */
753/* so for this version we are lazy */ 802/* so for this version we are lazy */
754ecb_function_ int ecb_popcount64 (uint64_t x) ecb_const; 803ecb_function_ int ecb_popcount64 (uint64_t x) ecb_const;
755ecb_function_ int 804ecb_function_ int
756ecb_popcount64 (uint64_t x) 805ecb_popcount64 (uint64_t x)
831 #define ecb_mod(m,n) ((m) % (n) + ((m) % (n) < 0 ? (n) : 0)) 880 #define ecb_mod(m,n) ((m) % (n) + ((m) % (n) < 0 ? (n) : 0))
832#else 881#else
833 #define ecb_mod(m,n) ((m) < 0 ? ((n) - 1 - ((-1 - (m)) % (n))) : ((m) % (n))) 882 #define ecb_mod(m,n) ((m) < 0 ? ((n) - 1 - ((-1 - (m)) % (n))) : ((m) % (n)))
834#endif 883#endif
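
ecb_mod yields a non-negative remainder even where C's % operator would be negative; a couple of spot checks (illustrative):

    ecb_mod ( 7, 5) == 2;   /* same as  7 % 5                           */
    ecb_mod (-1, 5) == 4;   /* C99's -1 % 5 is -1 (truncating division) */
    ecb_mod (-7, 5) == 3;   /* 5 - 1 - ((-1 - -7) % 5)                  */
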
835 884
885#if __cplusplus
886 template<typename T>
887 static inline T ecb_div_rd (T val, T div)
888 {
889 return val < 0 ? - ((-val + div - 1) / div) : (val ) / div;
890 }
891 template<typename T>
892 static inline T ecb_div_ru (T val, T div)
893 {
894 return val < 0 ? - ((-val ) / div) : (val + div - 1) / div;
895 }
896#else
897 #define ecb_div_rd(val,div) ((val) < 0 ? - ((-(val) + (div) - 1) / (div)) : ((val) ) / (div))
898 #define ecb_div_ru(val,div) ((val) < 0 ? - ((-(val) ) / (div)) : ((val) + (div) - 1) / (div))
899#endif
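
The new ecb_div_rd/ecb_div_ru helpers round the quotient toward minus/plus infinity, including for negative dividends; for example (illustrative):

    ecb_div_rd (-7, 3) == -3;   /* floor (-2.33...) */
    ecb_div_ru (-7, 3) == -2;   /* ceil  (-2.33...) */
    ecb_div_rd ( 7, 3) ==  2;
    ecb_div_ru ( 7, 3) ==  3;
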
900
836#if ecb_cplusplus_does_not_suck 901#if ecb_cplusplus_does_not_suck
837 /* does not work for local types (http://www.open-std.org/jtc1/sc22/wg21/docs/papers/2008/n2657.htm) */ 902 /* does not work for local types (http://www.open-std.org/jtc1/sc22/wg21/docs/papers/2008/n2657.htm) */
838 template<typename T, int N> 903 template<typename T, int N>
839 static inline int ecb_array_length (const T (&arr)[N]) 904 static inline int ecb_array_length (const T (&arr)[N])
840 { 905 {
845#endif 910#endif
846 911
847#endif 912#endif
848 913
849/* ECB.H END */ 914/* ECB.H END */
915
916#if ECB_MEMORY_FENCE_NEEDS_PTHREADS
917/* if your architecture doesn't need memory fences, e.g. because it is
918 * single-cpu/core, or if you use libev in a project that doesn't use libev
919 * from multiple threads, then you can define ECB_AVOID_PTHREADS when compiling
920 * libev, in which cases the memory fences become nops.
921 * alternatively, you can remove this #error and link against libpthread,
922 * which will then provide the memory fences.
923 */
924# error "memory fences not defined for your architecture, please report"
925#endif
926
927#ifndef ECB_MEMORY_FENCE
928# define ECB_MEMORY_FENCE do { } while (0)
929# define ECB_MEMORY_FENCE_ACQUIRE ECB_MEMORY_FENCE
930# define ECB_MEMORY_FENCE_RELEASE ECB_MEMORY_FENCE
931#endif
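
As the comment above explains, defining ECB_AVOID_PTHREADS turns the fences into no-ops when they are not needed; one way to set it is in the embedder's configuration, before ev.c is compiled (example only):

    /* single-threaded or single-cpu builds of libev (example only) */
    #define ECB_AVOID_PTHREADS 1

    /* otherwise, link the POSIX thread library instead, e.g. -lpthread */
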
850 932
851#define expect_false(cond) ecb_expect_false (cond) 933#define expect_false(cond) ecb_expect_false (cond)
852#define expect_true(cond) ecb_expect_true (cond) 934#define expect_true(cond) ecb_expect_true (cond)
853#define noinline ecb_noinline 935#define noinline ecb_noinline
854 936
1150 #undef VAR 1232 #undef VAR
1151 }; 1233 };
1152 #include "ev_wrap.h" 1234 #include "ev_wrap.h"
1153 1235
1154 static struct ev_loop default_loop_struct; 1236 static struct ev_loop default_loop_struct;
1155 struct ev_loop *ev_default_loop_ptr; 1237 EV_API_DECL struct ev_loop *ev_default_loop_ptr = 0; /* needs to be initialised to make it a definition despite extern */
1156 1238
1157#else 1239#else
1158 1240
1159 ev_tstamp ev_rt_now; 1241 EV_API_DECL ev_tstamp ev_rt_now = 0; /* needs to be initialised to make it a definition despite extern */
1160 #define VAR(name,decl) static decl; 1242 #define VAR(name,decl) static decl;
1161 #include "ev_vars.h" 1243 #include "ev_vars.h"
1162 #undef VAR 1244 #undef VAR
1163 1245
1164 static int ev_default_loop_ptr; 1246 static int ev_default_loop_ptr;
1258 1340
1259 do 1341 do
1260 ncur <<= 1; 1342 ncur <<= 1;
1261 while (cnt > ncur); 1343 while (cnt > ncur);
1262 1344
1263 /* if size is large, round to MALLOC_ROUND - 4 * longs to accomodate malloc overhead */ 1345 /* if size is large, round to MALLOC_ROUND - 4 * longs to accommodate malloc overhead */
1264 if (elem * ncur > MALLOC_ROUND - sizeof (void *) * 4) 1346 if (elem * ncur > MALLOC_ROUND - sizeof (void *) * 4)
1265 { 1347 {
1266 ncur *= elem; 1348 ncur *= elem;
1267 ncur = (ncur + elem + (MALLOC_ROUND - 1) + sizeof (void *) * 4) & ~(MALLOC_ROUND - 1); 1349 ncur = (ncur + elem + (MALLOC_ROUND - 1) + sizeof (void *) * 4) & ~(MALLOC_ROUND - 1);
1268 ncur = ncur - sizeof (void *) * 4; 1350 ncur = ncur - sizeof (void *) * 4;
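
The rounding above sizes the request so that, together with an assumed four pointers of allocator bookkeeping, it fills whole MALLOC_ROUND-sized chunks. A worked example, assuming MALLOC_ROUND is 4096 and 8-byte pointers (values are illustrative):

    /* elem = 16, ncur = 512  ->  raw size 8192 bytes                    */
    /* (8192 + 16 + 4095 + 32) & ~4095  =  12288                         */
    /* request 12288 - 32 = 12256 bytes; 12256 + 32 overhead = 3 * 4096  */
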
1754 1836
1755 if (pipe_write_wanted) 1837 if (pipe_write_wanted)
1756 { 1838 {
1757 int old_errno; 1839 int old_errno;
1758 1840
1759 pipe_write_skipped = 0; /* just an optimsiation, no fence needed */ 1841 pipe_write_skipped = 0; /* just an optimisation, no fence needed */
1760 1842
1761 old_errno = errno; /* save errno because write will clobber it */ 1843 old_errno = errno; /* save errno because write will clobber it */
1762 1844
1763#if EV_USE_EVENTFD 1845#if EV_USE_EVENTFD
1764 if (evfd >= 0) 1846 if (evfd >= 0)
2948#endif 3030#endif
2949 assert ((loop_done = EVBREAK_RECURSE, 1)); /* assert for side effect */ 3031 assert ((loop_done = EVBREAK_RECURSE, 1)); /* assert for side effect */
2950 backend_poll (EV_A_ waittime); 3032 backend_poll (EV_A_ waittime);
2951 assert ((loop_done = EVBREAK_CANCEL, 1)); /* assert for side effect */ 3033 assert ((loop_done = EVBREAK_CANCEL, 1)); /* assert for side effect */
2952 3034
2953 pipe_write_wanted = 0; /* just an optimsiation, no fence needed */ 3035 pipe_write_wanted = 0; /* just an optimisation, no fence needed */
2954 3036
2955 if (pipe_write_skipped) 3037 if (pipe_write_skipped)
2956 { 3038 {
2957 assert (("libev: pipe_w not active, but pipe not written", ev_is_active (&pipe_w))); 3039 assert (("libev: pipe_w not active, but pipe not written", ev_is_active (&pipe_w)));
2958 ev_feed_event (EV_A_ &pipe_w, EV_CUSTOM); 3040 ev_feed_event (EV_A_ &pipe_w, EV_CUSTOM);
3216 3298
3217void noinline 3299void noinline
3218ev_timer_again (EV_P_ ev_timer *w) 3300ev_timer_again (EV_P_ ev_timer *w)
3219{ 3301{
3220 EV_FREQUENT_CHECK; 3302 EV_FREQUENT_CHECK;
3303
3304 clear_pending (EV_A_ (W)w);
3221 3305
3222 if (ev_is_active (w)) 3306 if (ev_is_active (w))
3223 { 3307 {
3224 if (w->repeat) 3308 if (w->repeat)
3225 { 3309 {
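
The new clear_pending call means ev_timer_again discards any pending event before re-arming. Its typical use is the documented idle-timeout pattern, roughly as follows; the watcher and callback names are illustrative:

    static void
    timeout_cb (EV_P_ ev_timer *w, int revents)
    {
      /* no activity for w->repeat seconds */
    }

    ev_timer_init (&timeout_watcher, timeout_cb, 0., 60.);
    ev_timer_again (EV_A_ &timeout_watcher);   /* (re)start the repeating timer */

    /* ... and on each activity: */
    ev_timer_again (EV_A_ &timeout_watcher);   /* push the timeout further out  */
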
4371 4455
4372#if EV_MULTIPLICITY 4456#if EV_MULTIPLICITY
4373 #include "ev_wrap.h" 4457 #include "ev_wrap.h"
4374#endif 4458#endif
4375 4459
4376EV_CPP(})
4377
