/cvs/libev/ev.c

Comparing libev/ev.c (file contents):
Revision 1.401 by root, Tue Dec 20 04:08:35 2011 UTC vs.
Revision 1.409 by root, Sat Feb 4 15:17:34 2012 UTC

@@ -467,11 +467,11 @@
 /* the following is ecb.h embedded into libev - use update_ev_c to update from an external copy */
 /* ECB.H BEGIN */
 /*
  * libecb - http://software.schmorp.de/pkg/libecb
  *
- * Copyright (©) 2009-2011 Marc Alexander Lehmann <libecb@schmorp.de>
+ * Copyright (©) 2009-2012 Marc Alexander Lehmann <libecb@schmorp.de>
  * Copyright (©) 2011 Emanuele Giaquinta
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without modifica-
  * tion, are permitted provided that the following conditions are met:
@@ -539,16 +539,16 @@
 #if ECB_NO_THREADS || ECB_NO_SMP
   #define ECB_MEMORY_FENCE do { } while (0)
 #endif
 
 #ifndef ECB_MEMORY_FENCE
-  #if ECB_GCC_VERSION(2,5) || defined(__INTEL_COMPILER) || defined(__clang__)
-    #if __i386__
+  #if ECB_GCC_VERSION(2,5) || defined(__INTEL_COMPILER) || (__llvm__ && __GNUC__) || __SUNPRO_C >= 0x5110 || __SUNPRO_CC >= 0x5110
+    #if __i386 || __i386__
       #define ECB_MEMORY_FENCE __asm__ __volatile__ ("lock; orb $0, -1(%%esp)" : : : "memory")
       #define ECB_MEMORY_FENCE_ACQUIRE ECB_MEMORY_FENCE /* non-lock xchg might be enough */
       #define ECB_MEMORY_FENCE_RELEASE do { } while (0) /* unlikely to change in future cpus */
-    #elif __amd64
+    #elif __amd64 || __amd64__ || __x86_64 || __x86_64__
       #define ECB_MEMORY_FENCE __asm__ __volatile__ ("mfence" : : : "memory")
       #define ECB_MEMORY_FENCE_ACQUIRE __asm__ __volatile__ ("lfence" : : : "memory")
       #define ECB_MEMORY_FENCE_RELEASE __asm__ __volatile__ ("sfence") /* play safe - not needed in any current cpu */
     #elif __powerpc__ || __ppc__ || __powerpc64__ || __ppc64__
       #define ECB_MEMORY_FENCE __asm__ __volatile__ ("sync" : : : "memory")
@@ -556,10 +556,16 @@
           || defined(__ARM_ARCH_6K__) || defined(__ARM_ARCH_6ZK__)
       #define ECB_MEMORY_FENCE __asm__ __volatile__ ("mcr p15,0,%0,c7,c10,5" : : "r" (0) : "memory")
     #elif defined(__ARM_ARCH_7__ ) || defined(__ARM_ARCH_7A__ ) \
           || defined(__ARM_ARCH_7M__) || defined(__ARM_ARCH_7R__ )
       #define ECB_MEMORY_FENCE __asm__ __volatile__ ("dmb" : : : "memory")
+    #elif __sparc || __sparc__
+      #define ECB_MEMORY_FENCE __asm__ __volatile__ ("membar #LoadStore | #LoadLoad | #StoreStore | #StoreLoad | " : : : "memory")
+      #define ECB_MEMORY_FENCE_ACQUIRE __asm__ __volatile__ ("membar #LoadStore | #LoadLoad" : : : "memory")
+      #define ECB_MEMORY_FENCE_RELEASE __asm__ __volatile__ ("membar #LoadStore | #StoreStore")
+    #elif defined(__s390__) || defined(__s390x__)
+      #define ECB_MEMORY_FENCE __asm__ __volatile__ ("bcr 15,0" : : : "memory")
     #endif
   #endif
 #endif
 
 #ifndef ECB_MEMORY_FENCE
@@ -573,10 +579,15 @@
     #define ECB_MEMORY_FENCE_ACQUIRE _ReadWriteBarrier () /* according to msdn, _ReadBarrier is not a load fence */
     #define ECB_MEMORY_FENCE_RELEASE _WriteBarrier ()
   #elif defined(_WIN32)
     #include <WinNT.h>
     #define ECB_MEMORY_FENCE MemoryBarrier () /* actually just xchg on x86... scary */
+  #elif __SUNPRO_C >= 0x5110 || __SUNPRO_CC >= 0x5110
+    #include <mbarrier.h>
+    #define ECB_MEMORY_FENCE __machine_rw_barrier ()
+    #define ECB_MEMORY_FENCE_ACQUIRE __machine_r_barrier ()
+    #define ECB_MEMORY_FENCE_RELEASE __machine_w_barrier ()
   #endif
 #endif
 
 #ifndef ECB_MEMORY_FENCE
   #if !ECB_AVOID_PTHREADS
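
ECB_MEMORY_FENCE is the full barrier; the _ACQUIRE and _RELEASE variants only constrain the ordering needed on the reader and writer side respectively. As a rough illustration of how such macros are used to publish data to another thread (a hand-written sketch, not code from ev.c; the variable and function names are made up):

    /* sketch only - not part of ev.c */
    static int shared_data;
    static volatile int data_ready;

    static void
    producer (void)
    {
      shared_data = 42;          /* write the payload first */
      ECB_MEMORY_FENCE_RELEASE;  /* keep the payload write before the flag write */
      data_ready = 1;
    }

    static void
    consumer (void)
    {
      if (data_ready)
        {
          ECB_MEMORY_FENCE_ACQUIRE; /* keep the flag read before the payload read */
          /* shared_data is now safe to use */
        }
    }
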
@@ -757,10 +768,40 @@
 
   return r + ecb_ld32 (x);
 }
 #endif
 
+ecb_function_ uint8_t ecb_bitrev8 (uint8_t x) ecb_const;
+ecb_function_ uint8_t ecb_bitrev8 (uint8_t x)
+{
+  return ( (x * 0x0802U & 0x22110U)
+         | (x * 0x8020U & 0x88440U)) * 0x10101U >> 16;
+}
+
+ecb_function_ uint16_t ecb_bitrev16 (uint16_t x) ecb_const;
+ecb_function_ uint16_t ecb_bitrev16 (uint16_t x)
+{
+  x = ((x >> 1) & 0x5555) | ((x & 0x5555) << 1);
+  x = ((x >> 2) & 0x3333) | ((x & 0x3333) << 2);
+  x = ((x >> 4) & 0x0f0f) | ((x & 0x0f0f) << 4);
+  x = ( x >> 8 ) | ( x << 8);
+
+  return x;
+}
+
+ecb_function_ uint32_t ecb_bitrev32 (uint32_t x) ecb_const;
+ecb_function_ uint32_t ecb_bitrev32 (uint32_t x)
+{
+  x = ((x >> 1) & 0x55555555) | ((x & 0x55555555) << 1);
+  x = ((x >> 2) & 0x33333333) | ((x & 0x33333333) << 2);
+  x = ((x >> 4) & 0x0f0f0f0f) | ((x & 0x0f0f0f0f) << 4);
+  x = ((x >> 8) & 0x00ff00ff) | ((x & 0x00ff00ff) << 8);
+  x = ( x >> 16 ) | ( x << 16);
+
+  return x;
+}
+
 /* popcount64 is only available on 64 bit cpus as gcc builtin */
 /* so for this version we are lazy */
 ecb_function_ int ecb_popcount64 (uint64_t x) ecb_const;
 ecb_function_ int
 ecb_popcount64 (uint64_t x)
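
The newly added ecb_bitrev8/16/32 helpers reverse the bit order of their argument: ecb_bitrev8 uses the classic multiply-and-mask trick, while the wider variants swap successively larger bit groups and finish with a half-word swap. A few hand-computed values, purely as an illustration (this is not a test that ships with libev):

    /* illustrative checks, assuming <assert.h> is included */
    assert (ecb_bitrev8  (0x01) == 0x80);            /* 00000001 -> 10000000 */
    assert (ecb_bitrev8  (0x12) == 0x48);            /* 00010010 -> 01001000 */
    assert (ecb_bitrev16 (0x0001) == 0x8000);
    assert (ecb_bitrev32 (0x00000001) == 0x80000000);
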
@@ -815,29 +856,29 @@
 
 #if ECB_GCC_VERSION(4,5)
   #define ecb_unreachable() __builtin_unreachable ()
 #else
   /* this seems to work fine, but gcc always emits a warning for it :/ */
-  ecb_function_ void ecb_unreachable (void) ecb_noreturn;
-  ecb_function_ void ecb_unreachable (void) { }
+  ecb_inline void ecb_unreachable (void) ecb_noreturn;
+  ecb_inline void ecb_unreachable (void) { }
 #endif
 
 /* try to tell the compiler that some condition is definitely true */
 #define ecb_assume(cond) do { if (!(cond)) ecb_unreachable (); } while (0)
 
-ecb_function_ unsigned char ecb_byteorder_helper (void) ecb_const;
-ecb_function_ unsigned char
+ecb_inline unsigned char ecb_byteorder_helper (void) ecb_const;
+ecb_inline unsigned char
 ecb_byteorder_helper (void)
 {
   const uint32_t u = 0x11223344;
   return *(unsigned char *)&u;
 }
 
-ecb_function_ ecb_bool ecb_big_endian (void) ecb_const;
-ecb_function_ ecb_bool ecb_big_endian (void) { return ecb_byteorder_helper () == 0x11; }
-ecb_function_ ecb_bool ecb_little_endian (void) ecb_const;
-ecb_function_ ecb_bool ecb_little_endian (void) { return ecb_byteorder_helper () == 0x44; }
+ecb_inline ecb_bool ecb_big_endian (void) ecb_const;
+ecb_inline ecb_bool ecb_big_endian (void) { return ecb_byteorder_helper () == 0x11; }
+ecb_inline ecb_bool ecb_little_endian (void) ecb_const;
+ecb_inline ecb_bool ecb_little_endian (void) { return ecb_byteorder_helper () == 0x44; }
 
 #if ECB_GCC_VERSION(3,0) || ECB_C99
   #define ecb_mod(m,n) ((m) % (n) + ((m) % (n) < 0 ? (n) : 0))
 #else
   #define ecb_mod(m,n) ((m) < 0 ? ((n) - 1 - ((-1 - (m)) % (n))) : ((m) % (n)))
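
Unlike plain C "%", whose result takes the sign of the dividend under C99, ecb_mod always yields a value in [0, n) for positive n. Hand-computed examples (illustrative only, not from ev.c):

    assert (ecb_mod ( 7, 5) == 2);
    assert (ecb_mod (-1, 5) == 4);   /* -1 % 5 would be -1 under C99 */
    assert (ecb_mod (-7, 5) == 3);
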
@@ -876,11 +917,11 @@
 
 #if ECB_MEMORY_FENCE_NEEDS_PTHREADS
 /* if your architecture doesn't need memory fences, e.g. because it is
  * single-cpu/core, or if you use libev in a project that doesn't use libev
  * from multiple threads, then you can define ECB_AVOID_PTHREADS when compiling
- * libev, in which casess the memory fences become nops.
+ * libev, in which cases the memory fences become nops.
  * alternatively, you can remove this #error and link against libpthread,
  * which will then provide the memory fences.
  */
 # error "memory fences not defined for your architecture, please report"
 #endif
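
The #error fires only when ecb.h had to fall back to pthread-based fences; the comment above names the two ways out. A sketch of the first option, for a strictly single-threaded program that embeds ev.c directly (the wrapper file name is hypothetical; the same effect can be had by compiling ev.c with -DECB_AVOID_PTHREADS):

    /* myev.c - sketch only */
    #define ECB_AVOID_PTHREADS 1  /* this program never touches the loop from a second thread */
    #include "ev.c"
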
@@ -1193,15 +1234,15 @@
   #undef VAR
   };
   #include "ev_wrap.h"
 
   static struct ev_loop default_loop_struct;
-  EV_API_DECL struct ev_loop *ev_default_loop_ptr = 0; /* needs to be initialised to make it a defintiino despite extern */
+  EV_API_DECL struct ev_loop *ev_default_loop_ptr = 0; /* needs to be initialised to make it a definition despite extern */
 
 #else
 
-  EV_API_DECL ev_tstamp ev_rt_now = 0; /* needs to be initialised to make it a defintiino despite extern */
+  EV_API_DECL ev_tstamp ev_rt_now = 0; /* needs to be initialised to make it a definition despite extern */
   #define VAR(name,decl) static decl;
   #include "ev_vars.h"
   #undef VAR
 
   static int ev_default_loop_ptr;
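
The block above relies on an X-macro list: ev_vars.h names every per-loop variable through VAR (name, decl), and the same list is expanded either as members of struct ev_loop (multiplicity builds, with ev_wrap.h mapping the bare names onto the loop pointer) or as file-scope statics (single-loop builds). A generic sketch of the pattern with a made-up variable list, not the actual contents of ev_vars.h or ev_wrap.h:

    /* vars.h (hypothetical):
         VAR (counter, int counter)
         VAR (t_now,   double t_now)
    */
    #if MULTIPLE_LOOPS
      struct context
      {
        #define VAR(name,decl) decl;
        #include "vars.h"
        #undef VAR
      };
      #define counter ((loop)->counter)  /* what a wrap header provides */
    #else
      #define VAR(name,decl) static decl;
      #include "vars.h"
      #undef VAR
    #endif
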
@@ -2991,11 +3032,11 @@
 #endif
       assert ((loop_done = EVBREAK_RECURSE, 1)); /* assert for side effect */
       backend_poll (EV_A_ waittime);
       assert ((loop_done = EVBREAK_CANCEL, 1)); /* assert for side effect */
 
-      pipe_write_wanted = 0; /* just an optimsiation, no fence needed */
+      pipe_write_wanted = 0; /* just an optimisation, no fence needed */
 
       if (pipe_write_skipped)
        {
          assert (("libev: pipe_w not active, but pipe not written", ev_is_active (&pipe_w)));
          ev_feed_event (EV_A_ &pipe_w, EV_CUSTOM);
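
The asserts around backend_poll rely on the comma operator: the controlling expression is always 1, so the assert itself can never fail, but the assignment to loop_done is only compiled in when assertions are enabled, giving a recursion check that costs nothing under NDEBUG. The idiom in isolation (a sketch with made-up names):

    #include <assert.h>

    static int in_poll;              /* debug-only flag, hypothetical */

    static void
    poll_once (void)
    {
      assert ((in_poll = 1, 1));     /* side effect only - never fails */
      /* ... blocking call that must not be entered recursively ... */
      assert ((in_poll = 0, 1));
    }
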
@@ -3259,10 +3300,12 @@
 
 void noinline
 ev_timer_again (EV_P_ ev_timer *w)
 {
   EV_FREQUENT_CHECK;
+
+  clear_pending (EV_A_ (W)w);
 
   if (ev_is_active (w))
     {
       if (w->repeat)
         {
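
ev_timer_again now also clears any event still pending on the watcher before re-arming it. The function is typically used for inactivity timeouts: set the repeat interval once, then call ev_timer_again whenever there is activity to push the timeout further out. A usage sketch under that assumption (the function and variable names are made up):

    #include "ev.h"

    static ev_timer timeout_watcher;

    static void
    timeout_cb (EV_P_ ev_timer *w, int revents)
    {
      /* no activity for w->repeat seconds */
    }

    static void
    timeout_start (EV_P)
    {
      ev_init (&timeout_watcher, timeout_cb);
      timeout_watcher.repeat = 60.;
      ev_timer_again (EV_A_ &timeout_watcher);
    }

    static void
    on_activity (EV_P)
    {
      ev_timer_again (EV_A_ &timeout_watcher);  /* restart relative to now */
    }
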
