… | |
… | |
497 | #ifndef ECB_MEMORY_FENCE |
497 | #ifndef ECB_MEMORY_FENCE |
498 | #if ECB_GCC_VERSION(2,5) |
498 | #if ECB_GCC_VERSION(2,5) |
499 | #if __x86 |
499 | #if __x86 |
500 | #define ECB_MEMORY_FENCE __asm__ __volatile__ ("lock; orb $0, -1(%%esp)" : : : "memory") |
500 | #define ECB_MEMORY_FENCE __asm__ __volatile__ ("lock; orb $0, -1(%%esp)" : : : "memory") |
501 | #define ECB_MEMORY_FENCE_ACQUIRE ECB_MEMORY_FENCE |
501 | #define ECB_MEMORY_FENCE_ACQUIRE ECB_MEMORY_FENCE |
502 | #define ECB_MEMORY_FENCE_RELEASE ECB_MEMORY_FENCE /* better be safe than sorry */ |
502 | #define ECB_MEMORY_FENCE_RELEASE ECB_MEMORY_FENCE |
503 | #elif __amd64 |
503 | #elif __amd64 |
504 | #define ECB_MEMORY_FENCE __asm__ __volatile__ ("mfence" : : : "memory") |
504 | #define ECB_MEMORY_FENCE __asm__ __volatile__ ("mfence" : : : "memory") |
505 | #define ECB_MEMORY_FENCE_ACQUIRE __asm__ __volatile__ ("lfence" : : : "memory") |
505 | #define ECB_MEMORY_FENCE_ACQUIRE __asm__ __volatile__ ("lfence" : : : "memory") |
506 | #define ECB_MEMORY_FENCE_RELEASE __asm__ __volatile__ ("sfence") |
506 | #define ECB_MEMORY_FENCE_RELEASE __asm__ __volatile__ ("sfence") |
507 | #endif |
507 | #endif |
… | |
… | |
511 | #ifndef ECB_MEMORY_FENCE |
511 | #ifndef ECB_MEMORY_FENCE |
512 | #if ECB_GCC_VERSION(4,4) |
512 | #if ECB_GCC_VERSION(4,4) |
513 | #define ECB_MEMORY_FENCE __sync_synchronize () |
513 | #define ECB_MEMORY_FENCE __sync_synchronize () |
514 | #define ECB_MEMORY_FENCE_ACQUIRE ({ char dummy = 0; __sync_lock_test_and_set (&dummy, 1); }) |
514 | #define ECB_MEMORY_FENCE_ACQUIRE ({ char dummy = 0; __sync_lock_test_and_set (&dummy, 1); }) |
515 | #define ECB_MEMORY_FENCE_RELEASE ({ char dummy = 1; __sync_lock_release (&dummy ); }) |
515 | #define ECB_MEMORY_FENCE_RELEASE ({ char dummy = 1; __sync_lock_release (&dummy ); }) |
516 | #elif _MSC_VER >= 1400 |
|
|
517 | #define ECB_MEMORY_FENCE do { } while (0) |
|
|
518 | #define ECB_MEMORY_FENCE_ACQUIRE ECB_MEMORY_FENCE |
|
|
519 | #define ECB_MEMORY_FENCE_RELEASE ECB_MEMORY_FENCE |
|
|
520 | #elif defined(_WIN32) && defined(MemoryBarrier) |
516 | #elif defined(_WIN32) && defined(MemoryBarrier) |
521 | #define ECB_MEMORY_FENCE MemoryBarrier () |
517 | #define ECB_MEMORY_FENCE MemoryBarrier () |
522 | #define ECB_MEMORY_FENCE_ACQUIRE ECB_MEMORY_FENCE |
518 | #define ECB_MEMORY_FENCE_ACQUIRE ECB_MEMORY_FENCE |
523 | #define ECB_MEMORY_FENCE_RELEASE ECB_MEMORY_FENCE |
519 | #define ECB_MEMORY_FENCE_RELEASE ECB_MEMORY_FENCE |
524 | #endif |
520 | #endif |
… | |
… | |
1464 | if (expect_true (*flag)) |
1460 | if (expect_true (*flag)) |
1465 | return; |
1461 | return; |
1466 | |
1462 | |
1467 | *flag = 1; |
1463 | *flag = 1; |
1468 | |
1464 | |
1469 | ECB_MEMORY_FENCE_RELEASE; |
1465 | ECB_MEMORY_FENCE_RELEASE; /* make sure flag is visible before the wakeup */ |
1470 | |
1466 | |
1471 | pipe_write_skipped = 1; |
1467 | pipe_write_skipped = 1; |
1472 | |
1468 | |
1473 | ECB_MEMORY_FENCE; |
1469 | ECB_MEMORY_FENCE; /* make sure pipe_write_skipped is visible before we check pipe_write_wanted */ |
1474 | |
1470 | |
1475 | if (pipe_write_wanted) |
1471 | if (pipe_write_wanted) |
1476 | { |
1472 | { |
1477 | int old_errno; |
1473 | int old_errno; |
1478 | |
1474 | |
1479 | pipe_write_skipped = 0; /* optimisation only */ |
1475 | pipe_write_skipped = 0; /* just an optimisation, no fence needed */
1480 | |
1476 | |
1481 | old_errno = errno; /* save errno because write will clobber it */ |
1477 | old_errno = errno; /* save errno because write will clobber it */ |
1482 | |
1478 | |
1483 | #if EV_USE_EVENTFD |
1479 | #if EV_USE_EVENTFD |
1484 | if (evfd >= 0) |
1480 | if (evfd >= 0) |
… | |
… | |
2616 | time_update (EV_A_ 1e100); |
2612 | time_update (EV_A_ 1e100); |
2617 | |
2613 | |
2618 | /* from now on, we want a pipe-wake-up */ |
2614 | /* from now on, we want a pipe-wake-up */ |
2619 | pipe_write_wanted = 1; |
2615 | pipe_write_wanted = 1; |
2620 | |
2616 | |
2621 | ECB_MEMORY_FENCE; |
2617 | ECB_MEMORY_FENCE; /* make sure pipe_write_wanted is visible before we check for potential skips */
2622 | |
2618 | |
2623 | if (expect_true (!(flags & EVRUN_NOWAIT || idleall || !activecnt || pipe_write_skipped))) |
2619 | if (expect_true (!(flags & EVRUN_NOWAIT || idleall || !activecnt || pipe_write_skipped))) |
2624 | { |
2620 | { |
2625 | waittime = MAX_BLOCKTIME; |
2621 | waittime = MAX_BLOCKTIME; |
2626 | |
2622 | |
… | |
… | |
2668 | #endif |
2664 | #endif |
2669 | assert ((loop_done = EVBREAK_RECURSE, 1)); /* assert for side effect */ |
2665 | assert ((loop_done = EVBREAK_RECURSE, 1)); /* assert for side effect */ |
2670 | backend_poll (EV_A_ waittime); |
2666 | backend_poll (EV_A_ waittime); |
2671 | assert ((loop_done = EVBREAK_CANCEL, 1)); /* assert for side effect */ |
2667 | assert ((loop_done = EVBREAK_CANCEL, 1)); /* assert for side effect */ |
2672 | |
2668 | |
2673 | pipe_write_wanted = 0; |
2669 | pipe_write_wanted = 0; /* just an optimisation, no fence needed */
2674 | |
2670 | |
2675 | if (pipe_write_skipped) |
2671 | if (pipe_write_skipped) |
2676 | { |
2672 | { |
2677 | assert (("libev: pipe_w not active, but pipe not written", ev_is_active (&pipe_w))); |
2673 | assert (("libev: pipe_w not active, but pipe not written", ev_is_active (&pipe_w))); |
2678 | ev_feed_event (EV_A_ &pipe_w, EV_CUSTOM); |
2674 | ev_feed_event (EV_A_ &pipe_w, EV_CUSTOM); |