… | |
… | |
492 | #define ecb_inline static inline |
492 | #define ecb_inline static inline |
493 | #else |
493 | #else |
494 | #define ecb_inline static |
494 | #define ecb_inline static |
495 | #endif |
495 | #endif |
496 | |
496 | |
|
|
497 | #ifndef ECB_MEMORY_FENCE |
|
|
498 | #if ECB_GCC_VERSION(2,5) |
|
|
499 | #if __x86 |
|
|
500 | #define ECB_MEMORY_FENCE __asm__ __volatile__ ("lock; orb $0, -1(%%esp)" : : : "memory") |
|
|
501 | #define ECB_MEMORY_FENCE_ACQUIRE ECB_MEMORY_FENCE |
|
|
502 | #define ECB_MEMORY_FENCE_RELEASE ECB_MEMORY_FENCE |
|
|
503 | #elif __amd64 |
|
|
504 | #define ECB_MEMORY_FENCE __asm__ __volatile__ ("mfence" : : : "memory") |
|
|
505 | #define ECB_MEMORY_FENCE_ACQUIRE __asm__ __volatile__ ("lfence" : : : "memory") |
|
|
506 | #define ECB_MEMORY_FENCE_RELEASE __asm__ __volatile__ ("sfence") |
|
|
507 | #endif |
|
|
508 | #endif |
|
|
509 | #endif |
|
|
510 | |
|
|
511 | #ifndef ECB_MEMORY_FENCE |
|
|
512 | #if ECB_GCC_VERSION(4,4) |
|
|
513 | #define ECB_MEMORY_FENCE __sync_synchronize () |
|
|
514 | #define ECB_MEMORY_FENCE_ACQUIRE ({ char dummy = 0; __sync_lock_test_and_set (&dummy, 1); }) |
|
|
515 | #define ECB_MEMORY_FENCE_RELEASE ({ char dummy = 1; __sync_lock_release (&dummy ); }) |
|
|
516 | #elif defined(_WIN32) && defined(MemoryBarrier) |
|
|
517 | #define ECB_MEMORY_FENCE MemoryBarrier () |
|
|
518 | #define ECB_MEMORY_FENCE_ACQUIRE ECB_MEMORY_FENCE |
|
|
519 | #define ECB_MEMORY_FENCE_RELEASE ECB_MEMORY_FENCE |
|
|
520 | #endif |
|
|
521 | #endif |
|
|
522 | |
|
|
523 | #ifndef ECB_MEMORY_FENCE |
|
|
524 | #include <pthread.h> |
|
|
525 | |
|
|
526 | static pthread_mutex_t ecb_mf_lock = PTHREAD_MUTEX_INITIALIZER; |
|
|
527 | #define ECB_MEMORY_FENCE do { pthread_mutex_lock (&ecb_mf_lock); pthread_mutex_unlock (&ecb_mf_lock); } while (0) |
|
|
528 | #define ECB_MEMORY_FENCE_ACQUIRE ECB_MEMORY_FENCE |
|
|
529 | #define ECB_MEMORY_FENCE_RELEASE ECB_MEMORY_FENCE |
|
|
530 | #endif |
|
|
531 | |
497 | #if ECB_GCC_VERSION(3,1) |
532 | #if ECB_GCC_VERSION(3,1) |
498 | #define ecb_attribute(attrlist) __attribute__(attrlist) |
533 | #define ecb_attribute(attrlist) __attribute__(attrlist) |
499 | #define ecb_is_constant(expr) __builtin_constant_p (expr) |
534 | #define ecb_is_constant(expr) __builtin_constant_p (expr) |
500 | #define ecb_expect(expr,value) __builtin_expect ((expr),(value)) |
535 | #define ecb_expect(expr,value) __builtin_expect ((expr),(value)) |
501 | #define ecb_prefetch(addr,rw,locality) __builtin_prefetch (addr, rw, locality) |
536 | #define ecb_prefetch(addr,rw,locality) __builtin_prefetch (addr, rw, locality) |
… | |
… | |
1420 | } |
1455 | } |
1421 | |
1456 | |
1422 | inline_speed void |
1457 | inline_speed void |
1423 | evpipe_write (EV_P_ EV_ATOMIC_T *flag) |
1458 | evpipe_write (EV_P_ EV_ATOMIC_T *flag) |
1424 | { |
1459 | { |
1425 | if (!*flag) |
1460 | if (expect_true (*flag)) |
1426 | { |
1461 | /*return*//*D*/; |
|
|
1462 | |
1427 | *flag = 1; |
1463 | *flag = 1; |
1428 | |
1464 | |
|
|
1465 | ECB_MEMORY_FENCE_RELEASE; /* make sure flag is visible before the wakeup */ |
|
|
1466 | |
1429 | pipe_write_skipped = 1; |
1467 | pipe_write_skipped = 1; |
1430 | |
1468 | |
|
|
1469 | ECB_MEMORY_FENCE; /* make sure pipe_write_skipped is visible before we check pipe_write_wanted */ |
|
|
1470 | |
1431 | if (pipe_write_wanted) |
1471 | if (pipe_write_wanted) |
|
|
1472 | { |
|
|
1473 | int old_errno; |
|
|
1474 | |
|
|
1475 | pipe_write_skipped = 0; /* just an optimisation, no fence needed */ |
|
|
1476 | |
|
|
1477 | old_errno = errno; /* save errno because write will clobber it */ |
|
|
1478 | |
|
|
1479 | #if EV_USE_EVENTFD |
|
|
1480 | if (evfd >= 0) |
1432 | { |
1481 | { |
1433 | int old_errno; |
|
|
1434 | |
|
|
1435 | pipe_write_skipped = 0; |
|
|
1436 | |
|
|
1437 | old_errno = errno; /* save errno because write will clobber it */ |
|
|
1438 | |
|
|
1439 | #if EV_USE_EVENTFD |
|
|
1440 | if (evfd >= 0) |
|
|
1441 | { |
|
|
1442 | uint64_t counter = 1; |
1482 | uint64_t counter = 1; |
1443 | write (evfd, &counter, sizeof (uint64_t)); |
1483 | write (evfd, &counter, sizeof (uint64_t)); |
1444 | } |
|
|
1445 | else |
|
|
1446 | #endif |
|
|
1447 | { |
|
|
1448 | /* win32 people keep sending patches that change this write() to send() */ |
|
|
1449 | /* and then run away. but send() is wrong, it wants a socket handle on win32 */ |
|
|
1450 | /* so when you think this write should be a send instead, please find out */ |
|
|
1451 | /* where your send() is from - it's definitely not the microsoft send, and */ |
|
|
1452 | /* tell me. thank you. */ |
|
|
1453 | write (evpipe [1], &(evpipe [1]), 1); |
|
|
1454 | } |
|
|
1455 | |
|
|
1456 | errno = old_errno; |
|
|
1457 | } |
1484 | } |
|
|
1485 | else |
|
|
1486 | #endif |
|
|
1487 | { |
|
|
1488 | /* win32 people keep sending patches that change this write() to send() */ |
|
|
1489 | /* and then run away. but send() is wrong, it wants a socket handle on win32 */ |
|
|
1490 | /* so when you think this write should be a send instead, please find out */ |
|
|
1491 | /* where your send() is from - it's definitely not the microsoft send, and */ |
|
|
1492 | /* tell me. thank you. */ |
|
|
1493 | write (evpipe [1], &(evpipe [1]), 1); |
|
|
1494 | } |
|
|
1495 | |
|
|
1496 | errno = old_errno; |
1458 | } |
1497 | } |
1459 | } |
1498 | } |
1460 | |
1499 | |
1461 | /* called whenever the libev signal pipe */ |
1500 | /* called whenever the libev signal pipe */ |
1462 | /* got some events (signal, async) */ |
1501 | /* got some events (signal, async) */ |
… | |
… | |
2573 | time_update (EV_A_ 1e100); |
2612 | time_update (EV_A_ 1e100); |
2574 | |
2613 | |
2575 | /* from now on, we want a pipe-wake-up */ |
2614 | /* from now on, we want a pipe-wake-up */ |
2576 | pipe_write_wanted = 1; |
2615 | pipe_write_wanted = 1; |
2577 | |
2616 | |
|
|
2617 | ECB_MEMORY_FENCE; /* make sure pipe_write_wanted is visible before we check for potential skips */ |
|
|
2618 | |
2578 | if (expect_true (!(flags & EVRUN_NOWAIT || idleall || !activecnt || pipe_write_skipped))) |
2619 | if (expect_true (!(flags & EVRUN_NOWAIT || idleall || !activecnt || pipe_write_skipped))) |
2579 | { |
2620 | { |
2580 | waittime = MAX_BLOCKTIME; |
2621 | waittime = MAX_BLOCKTIME; |
2581 | |
2622 | |
2582 | if (timercnt) |
2623 | if (timercnt) |
… | |
… | |
2623 | #endif |
2664 | #endif |
2624 | assert ((loop_done = EVBREAK_RECURSE, 1)); /* assert for side effect */ |
2665 | assert ((loop_done = EVBREAK_RECURSE, 1)); /* assert for side effect */ |
2625 | backend_poll (EV_A_ waittime); |
2666 | backend_poll (EV_A_ waittime); |
2626 | assert ((loop_done = EVBREAK_CANCEL, 1)); /* assert for side effect */ |
2667 | assert ((loop_done = EVBREAK_CANCEL, 1)); /* assert for side effect */ |
2627 | |
2668 | |
2628 | pipe_write_wanted = 0; |
2669 | pipe_write_wanted = 0; /* just an optimisation, no fence needed */ |
2629 | |
2670 | |
2630 | if (pipe_write_skipped) |
2671 | if (pipe_write_skipped) |
2631 | { |
2672 | { |
2632 | assert (("libev: pipe_w not active, but pipe not written", ev_is_active (&pipe_w))); |
2673 | assert (("libev: pipe_w not active, but pipe not written", ev_is_active (&pipe_w))); |
2633 | ev_feed_event (EV_A_ &pipe_w, EV_CUSTOM); |
2674 | ev_feed_event (EV_A_ &pipe_w, EV_CUSTOM); |