… | |
… | |
/* parametrise the generic etp.c thread-pool code for eio requests */
#define ETP_PRI_MAX EIO_PRI_MAX

#define ETP_TYPE_QUIT -1        /* pseudo request type: tells a worker to exit */
#define ETP_TYPE_GROUP EIO_GROUP /* request type marking a request group */
370 | |
370 | |
|
|
/* callback used when the user supplied none: do nothing */
static void
eio_nop_callback (void)
{
}

/* poll-notification callbacks installed by eio_init; they default to the
   nop callback so they can always be invoked without a NULL check */
static void (*eio_want_poll_cb)(void) = eio_nop_callback;
static void (*eio_done_poll_cb)(void) = eio_nop_callback;

/* hooks used by etp.c; eio has a single implicit pool, so the pool
   argument is ignored */
#define ETP_WANT_POLL(pool) eio_want_poll_cb ()
#define ETP_DONE_POLL(pool) eio_done_poll_cb ()
|
|
377 | |
371 | struct etp_worker; |
378 | struct etp_worker; |
372 | |
|
|
373 | #define ETP_REQ eio_req |
379 | #define ETP_REQ eio_req |
374 | #define ETP_DESTROY(req) eio_destroy (req) |
380 | #define ETP_DESTROY(req) eio_destroy (req) |
375 | static int eio_finish (eio_req *req); |
381 | static int eio_finish (eio_req *req); |
376 | #define ETP_FINISH(req) eio_finish (req) |
382 | #define ETP_FINISH(req) eio_finish (req) |
377 | static void eio_execute (struct etp_worker *self, eio_req *req); |
383 | static void eio_execute (struct etp_worker *self, eio_req *req); |
378 | #define ETP_EXECUTE(wrk,req) eio_execute (wrk, req) |
384 | #define ETP_EXECUTE(wrk,req) eio_execute (wrk, req) |
379 | |
385 | |
380 | #include "etp.c" |
386 | #include "etp.c" |
381 | |
387 | |
|
|
388 | static struct etp_pool eio_pool; |
|
|
389 | #define EIO_POOL (&eio_pool) |
|
|
390 | |
382 | /*****************************************************************************/ |
391 | /*****************************************************************************/ |
383 | |
392 | |
384 | static void |
393 | static void |
385 | grp_try_feed (eio_req *grp) |
394 | grp_try_feed (eio_req *grp) |
386 | { |
395 | { |
387 | while (grp->size < grp->int2 && !EIO_CANCELLED (grp)) |
396 | while (grp->size < grp->int2 && !EIO_CANCELLED (grp)) |
388 | { |
397 | { |
389 | grp->flags &= ~EIO_FLAG_GROUPADD; |
398 | grp->flags &= ~ETP_FLAG_GROUPADD; |
390 | |
399 | |
391 | EIO_FEED (grp); |
400 | EIO_FEED (grp); |
392 | |
401 | |
393 | /* stop if no progress has been made */ |
402 | /* stop if no progress has been made */ |
394 | if (!(grp->flags & EIO_FLAG_GROUPADD)) |
403 | if (!(grp->flags & ETP_FLAG_GROUPADD)) |
395 | { |
404 | { |
396 | grp->feed = 0; |
405 | grp->feed = 0; |
397 | break; |
406 | break; |
398 | } |
407 | } |
399 | } |
408 | } |
… | |
… | |
406 | |
415 | |
407 | /* call feeder, if applicable */ |
416 | /* call feeder, if applicable */ |
408 | grp_try_feed (grp); |
417 | grp_try_feed (grp); |
409 | |
418 | |
410 | /* finish, if done */ |
419 | /* finish, if done */ |
411 | if (!grp->size && grp->int1) |
420 | if (!grp->size && grp->flags & ETP_FLAG_DELAYED) |
412 | return eio_finish (grp); |
421 | return eio_finish (grp); |
413 | else |
422 | else |
414 | return 0; |
423 | return 0; |
415 | } |
424 | } |
416 | |
425 | |
… | |
… | |
452 | } |
461 | } |
453 | |
462 | |
454 | void |
463 | void |
455 | eio_grp_cancel (eio_req *grp) |
464 | eio_grp_cancel (eio_req *grp) |
456 | { |
465 | { |
457 | etp_grp_cancel (grp); |
466 | etp_grp_cancel (EIO_POOL, grp); |
458 | } |
467 | } |
459 | |
468 | |
460 | void |
469 | void |
461 | eio_cancel (eio_req *req) |
470 | eio_cancel (eio_req *req) |
462 | { |
471 | { |
463 | etp_cancel (req); |
472 | etp_cancel (EIO_POOL, req); |
464 | } |
473 | } |
465 | |
474 | |
466 | void |
475 | void |
467 | eio_submit (eio_req *req) |
476 | eio_submit (eio_req *req) |
468 | { |
477 | { |
469 | etp_submit (req); |
478 | etp_submit (EIO_POOL, req); |
470 | } |
479 | } |
471 | |
480 | |
472 | unsigned int |
481 | unsigned int |
473 | eio_nreqs (void) |
482 | eio_nreqs (void) |
474 | { |
483 | { |
475 | return etp_nreqs (); |
484 | return etp_nreqs (EIO_POOL); |
476 | } |
485 | } |
477 | |
486 | |
478 | unsigned int |
487 | unsigned int |
479 | eio_nready (void) |
488 | eio_nready (void) |
480 | { |
489 | { |
481 | return etp_nready (); |
490 | return etp_nready (EIO_POOL); |
482 | } |
491 | } |
483 | |
492 | |
484 | unsigned int |
493 | unsigned int |
485 | eio_npending (void) |
494 | eio_npending (void) |
486 | { |
495 | { |
487 | return etp_npending (); |
496 | return etp_npending (EIO_POOL); |
488 | } |
497 | } |
489 | |
498 | |
490 | unsigned int ecb_cold |
499 | unsigned int ecb_cold |
491 | eio_nthreads (void) |
500 | eio_nthreads (void) |
492 | { |
501 | { |
493 | return etp_nthreads (); |
502 | return etp_nthreads (EIO_POOL); |
494 | } |
503 | } |
495 | |
504 | |
496 | void ecb_cold |
505 | void ecb_cold |
497 | eio_set_max_poll_time (double nseconds) |
506 | eio_set_max_poll_time (double nseconds) |
498 | { |
507 | { |
499 | etp_set_max_poll_time (nseconds); |
508 | etp_set_max_poll_time (EIO_POOL, nseconds); |
500 | } |
509 | } |
501 | |
510 | |
502 | void ecb_cold |
511 | void ecb_cold |
503 | eio_set_max_poll_reqs (unsigned int maxreqs) |
512 | eio_set_max_poll_reqs (unsigned int maxreqs) |
504 | { |
513 | { |
505 | etp_set_max_poll_reqs (maxreqs); |
514 | etp_set_max_poll_reqs (EIO_POOL, maxreqs); |
506 | } |
515 | } |
507 | |
516 | |
508 | void ecb_cold |
517 | void ecb_cold |
509 | eio_set_max_idle (unsigned int nthreads) |
518 | eio_set_max_idle (unsigned int nthreads) |
510 | { |
519 | { |
511 | etp_set_max_idle (nthreads); |
520 | etp_set_max_idle (EIO_POOL, nthreads); |
512 | } |
521 | } |
513 | |
522 | |
514 | void ecb_cold |
523 | void ecb_cold |
515 | eio_set_idle_timeout (unsigned int seconds) |
524 | eio_set_idle_timeout (unsigned int seconds) |
516 | { |
525 | { |
517 | etp_set_idle_timeout (seconds); |
526 | etp_set_idle_timeout (EIO_POOL, seconds); |
518 | } |
527 | } |
519 | |
528 | |
520 | void ecb_cold |
529 | void ecb_cold |
521 | eio_set_min_parallel (unsigned int nthreads) |
530 | eio_set_min_parallel (unsigned int nthreads) |
522 | { |
531 | { |
523 | etp_set_min_parallel (nthreads); |
532 | etp_set_min_parallel (EIO_POOL, nthreads); |
524 | } |
533 | } |
525 | |
534 | |
526 | void ecb_cold |
535 | void ecb_cold |
527 | eio_set_max_parallel (unsigned int nthreads) |
536 | eio_set_max_parallel (unsigned int nthreads) |
528 | { |
537 | { |
529 | etp_set_max_parallel (nthreads); |
538 | etp_set_max_parallel (EIO_POOL, nthreads); |
530 | } |
539 | } |
531 | |
540 | |
532 | int eio_poll (void) |
541 | int eio_poll (void) |
533 | { |
542 | { |
534 | return etp_poll (); |
543 | return etp_poll (EIO_POOL); |
535 | } |
544 | } |
536 | |
545 | |
537 | /*****************************************************************************/ |
546 | /*****************************************************************************/ |
538 | /* work around various missing functions */ |
547 | /* work around various missing functions */ |
539 | |
548 | |
… | |
… | |
1703 | /*****************************************************************************/ |
1712 | /*****************************************************************************/ |
1704 | |
1713 | |
/* allocate len bytes into req->ptr2 unless it is already set; marks the
   buffer as owned (EIO_FLAG_PTR2_FREE) so it is freed with the request.
   the flag update is done under wrklock because req->flags is shared with
   other threads. on allocation failure, sets ENOMEM/result = -1 and
   breaks out of the enclosing switch. */
#define ALLOC(len)				\
  if (!req->ptr2)				\
    {						\
      X_LOCK (EIO_POOL->wrklock);		\
      req->flags |= EIO_FLAG_PTR2_FREE;		\
      X_UNLOCK (EIO_POOL->wrklock);		\
      req->ptr2 = malloc (len);			\
      if (!req->ptr2)				\
        {					\
          errno = ENOMEM;			\
          req->result = -1;			\
          break;				\
        }					\
    }
1719 | |
1728 | |
/* per-worker-thread initialisation, called once when a pool thread starts */
static void ecb_noinline ecb_cold
etp_proc_init (void)
{
#if HAVE_PRCTL_SET_NAME
  /* provide a more sensible "thread name" */
  char name[16 + 1];
  const int namelen = sizeof (name) - 1;
  int len;

  /* append "/eio" to the current thread name, overwriting the tail
     if the name would not fit into the 16-byte prctl limit */
  prctl (PR_GET_NAME, (unsigned long)name, 0, 0, 0);
  name [namelen] = 0; /* PR_GET_NAME does not guarantee termination at this length */
  len = strlen (name);
  strcpy (name + (len <= namelen - 4 ? len : namelen - 4), "/eio");
  prctl (PR_SET_NAME, (unsigned long)name, 0, 0, 0);
#endif
}
|
|
1736 | |
|
|
/* TODO: move somehow to etp.c */
/* worker thread main loop: block for a request, execute it, push the
   result onto the result queue, repeat.  threads beyond max_idle exit
   after idle_timeout seconds without work.
   NOTE(review): reqlock/reqwait/wrklock/reslock and the counters
   (started, idle, nready, npending) are presumably the pool globals
   defined in etp.c - not visible in this file, confirm there. */
X_THREAD_PROC (etp_proc)
{
  ETP_REQ *req;
  struct timespec ts;
  etp_worker *self = (etp_worker *)thr_arg;

  etp_proc_init ();

  /* try to distribute timeouts somewhat evenly */
  ts.tv_nsec = ((unsigned long)self & 1023UL) * (1000000000UL / 1024UL);

  for (;;)
    {
      ts.tv_sec = 0; /* 0 = timeout not yet initialised for this iteration */

      X_LOCK (reqlock);

      /* wait until a request is available, or until we time out and quit */
      for (;;)
        {
          req = reqq_shift (&req_queue);

          if (req)
            break;

          if (ts.tv_sec == 1) /* no request, but timeout detected, let's quit */
            {
              X_UNLOCK (reqlock);
              X_LOCK (wrklock);
              --started;
              X_UNLOCK (wrklock);
              goto quit;
            }

          ++idle;

          if (idle <= max_idle)
            /* we are allowed to idle, so do so without any timeout */
            X_COND_WAIT (reqwait, reqlock);
          else
            {
              /* initialise timeout once */
              if (!ts.tv_sec)
                ts.tv_sec = time (0) + idle_timeout;

              if (X_COND_TIMEDWAIT (reqwait, reqlock, ts) == ETIMEDOUT)
                ts.tv_sec = 1; /* flag value - assumed never to collide with a
                                  real absolute timeout computed above */
            }

          --idle;
        }

      --nready;

      X_UNLOCK (reqlock);

      if (req->type == ETP_TYPE_QUIT)
        goto quit; /* req is freed at the quit label */

      ETP_EXECUTE (self, req);

      X_LOCK (reslock);

      ++npending;

      /* if the result queue was empty, wake up whoever polls for results */
      if (!reqq_push (&res_queue, req) && want_poll_cb)
        want_poll_cb ();

      etp_worker_clear (self);

      X_UNLOCK (reslock);
    }

quit:
  free (req); /* NULL on the idle-timeout path, the QUIT request otherwise */

  X_LOCK (wrklock);
  etp_worker_free (self);
  X_UNLOCK (wrklock);

  return 0;
}
|
|
1819 | |
|
|
1820 | /*****************************************************************************/ |
1729 | /*****************************************************************************/ |
1821 | |
1730 | |
1822 | int ecb_cold |
1731 | int ecb_cold |
1823 | eio_init (void (*want_poll)(void), void (*done_poll)(void)) |
1732 | eio_init (void (*want_poll)(void), void (*done_poll)(void)) |
1824 | { |
1733 | { |
1825 | return etp_init (want_poll, done_poll); |
1734 | eio_want_poll_cb = want_poll; |
|
|
1735 | eio_done_poll_cb = done_poll; |
|
|
1736 | |
|
|
1737 | return etp_init (EIO_POOL, 0, 0, 0); |
1826 | } |
1738 | } |
1827 | |
1739 | |
1828 | ecb_inline void |
1740 | ecb_inline void |
1829 | eio_api_destroy (eio_req *req) |
1741 | eio_api_destroy (eio_req *req) |
1830 | { |
1742 | { |
… | |
… | |
2368 | void |
2280 | void |
2369 | eio_grp_add (eio_req *grp, eio_req *req) |
2281 | eio_grp_add (eio_req *grp, eio_req *req) |
2370 | { |
2282 | { |
2371 | assert (("cannot add requests to IO::AIO::GRP after the group finished", grp->int1 != 2)); |
2283 | assert (("cannot add requests to IO::AIO::GRP after the group finished", grp->int1 != 2)); |
2372 | |
2284 | |
2373 | grp->flags |= EIO_FLAG_GROUPADD; |
2285 | grp->flags |= ETP_FLAG_GROUPADD; |
2374 | |
2286 | |
2375 | ++grp->size; |
2287 | ++grp->size; |
2376 | req->grp = grp; |
2288 | req->grp = grp; |
2377 | |
2289 | |
2378 | req->grp_prev = 0; |
2290 | req->grp_prev = 0; |