
Comparing libeio/eio.c (file contents):
Revision 1.131 by root, Fri Aug 29 12:00:49 2014 UTC vs.
Revision 1.139 by root, Thu Jun 25 18:14:19 2015 UTC

120 #define link(old,neu) (CreateHardLink (neu, old, 0) ? 0 : EIO_ERRNO (ENOENT, -1)) 120 #define link(old,neu) (CreateHardLink (neu, old, 0) ? 0 : EIO_ERRNO (ENOENT, -1))
121 121
122 #define chmod(path,mode) _chmod (path, mode) 122 #define chmod(path,mode) _chmod (path, mode)
123 #define dup(fd) _dup (fd) 123 #define dup(fd) _dup (fd)
124 #define dup2(fd1,fd2) _dup2 (fd1, fd2) 124 #define dup2(fd1,fd2) _dup2 (fd1, fd2)
125 #define pipe(fds) _pipe (fds, 4096, O_BINARY)
125 126
126 #define fchmod(fd,mode) EIO_ENOSYS () 127 #define fchmod(fd,mode) EIO_ENOSYS ()
127 #define chown(path,uid,gid) EIO_ENOSYS () 128 #define chown(path,uid,gid) EIO_ENOSYS ()
128 #define fchown(fd,uid,gid) EIO_ENOSYS () 129 #define fchown(fd,uid,gid) EIO_ENOSYS ()
129 #define truncate(path,offs) EIO_ENOSYS () /* far-miss: SetEndOfFile */ 130 #define truncate(path,offs) EIO_ENOSYS () /* far-miss: SetEndOfFile */
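
The hunk above maps POSIX names onto their MSVCRT counterparts; revision 1.139 adds a pipe() emulation on top of _pipe(), using a 4096-byte buffer in binary mode. A minimal sketch of what that wrapper does when expanded (the function name is illustrative only):

    #ifdef _WIN32
    #include <io.h>      /* _pipe */
    #include <fcntl.h>   /* O_BINARY */

    /* sketch: POSIX-style pipe() on top of MSVCRT _pipe(), with the same
       buffer size and binary mode as the macro above */
    static int
    my_pipe (int fds[2])
    {
      return _pipe (fds, 4096, O_BINARY);   /* 0 on success, -1 and errno on failure */
    }
    #endif
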
335#define FUBd \ 336#define FUBd \
336 free (eio_buf) 337 free (eio_buf)
337 338
338/*****************************************************************************/ 339/*****************************************************************************/
339 340
340struct tmpbuf
341{
342 void *ptr;
343 int len;
344};
345
346static void *
347tmpbuf_get (struct tmpbuf *buf, int len)
348{
349 if (buf->len < len)
350 {
351 free (buf->ptr);
352 buf->ptr = malloc (buf->len = len);
353 }
354
355 return buf->ptr;
356}
357
358struct tmpbuf; 341struct etp_tmpbuf;
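
The local struct tmpbuf / tmpbuf_get pair shown on the left has been removed from eio.c; 1.139 forward-declares struct etp_tmpbuf instead, with the implementation living in etp.c. A self-contained sketch of the grow-only scratch-buffer idea, based on the removed code (the my_ names mark it as an illustration, not the etp.c implementation):

    #include <stdlib.h>

    struct my_tmpbuf
    {
      void *ptr;
      int len;
    };

    /* return a scratch buffer of at least len bytes, reallocating only when
       the request exceeds the largest size seen so far */
    static void *
    my_tmpbuf_get (struct my_tmpbuf *buf, int len)
    {
      if (buf->len < len)
        {
          free (buf->ptr);
          buf->ptr = malloc (buf->len = len);
        }

      return buf->ptr;
    }
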
359 342
360#if _POSIX_VERSION >= 200809L 343#if _POSIX_VERSION >= 200809L
361 #define HAVE_AT 1 344 #define HAVE_AT 1
362 #define WD2FD(wd) ((wd) ? (wd)->fd : AT_FDCWD) 345 #define WD2FD(wd) ((wd) ? (wd)->fd : AT_FDCWD)
363 #ifndef O_SEARCH 346 #ifndef O_SEARCH
364 #define O_SEARCH O_RDONLY 347 #define O_SEARCH O_RDONLY
365 #endif 348 #endif
366#else 349#else
367 #define HAVE_AT 0 350 #define HAVE_AT 0
368 static const char *wd_expand (struct tmpbuf *tmpbuf, eio_wd wd, const char *path); 351 static const char *wd_expand (struct etp_tmpbuf *tmpbuf, eio_wd wd, const char *path);
369#endif 352#endif
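
On POSIX.1-2008 systems the working-directory handle is simply a directory fd, and WD2FD turns a NULL handle into AT_FDCWD; O_SEARCH falls back to O_RDONLY where the platform lacks it. A hedged sketch of how such an fd is then fed to the *at() family (openat is standard; the wrapper name is made up for illustration):

    #include <sys/types.h>
    #include <fcntl.h>       /* openat, AT_FDCWD */

    #ifndef O_SEARCH
    #define O_SEARCH O_RDONLY   /* same fallback as above */
    #endif

    /* sketch: open a path relative to an eio working directory;
       dirfd is what WD2FD (wd) yields - wd->fd, or AT_FDCWD for "no wd" */
    static int
    open_in_wd (int dirfd, const char *path, int flags, mode_t mode)
    {
      return openat (dirfd, path, flags, mode);
    }
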
370 353
371struct eio_pwd 354struct eio_pwd
372{ 355{
373#if HAVE_AT 356#if HAVE_AT
383#define ETP_PRI_MAX EIO_PRI_MAX 366#define ETP_PRI_MAX EIO_PRI_MAX
384 367
385#define ETP_TYPE_QUIT -1 368#define ETP_TYPE_QUIT -1
386#define ETP_TYPE_GROUP EIO_GROUP 369#define ETP_TYPE_GROUP EIO_GROUP
387 370
371static void eio_nop_callback (void) { }
372static void (*eio_want_poll_cb)(void) = eio_nop_callback;
373static void (*eio_done_poll_cb)(void) = eio_nop_callback;
374
375#define ETP_WANT_POLL(pool) eio_want_poll_cb ()
376#define ETP_DONE_POLL(pool) eio_done_poll_cb ()
377
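Revision 1.139 routes the pool's want_poll/done_poll notifications through two file-scope function pointers that default to a no-op, so etp.c no longer needs eio-specific globals. want_poll is invoked from a worker thread when results become pending, so a typical implementation only wakes the event loop. A minimal sketch assuming a self-pipe whose read end the application watches (the pipe setup is not shown and the names are illustrative):

    #include <unistd.h>

    static int wakeup_pipe[2];   /* assumed to be created elsewhere with pipe() */

    /* called by a worker thread when results become pending */
    static void
    my_want_poll (void)
    {
      char c = 0;
      (void) write (wakeup_pipe[1], &c, 1);   /* wake the event loop */
    }

    /* called once eio_poll () has drained the pending results */
    static void
    my_done_poll (void)
    {
      char c;
      (void) read (wakeup_pipe[0], &c, 1);    /* drain the wakeup byte */
    }
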
388struct etp_worker; 378struct etp_worker;
389
390#define ETP_REQ eio_req 379#define ETP_REQ eio_req
391#define ETP_DESTROY(req) eio_destroy (req) 380#define ETP_DESTROY(req) eio_destroy (req)
392static int eio_finish (eio_req *req); 381static int eio_finish (eio_req *req);
393#define ETP_FINISH(req) eio_finish (req) 382#define ETP_FINISH(req) eio_finish (req)
394static void eio_execute (struct etp_worker *self, eio_req *req); 383static void eio_execute (struct etp_worker *self, eio_req *req);
395#define ETP_EXECUTE(wrk,req) eio_execute (wrk, req) 384#define ETP_EXECUTE(wrk,req) eio_execute (wrk, req)
396 385
397#include "etp.c" 386#include "etp.c"
398 387
388static struct etp_pool eio_pool;
389#define EIO_POOL (&eio_pool)
390
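etp.c is used as a macro-parameterised template: the includer defines ETP_REQ, ETP_DESTROY, ETP_FINISH and ETP_EXECUTE before the #include, and 1.139 additionally instantiates one statically allocated pool (eio_pool) that every wrapper below names explicitly. A tiny self-contained illustration of the configure-then-include idea, with the "template" body inlined here instead of living in its own file:

    #include <stdio.h>

    /* what the includer would define before the #include */
    #define MY_ITEM          int
    #define MY_EXECUTE(item) printf ("executing %d\n", (item))

    /* what would normally live in the included template file */
    static void
    pool_execute (MY_ITEM item)
    {
      MY_EXECUTE (item);
    }

    int
    main (void)
    {
      pool_execute (42);
      return 0;
    }
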
399/*****************************************************************************/ 391/*****************************************************************************/
400 392
401static void 393static void
402grp_try_feed (eio_req *grp) 394grp_try_feed (eio_req *grp)
403{ 395{
404 while (grp->size < grp->int2 && !EIO_CANCELLED (grp)) 396 while (grp->size < grp->int2 && !EIO_CANCELLED (grp))
405 { 397 {
406 grp->flags &= ~EIO_FLAG_GROUPADD; 398 grp->flags &= ~ETP_FLAG_GROUPADD;
407 399
408 EIO_FEED (grp); 400 EIO_FEED (grp);
409 401
410 /* stop if no progress has been made */ 402 /* stop if no progress has been made */
411 if (!(grp->flags & EIO_FLAG_GROUPADD)) 403 if (!(grp->flags & ETP_FLAG_GROUPADD))
412 { 404 {
413 grp->feed = 0; 405 grp->feed = 0;
414 break; 406 break;
415 } 407 }
416 } 408 }
423 415
424 /* call feeder, if applicable */ 416 /* call feeder, if applicable */
425 grp_try_feed (grp); 417 grp_try_feed (grp);
426 418
427 /* finish, if done */ 419 /* finish, if done */
428 if (!grp->size && grp->int1) 420 if (!grp->size && grp->flags & ETP_FLAG_DELAYED)
429 return eio_finish (grp); 421 return eio_finish (grp);
430 else 422 else
431 return 0; 423 return 0;
432} 424}
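
grp_try_feed keeps calling the group's feeder until either the limit (grp->int2) is reached or the feeder stops adding sub-requests; adding a request sets ETP_FLAG_GROUPADD (formerly EIO_FLAG_GROUPADD), which is how progress is detected. A hedged usage sketch of the feeder API (eio_grp, eio_grp_feed, eio_grp_add and eio_nop are the public names; the feeder body and the work counter are illustrative only):

    #include "eio.h"

    static int remaining = 10;               /* illustrative work counter */

    /* feeder: called by grp_try_feed whenever the group has room */
    static void
    my_feed (eio_req *grp)
    {
      eio_req *sub;

      if (remaining <= 0)
        return;                              /* adding nothing stops the feeder */

      sub = eio_nop (0, 0, 0);               /* stand-in for a real sub-request */
      if (sub)
        {
          --remaining;
          eio_grp_add (grp, sub);            /* sets ETP_FLAG_GROUPADD */
        }
    }

    static void
    start_group (void)
    {
      eio_req *grp = eio_grp (0, 0);         /* eio_grp creates and submits the group */

      if (grp)
        eio_grp_feed (grp, my_feed, 4);      /* keep at most 4 sub-requests queued */
    }
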
433 425
469} 461}
470 462
471void 463void
472eio_grp_cancel (eio_req *grp) 464eio_grp_cancel (eio_req *grp)
473{ 465{
474 etp_grp_cancel (grp); 466 etp_grp_cancel (EIO_POOL, grp);
475} 467}
476 468
477void 469void
478eio_cancel (eio_req *req) 470eio_cancel (eio_req *req)
479{ 471{
480 etp_cancel (req); 472 etp_cancel (EIO_POOL, req);
481} 473}
482 474
483void 475void
484eio_submit (eio_req *req) 476eio_submit (eio_req *req)
485{ 477{
486 etp_submit (req); 478 etp_submit (EIO_POOL, req);
487} 479}
488 480
489unsigned int 481unsigned int
490eio_nreqs (void) 482eio_nreqs (void)
491{ 483{
492 return etp_nreqs (); 484 return etp_nreqs (EIO_POOL);
493} 485}
494 486
495unsigned int 487unsigned int
496eio_nready (void) 488eio_nready (void)
497{ 489{
498 return etp_nready (); 490 return etp_nready (EIO_POOL);
499} 491}
500 492
501unsigned int 493unsigned int
502eio_npending (void) 494eio_npending (void)
503{ 495{
504 return etp_npending (); 496 return etp_npending (EIO_POOL);
505} 497}
506 498
507unsigned int ecb_cold 499unsigned int ecb_cold
508eio_nthreads (void) 500eio_nthreads (void)
509{ 501{
510 return etp_nthreads (); 502 return etp_nthreads (EIO_POOL);
511} 503}
512 504
513void ecb_cold 505void ecb_cold
514eio_set_max_poll_time (double nseconds) 506eio_set_max_poll_time (double nseconds)
515{ 507{
516 etp_set_max_poll_time (nseconds); 508 etp_set_max_poll_time (EIO_POOL, nseconds);
517} 509}
518 510
519void ecb_cold 511void ecb_cold
520eio_set_max_poll_reqs (unsigned int maxreqs) 512eio_set_max_poll_reqs (unsigned int maxreqs)
521{ 513{
522 etp_set_max_poll_reqs (maxreqs); 514 etp_set_max_poll_reqs (EIO_POOL, maxreqs);
523} 515}
524 516
525void ecb_cold 517void ecb_cold
526eio_set_max_idle (unsigned int nthreads) 518eio_set_max_idle (unsigned int nthreads)
527{ 519{
528 etp_set_max_idle (nthreads); 520 etp_set_max_idle (EIO_POOL, nthreads);
529} 521}
530 522
531void ecb_cold 523void ecb_cold
532eio_set_idle_timeout (unsigned int seconds) 524eio_set_idle_timeout (unsigned int seconds)
533{ 525{
534 etp_set_idle_timeout (seconds); 526 etp_set_idle_timeout (EIO_POOL, seconds);
535} 527}
536 528
537void ecb_cold 529void ecb_cold
538eio_set_min_parallel (unsigned int nthreads) 530eio_set_min_parallel (unsigned int nthreads)
539{ 531{
540 etp_set_min_parallel (nthreads); 532 etp_set_min_parallel (EIO_POOL, nthreads);
541} 533}
542 534
543void ecb_cold 535void ecb_cold
544eio_set_max_parallel (unsigned int nthreads) 536eio_set_max_parallel (unsigned int nthreads)
545{ 537{
546 etp_set_max_parallel (nthreads); 538 etp_set_max_parallel (EIO_POOL, nthreads);
547} 539}
548 540
549int eio_poll (void) 541int eio_poll (void)
550{ 542{
551 return etp_poll (); 543 return etp_poll (EIO_POOL);
552} 544}
553 545
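All of the wrappers above simply forward to their etp_* counterparts, now naming the shared pool explicitly via EIO_POOL. A short usage sketch of the tuning knobs, with values chosen purely for illustration:

    #include "eio.h"

    static void
    tune_pool (void)
    {
      eio_set_min_parallel (4);       /* keep at least 4 worker threads around */
      eio_set_max_parallel (16);      /* never start more than 16 */
      eio_set_idle_timeout (10);      /* idle workers exit after 10 seconds */

      /* bound the work done inside one eio_poll () call */
      eio_set_max_poll_time (0.01);   /* at most ~10ms per poll */
      eio_set_max_poll_reqs (64);     /* and at most 64 finished requests */
    }
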
554/*****************************************************************************/ 546/*****************************************************************************/
555/* work around various missing functions */ 547/* work around various missing functions */
556 548
966 req->result = req->offs == (off_t)-1 ? -1 : 0; 958 req->result = req->offs == (off_t)-1 ? -1 : 0;
967} 959}
968 960
969/* result will always end up in tmpbuf, there is always space for adding a 0-byte */ 961/* result will always end up in tmpbuf, there is always space for adding a 0-byte */
970static int 962static int
971eio__realpath (struct tmpbuf *tmpbuf, eio_wd wd, const char *path) 963eio__realpath (struct etp_tmpbuf *tmpbuf, eio_wd wd, const char *path)
972{ 964{
973 char *res; 965 char *res;
974 const char *rel = path; 966 const char *rel = path;
975 char *tmp1, *tmp2; 967 char *tmp1, *tmp2;
976#if SYMLOOP_MAX > 32 968#if SYMLOOP_MAX > 32
985 977
986 errno = ENOENT; 978 errno = ENOENT;
987 if (!*rel) 979 if (!*rel)
988 return -1; 980 return -1;
989 981
990 res = tmpbuf_get (tmpbuf, PATH_MAX * 3); 982 res = etp_tmpbuf_get (tmpbuf, PATH_MAX * 3);
991#ifdef _WIN32 983#ifdef _WIN32
992 if (_access (rel, 4) != 0) 984 if (_access (rel, 4) != 0)
993 return -1; 985 return -1;
994 986
995 symlinks = GetFullPathName (rel, PATH_MAX * 3, res, 0); 987 symlinks = GetFullPathName (rel, PATH_MAX * 3, res, 0);
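
On Win32 the function checks readability with _access and then lets GetFullPathName canonicalise the path into the PATH_MAX * 3 scratch buffer obtained from the tmpbuf. A stand-alone sketch of that branch, with reduced error handling (PATH_MAX may need a fallback definition on MSVC, where only MAX_PATH is guaranteed):

    #ifdef _WIN32
    #include <windows.h>
    #include <io.h>        /* _access */
    #include <limits.h>    /* PATH_MAX, if available */

    /* sketch: resolve path into buf (at least PATH_MAX * 3 bytes); returns the
       length written, 0 on GetFullPathName failure, or -1 if unreadable */
    static int
    win32_realpath_sketch (const char *path, char *buf)
    {
      if (_access (path, 4) != 0)    /* 4 = readable */
        return -1;

      return GetFullPathName (path, PATH_MAX * 3, buf, 0);
    }
    #endif
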
1604#if !HAVE_AT 1596#if !HAVE_AT
1605 1597
1606 /* a bit like realpath, but usually faster because it doesn't have to return */ 1598 /* a bit like realpath, but usually faster because it doesn't have to return */
1607/* an absolute or canonical path */ 1599/* an absolute or canonical path */
1608static const char * 1600static const char *
1609wd_expand (struct tmpbuf *tmpbuf, eio_wd wd, const char *path) 1601wd_expand (struct etp_tmpbuf *tmpbuf, eio_wd wd, const char *path)
1610{ 1602{
1611 if (!wd || *path == '/') 1603 if (!wd || *path == '/')
1612 return path; 1604 return path;
1613 1605
1614 if (path [0] == '.' && !path [1]) 1606 if (path [0] == '.' && !path [1])
1616 1608
1617 { 1609 {
1618 int l1 = wd->len; 1610 int l1 = wd->len;
1619 int l2 = strlen (path); 1611 int l2 = strlen (path);
1620 1612
1621 char *res = tmpbuf_get (tmpbuf, l1 + l2 + 2); 1613 char *res = etp_tmpbuf_get (tmpbuf, l1 + l2 + 2);
1622 1614
1623 memcpy (res, wd->str, l1); 1615 memcpy (res, wd->str, l1);
1624 res [l1] = '/'; 1616 res [l1] = '/';
1625 memcpy (res + l1 + 1, path, l2 + 1); 1617 memcpy (res + l1 + 1, path, l2 + 1);
1626 1618
1629} 1621}
1630 1622
1631#endif 1623#endif
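
Without *at() support, wd_expand rewrites a relative path into "<wd->str>/<path>" inside the shared tmpbuf; absolute paths and a bare "." pass through untouched. A tiny illustration of the expected result (plain snprintf stands in for the memcpy-based code above):

    #include <stdio.h>

    int
    main (void)
    {
      char buf[64];
      const char *wd_str = "/var/tmp/work";   /* what wd->str would hold */
      const char *path   = "data/file.txt";   /* relative request path */

      /* same result wd_expand produces via memcpy into the tmpbuf */
      snprintf (buf, sizeof buf, "%s/%s", wd_str, path);
      printf ("%s\n", buf);                   /* /var/tmp/work/data/file.txt */
      return 0;
    }
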
1632 1624
1633static eio_wd 1625static eio_wd
1634eio__wd_open_sync (struct tmpbuf *tmpbuf, eio_wd wd, const char *path) 1626eio__wd_open_sync (struct etp_tmpbuf *tmpbuf, eio_wd wd, const char *path)
1635{ 1627{
1636 int fd; 1628 int fd;
1637 eio_wd res; 1629 eio_wd res;
1638 int len = eio__realpath (tmpbuf, wd, path); 1630 int len = eio__realpath (tmpbuf, wd, path);
1639 1631
1661} 1653}
1662 1654
1663eio_wd 1655eio_wd
1664eio_wd_open_sync (eio_wd wd, const char *path) 1656eio_wd_open_sync (eio_wd wd, const char *path)
1665{ 1657{
1666 struct tmpbuf tmpbuf = { 0 }; 1658 struct etp_tmpbuf tmpbuf = { };
1667 wd = eio__wd_open_sync (&tmpbuf, wd, path); 1659 wd = eio__wd_open_sync (&tmpbuf, wd, path);
1668 free (tmpbuf.ptr); 1660 free (tmpbuf.ptr);
1669 1661
1670 return wd; 1662 return wd;
1671} 1663}
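
eio_wd_open_sync now uses a stack-local struct etp_tmpbuf (note the empty-brace initialiser introduced in 1.139) that is freed as soon as the handle has been created. A hedged usage sketch of the synchronous wd API; the directory path is illustrative, and EIO_INVALID_WD is assumed to be the error sentinel exported by eio.h:

    #include "eio.h"

    static void
    use_wd (void)
    {
      /* resolve and pin a directory; 0 means "relative to the process cwd" */
      eio_wd wd = eio_wd_open_sync (0, "/var/spool/myapp");

      if (wd != EIO_INVALID_WD)   /* assumed error sentinel */
        {
          /* ... queue requests whose paths are resolved relative to wd ... */

          eio_wd_close_sync (wd);
        }
    }
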
1720/*****************************************************************************/ 1712/*****************************************************************************/
1721 1713
1722#define ALLOC(len) \ 1714#define ALLOC(len) \
1723 if (!req->ptr2) \ 1715 if (!req->ptr2) \
1724 { \ 1716 { \
1725 X_LOCK (wrklock); \ 1717 X_LOCK (EIO_POOL->wrklock); \
1726 req->flags |= EIO_FLAG_PTR2_FREE; \ 1718 req->flags |= EIO_FLAG_PTR2_FREE; \
1727 X_UNLOCK (wrklock); \ 1719 X_UNLOCK (EIO_POOL->wrklock); \
1728 req->ptr2 = malloc (len); \ 1720 req->ptr2 = malloc (len); \
1729 if (!req->ptr2) \ 1721 if (!req->ptr2) \
1730 { \ 1722 { \
1731 errno = ENOMEM; \ 1723 errno = ENOMEM; \
1732 req->result = -1; \ 1724 req->result = -1; \
1733 break; \ 1725 break; \
1734 } \ 1726 } \
1735 } 1727 }
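
ALLOC now takes the pool's own wrklock (EIO_POOL->wrklock) while flagging ptr2 for later freeing; the trailing break explains why it must be expanded inside the request switch, where an allocation failure ends the case with ENOMEM. A hedged, stand-alone illustration of that allocate-or-fail contract (fake_req and its flag are made up for the example; they are not eio types):

    #include <errno.h>
    #include <stdlib.h>

    struct fake_req { void *ptr2; int flags; long result; };
    #define FAKE_PTR2_FREE 1

    /* ensure a result buffer of len bytes exists, or fail with ENOMEM -
       the real ALLOC does the same and then leaves the switch with 'break' */
    static int
    alloc_ptr2 (struct fake_req *req, size_t len)
    {
      if (!req->ptr2)
        {
          req->ptr2 = malloc (len);
          if (!req->ptr2)
            {
              errno = ENOMEM;
              req->result = -1;
              return -1;
            }
          req->flags |= FAKE_PTR2_FREE;   /* buffer is freed with the request */
        }

      return 0;
    }
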
1736 1728
1737static void ecb_noinline ecb_cold
1738etp_proc_init (void)
1739{
1740#if HAVE_PRCTL_SET_NAME
1741 /* provide a more sensible "thread name" */
1742 char name[16 + 1];
1743 const int namelen = sizeof (name) - 1;
1744 int len;
1745
1746 prctl (PR_GET_NAME, (unsigned long)name, 0, 0, 0);
1747 name [namelen] = 0;
1748 len = strlen (name);
1749 strcpy (name + (len <= namelen - 4 ? len : namelen - 4), "/eio");
1750 prctl (PR_SET_NAME, (unsigned long)name, 0, 0, 0);
1751#endif
1752}
1753
1754/* TODO: move somehow to etp.c */
1755X_THREAD_PROC (etp_proc)
1756{
1757 ETP_REQ *req;
1758 struct timespec ts;
1759 etp_worker *self = (etp_worker *)thr_arg;
1760
1761 etp_proc_init ();
1762
1763 /* try to distribute timeouts somewhat evenly */
1764 ts.tv_nsec = ((unsigned long)self & 1023UL) * (1000000000UL / 1024UL);
1765
1766 for (;;)
1767 {
1768 ts.tv_sec = 0;
1769
1770 X_LOCK (reqlock);
1771
1772 for (;;)
1773 {
1774 req = reqq_shift (&req_queue);
1775
1776 if (req)
1777 break;
1778
1779 if (ts.tv_sec == 1) /* no request, but timeout detected, let's quit */
1780 {
1781 X_UNLOCK (reqlock);
1782 X_LOCK (wrklock);
1783 --started;
1784 X_UNLOCK (wrklock);
1785 goto quit;
1786 }
1787
1788 ++idle;
1789
1790 if (idle <= max_idle)
1791 /* we are allowed to idle, so do so without any timeout */
1792 X_COND_WAIT (reqwait, reqlock);
1793 else
1794 {
1795 /* initialise timeout once */
1796 if (!ts.tv_sec)
1797 ts.tv_sec = time (0) + idle_timeout;
1798
1799 if (X_COND_TIMEDWAIT (reqwait, reqlock, ts) == ETIMEDOUT)
 1800            ts.tv_sec = 1; /* assuming this is not a value computed above... */
1801 }
1802
1803 --idle;
1804 }
1805
1806 --nready;
1807
1808 X_UNLOCK (reqlock);
1809
1810 if (req->type == ETP_TYPE_QUIT)
1811 goto quit;
1812
1813 ETP_EXECUTE (self, req);
1814
1815 X_LOCK (reslock);
1816
1817 ++npending;
1818
1819 if (!reqq_push (&res_queue, req) && want_poll_cb)
1820 want_poll_cb ();
1821
1822 etp_worker_clear (self);
1823
1824 X_UNLOCK (reslock);
1825 }
1826
1827quit:
1828 free (req);
1829
1830 X_LOCK (wrklock);
1831 etp_worker_free (self);
1832 X_UNLOCK (wrklock);
1833
1834 return 0;
1835}
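
The whole worker-thread procedure, including the PR_SET_NAME trick that appends "/eio" to the thread name, has been removed here and now lives in etp.c, as the TODO comment anticipated. The naming trick in isolation, assuming Linux and <sys/prctl.h> (other platforms would need pthread_setname_np or nothing at all):

    #include <string.h>
    #include <sys/prctl.h>   /* Linux-specific */

    /* append "/eio" to the current thread name, truncating if necessary;
       thread names are limited to 16 bytes including the terminating 0 */
    static void
    tag_thread_name (void)
    {
      char name[16 + 1];
      const int namelen = sizeof (name) - 1;
      int len;

      prctl (PR_GET_NAME, (unsigned long)name, 0, 0, 0);
      name[namelen] = 0;
      len = strlen (name);
      strcpy (name + (len <= namelen - 4 ? len : namelen - 4), "/eio");
      prctl (PR_SET_NAME, (unsigned long)name, 0, 0, 0);
    }
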
1836
1837/*****************************************************************************/ 1729/*****************************************************************************/
1838 1730
1839int ecb_cold 1731int ecb_cold
1840eio_init (void (*want_poll)(void), void (*done_poll)(void)) 1732eio_init (void (*want_poll)(void), void (*done_poll)(void))
1841{ 1733{
1842 return etp_init (want_poll, done_poll); 1734 eio_want_poll_cb = want_poll;
1735 eio_done_poll_cb = done_poll;
1736
1737 return etp_init (EIO_POOL, 0, 0, 0);
1843} 1738}
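
eio_init no longer hands the callbacks to etp_init directly; it stores them in the file-scope pointers introduced above and initialises the shared pool with default settings. A minimal hedged integration sketch: initialise, queue one request, and drain results with eio_poll (here driven by a crude loop; a real program would block in its event loop and call eio_poll only after want_poll has fired, as in the self-pipe sketch earlier):

    #include <stdio.h>
    #include "eio.h"

    static int
    nop_done (eio_req *req)
    {
      printf ("request finished, result %ld\n", (long)req->result);
      return 0;
    }

    static void want_poll (void) { /* wake the event loop, e.g. via a pipe */ }
    static void done_poll (void) { /* event loop may go back to sleep */ }

    int
    main (void)
    {
      if (eio_init (want_poll, done_poll))
        return 1;

      eio_nop (0, nop_done, 0);          /* queue a trivial request */

      /* crude driver loop for the sketch only */
      while (eio_nreqs ())
        eio_poll ();

      return 0;
    }
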
1844 1739
1845ecb_inline void 1740ecb_inline void
1846eio_api_destroy (eio_req *req) 1741eio_api_destroy (eio_req *req)
1847{ 1742{
1857 \ 1752 \
1858 req->type = rtype; \ 1753 req->type = rtype; \
1859 req->pri = pri; \ 1754 req->pri = pri; \
1860 req->finish = cb; \ 1755 req->finish = cb; \
1861 req->data = data; \ 1756 req->data = data; \
1862 req->result = -1; \
1863 req->errorno = ECANCELED; \
1864 req->destroy = eio_api_destroy; 1757 req->destroy = eio_api_destroy;
1865 1758
1866#define SEND eio_submit (req); return req 1759#define SEND eio_submit (req); return req
1867 1760
1868#define PATH \ 1761#define PATH \
2074 req->result = select (0, 0, 0, 0, &tv); 1967 req->result = select (0, 0, 0, 0, &tv);
2075 } 1968 }
2076#endif 1969#endif
2077 break; 1970 break;
2078 1971
1972#if 0
2079 case EIO_GROUP: 1973 case EIO_GROUP:
2080 abort (); /* handled in eio_request */ 1974 abort (); /* handled in eio_request */
1975#endif
2081 1976
2082 case EIO_NOP: 1977 case EIO_NOP:
2083 req->result = 0; 1978 req->result = 0;
2084 break; 1979 break;
2085 1980
2385void 2280void
2386eio_grp_add (eio_req *grp, eio_req *req) 2281eio_grp_add (eio_req *grp, eio_req *req)
2387{ 2282{
2388 assert (("cannot add requests to IO::AIO::GRP after the group finished", grp->int1 != 2)); 2283 assert (("cannot add requests to IO::AIO::GRP after the group finished", grp->int1 != 2));
2389 2284
2390 grp->flags |= EIO_FLAG_GROUPADD; 2285 grp->flags |= ETP_FLAG_GROUPADD;
2391 2286
2392 ++grp->size; 2287 ++grp->size;
2393 req->grp = grp; 2288 req->grp = grp;
2394 2289
2395 req->grp_prev = 0; 2290 req->grp_prev = 0;
