
Comparing libeio/eio.c (file contents):
Revision 1.124 by root, Thu Oct 11 05:01:56 2012 UTC vs.
Revision 1.139 by root, Thu Jun 25 18:14:19 2015 UTC

1/* 1/*
2 * libeio implementation 2 * libeio implementation
3 * 3 *
4 * Copyright (c) 2007,2008,2009,2010,2011,2012 Marc Alexander Lehmann <libeio@schmorp.de> 4 * Copyright (c) 2007,2008,2009,2010,2011,2012,2013 Marc Alexander Lehmann <libeio@schmorp.de>
5 * All rights reserved. 5 * All rights reserved.
6 * 6 *
7 * Redistribution and use in source and binary forms, with or without modifica- 7 * Redistribution and use in source and binary forms, with or without modifica-
8 * tion, are permitted provided that the following conditions are met: 8 * tion, are permitted provided that the following conditions are met:
9 * 9 *
10 * 1. Redistributions of source code must retain the above copyright notice, 10 * 1. Redistributions of source code must retain the above copyright notice,
11 * this list of conditions and the following disclaimer. 11 * this list of conditions and the following disclaimer.
12 * 12 *
13 * 2. Redistributions in binary form must reproduce the above copyright 13 * 2. Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in the 14 * notice, this list of conditions and the following disclaimer in the
15 * documentation and/or other materials provided with the distribution. 15 * documentation and/or other materials provided with the distribution.
16 * 16 *
17 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED 17 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
18 * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MER- 18 * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MER-
19 * CHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO 19 * CHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
20 * EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPE- 20 * EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPE-
21 * CIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, 21 * CIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
120 #define link(old,neu) (CreateHardLink (neu, old, 0) ? 0 : EIO_ERRNO (ENOENT, -1)) 120 #define link(old,neu) (CreateHardLink (neu, old, 0) ? 0 : EIO_ERRNO (ENOENT, -1))
121 121
122 #define chmod(path,mode) _chmod (path, mode) 122 #define chmod(path,mode) _chmod (path, mode)
123 #define dup(fd) _dup (fd) 123 #define dup(fd) _dup (fd)
124 #define dup2(fd1,fd2) _dup2 (fd1, fd2) 124 #define dup2(fd1,fd2) _dup2 (fd1, fd2)
125 #define pipe(fds) _pipe (fds, 4096, O_BINARY)
125 126
126 #define fchmod(fd,mode) EIO_ENOSYS () 127 #define fchmod(fd,mode) EIO_ENOSYS ()
127 #define chown(path,uid,gid) EIO_ENOSYS () 128 #define chown(path,uid,gid) EIO_ENOSYS ()
128 #define fchown(fd,uid,gid) EIO_ENOSYS () 129 #define fchown(fd,uid,gid) EIO_ENOSYS ()
129 #define truncate(path,offs) EIO_ENOSYS () /* far-miss: SetEndOfFile */ 130 #define truncate(path,offs) EIO_ENOSYS () /* far-miss: SetEndOfFile */
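
For context, the added line above maps POSIX pipe () onto MSVCRT's _pipe with a 4 KiB buffer and binary mode, while calls that have no win32 counterpart keep failing through the EIO_ENOSYS () stubs. A minimal sketch of what the pipe wrapper amounts to, assuming only the documented _pipe from <io.h>:

#ifdef _WIN32
#include <io.h>
#include <fcntl.h>

/* illustrative expansion of "#define pipe(fds) _pipe (fds, 4096, O_BINARY)":
   returns 0 on success, -1 on error, both ends opened in binary mode */
static int
win32_pipe_sketch (int fds[2])
{
  return _pipe (fds, 4096, O_BINARY);
}
#endif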
213 #endif 214 #endif
214 215
215 return EIO_ERRNO (ENOENT, -1); 216 return EIO_ERRNO (ENOENT, -1);
216 } 217 }
217 218
218 /* POSIX API only */ 219 /* POSIX API only, causing trouble for win32 apps */
219 #define CreateHardLink(neu,old,flags) 0 220 #define CreateHardLink(neu,old,flags) 0 /* not really creating hardlink, still using relative paths? */
220 #define CreateSymbolicLink(neu,old,flags) 0 221 #define CreateSymbolicLink(neu,old,flags) 0 /* vista+ only */
221 222
222 struct statvfs 223 struct statvfs
223 { 224 {
224 int dummy; 225 int dummy;
225 }; 226 };
231 232
232#else 233#else
233 234
234 #include <sys/time.h> 235 #include <sys/time.h>
235 #include <sys/select.h> 236 #include <sys/select.h>
236 #include <sys/statvfs.h>
237 #include <unistd.h> 237 #include <unistd.h>
238 #include <signal.h> 238 #include <signal.h>
239 #include <dirent.h> 239 #include <dirent.h>
240
241 #ifdef ANDROID
242 #include <sys/vfs.h>
243 #define statvfs statfs
244 #define fstatvfs fstatfs
245 #include <asm/page.h> /* supposedly limits.h does #define PAGESIZE PAGESIZE */
246 #else
247 #include <sys/statvfs.h>
248 #endif
240 249
241 #if _POSIX_MEMLOCK || _POSIX_MEMLOCK_RANGE || _POSIX_MAPPED_FILES 250 #if _POSIX_MEMLOCK || _POSIX_MEMLOCK_RANGE || _POSIX_MAPPED_FILES
242 #include <sys/mman.h> 251 #include <sys/mman.h>
243 #endif 252 #endif
244 253
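
The new ANDROID branch above works around toolchains where <sys/statvfs.h> is unavailable by aliasing statvfs/fstatvfs to the statfs family from <sys/vfs.h>. A hedged illustration of what a statvfs call compiles down to under that mapping (field types and the exact statfs layout vary by platform):

#ifdef ANDROID
#include <sys/vfs.h>

/* with "#define statvfs statfs" in effect, this really calls statfs () */
static int
free_blocks_sketch (const char *path, unsigned long *blocks_free)
{
  struct statfs sfs;

  if (statfs (path, &sfs))
    return -1;

  *blocks_free = sfs.f_bfree; /* field present in both statfs and statvfs */
  return 0;
}
#endif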
316#endif 325#endif
317 326
318/* buffer size for various temporary buffers */ 327/* buffer size for various temporary buffers */
319#define EIO_BUFSIZE 65536 328#define EIO_BUFSIZE 65536
320 329
321#define dBUF \ 330#define dBUF \
322 char *eio_buf = malloc (EIO_BUFSIZE); \ 331 char *eio_buf = malloc (EIO_BUFSIZE); \
323 errno = ENOMEM; \ 332 errno = ENOMEM; \
324 if (!eio_buf) \ 333 if (!eio_buf) \
325 return -1 334 return -1
326 335
327#define FUBd \ 336#define FUBd \
328 free (eio_buf) 337 free (eio_buf)
329 338
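
dBUF and FUBd bracket a per-call 64 KiB scratch buffer: dBUF declares and allocates eio_buf and makes the enclosing handler return -1 (errno already set to ENOMEM) if the allocation fails, FUBd releases it. A hedged usage sketch, with plain read/write standing in for a real request handler; dBUF, FUBd and EIO_BUFSIZE are the macros from this file:

#include <unistd.h>

static ssize_t
copy_chunk_sketch (int ifd, int ofd)
{
  dBUF;                                  /* char *eio_buf = malloc (EIO_BUFSIZE), or return -1 */

  ssize_t len = read (ifd, eio_buf, EIO_BUFSIZE);

  if (len > 0)
    len = write (ofd, eio_buf, len);

  FUBd;                                  /* free (eio_buf) */
  return len;
}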
330#define EIO_TICKS ((1000000 + 1023) >> 10)
331
332/*****************************************************************************/ 339/*****************************************************************************/
333 340
334struct tmpbuf
335{
336 void *ptr;
337 int len;
338};
339
340static void *
341tmpbuf_get (struct tmpbuf *buf, int len)
342{
343 if (buf->len < len)
344 {
345 free (buf->ptr);
346 buf->ptr = malloc (buf->len = len);
347 }
348
349 return buf->ptr;
350}
351
352struct tmpbuf; 341struct etp_tmpbuf;
353 342
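
The per-worker tmpbuf removed on the left is not gone; it moved into etp.c under the name etp_tmpbuf, which is why the forward declaration and the etp_tmpbuf_get () calls appear on the right throughout this diff. A sketch of the grow-only buffer, mirroring the removed code (the actual definition in etp.c may differ):

#include <stdlib.h>

struct etp_tmpbuf
{
  void *ptr;
  int len;
};

/* grow-only: reallocate only when the requested size exceeds the current one,
   so repeated calls converge on a single allocation per worker */
static void *
etp_tmpbuf_get (struct etp_tmpbuf *buf, int len)
{
  if (buf->len < len)
    {
      free (buf->ptr);
      buf->ptr = malloc (buf->len = len);
    }

  return buf->ptr;
}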
354#if _POSIX_VERSION >= 200809L 343#if _POSIX_VERSION >= 200809L
355 #define HAVE_AT 1 344 #define HAVE_AT 1
356 #define WD2FD(wd) ((wd) ? (wd)->fd : AT_FDCWD) 345 #define WD2FD(wd) ((wd) ? (wd)->fd : AT_FDCWD)
357 #ifndef O_SEARCH 346 #ifndef O_SEARCH
358 #define O_SEARCH O_RDONLY 347 #define O_SEARCH O_RDONLY
359 #endif 348 #endif
360#else 349#else
361 #define HAVE_AT 0 350 #define HAVE_AT 0
362 static const char *wd_expand (struct tmpbuf *tmpbuf, eio_wd wd, const char *path); 351 static const char *wd_expand (struct etp_tmpbuf *tmpbuf, eio_wd wd, const char *path);
363#endif 352#endif
364 353
365struct eio_pwd 354struct eio_pwd
366{ 355{
367#if HAVE_AT 356#if HAVE_AT
374/*****************************************************************************/ 363/*****************************************************************************/
375 364
376#define ETP_PRI_MIN EIO_PRI_MIN 365#define ETP_PRI_MIN EIO_PRI_MIN
377#define ETP_PRI_MAX EIO_PRI_MAX 366#define ETP_PRI_MAX EIO_PRI_MAX
378 367
368#define ETP_TYPE_QUIT -1
369#define ETP_TYPE_GROUP EIO_GROUP
370
371static void eio_nop_callback (void) { }
372static void (*eio_want_poll_cb)(void) = eio_nop_callback;
373static void (*eio_done_poll_cb)(void) = eio_nop_callback;
374
375#define ETP_WANT_POLL(pool) eio_want_poll_cb ()
376#define ETP_DONE_POLL(pool) eio_done_poll_cb ()
377
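
These additions replace the old file-local want_poll_cb/done_poll_cb pointers: the callbacks default to a harmless no-op, so the generic pool code in etp.c (included a few lines further down) can invoke ETP_WANT_POLL/ETP_DONE_POLL unconditionally, without the NULL checks the removed code needed. A small self-contained illustration of that idiom; the names below are local to the example:

#include <stdio.h>

static void nop (void) { }

/* never NULL, so call sites need no "if (cb)" guard */
static void (*want_poll) (void) = nop;

static void my_want_poll (void) { puts ("results pending"); }

int
main (void)
{
  want_poll ();            /* safe no-op before initialisation */
  want_poll = my_want_poll;
  want_poll ();            /* prints "results pending" */
  return 0;
}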
379struct etp_worker; 378struct etp_worker;
380
381#define ETP_REQ eio_req 379#define ETP_REQ eio_req
382#define ETP_DESTROY(req) eio_destroy (req) 380#define ETP_DESTROY(req) eio_destroy (req)
383static int eio_finish (eio_req *req); 381static int eio_finish (eio_req *req);
384#define ETP_FINISH(req) eio_finish (req) 382#define ETP_FINISH(req) eio_finish (req)
385static void eio_execute (struct etp_worker *self, eio_req *req); 383static void eio_execute (struct etp_worker *self, eio_req *req);
386#define ETP_EXECUTE(wrk,req) eio_execute (wrk,req) 384#define ETP_EXECUTE(wrk,req) eio_execute (wrk, req)
387 385
388/*****************************************************************************/ 386#include "etp.c"
389 387
390#define ETP_NUM_PRI (ETP_PRI_MAX - ETP_PRI_MIN + 1) 388static struct etp_pool eio_pool;
391 389#define EIO_POOL (&eio_pool)
392/* calculate time difference in ~1/EIO_TICKS of a second */
393ecb_inline int
394tvdiff (struct timeval *tv1, struct timeval *tv2)
395{
396 return (tv2->tv_sec - tv1->tv_sec ) * EIO_TICKS
397 + ((tv2->tv_usec - tv1->tv_usec) >> 10);
398}
399
400static unsigned int started, idle, wanted = 4;
401
402static void (*want_poll_cb) (void);
403static void (*done_poll_cb) (void);
404
405static unsigned int max_poll_time; /* reslock */
406static unsigned int max_poll_reqs; /* reslock */
407
408static unsigned int nreqs; /* reqlock */
409static unsigned int nready; /* reqlock */
410static unsigned int npending; /* reqlock */
411static unsigned int max_idle = 4; /* maximum number of threads that can idle indefinitely */
 412static unsigned int idle_timeout = 10; /* number of seconds after which an idle thread exits */
413
414static xmutex_t wrklock;
415static xmutex_t reslock;
416static xmutex_t reqlock;
417static xcond_t reqwait;
418
419typedef struct etp_worker
420{
421 struct tmpbuf tmpbuf;
422
423 /* locked by wrklock */
424 struct etp_worker *prev, *next;
425
426 xthread_t tid;
427
428#ifdef ETP_WORKER_COMMON
429 ETP_WORKER_COMMON
430#endif
431} etp_worker;
432
433static etp_worker wrk_first; /* NOT etp */
434
435#define ETP_WORKER_LOCK(wrk) X_LOCK (wrklock)
436#define ETP_WORKER_UNLOCK(wrk) X_UNLOCK (wrklock)
437
438/* worker threads management */
439
440static void
441etp_worker_clear (etp_worker *wrk)
442{
443}
444
445static void ecb_cold
446etp_worker_free (etp_worker *wrk)
447{
448 free (wrk->tmpbuf.ptr);
449
450 wrk->next->prev = wrk->prev;
451 wrk->prev->next = wrk->next;
452
453 free (wrk);
454}
455
456static unsigned int
457etp_nreqs (void)
458{
459 int retval;
460 if (WORDACCESS_UNSAFE) X_LOCK (reqlock);
461 retval = nreqs;
462 if (WORDACCESS_UNSAFE) X_UNLOCK (reqlock);
463 return retval;
464}
465
466static unsigned int
467etp_nready (void)
468{
469 unsigned int retval;
470
471 if (WORDACCESS_UNSAFE) X_LOCK (reqlock);
472 retval = nready;
473 if (WORDACCESS_UNSAFE) X_UNLOCK (reqlock);
474
475 return retval;
476}
477
478static unsigned int
479etp_npending (void)
480{
481 unsigned int retval;
482
483 if (WORDACCESS_UNSAFE) X_LOCK (reqlock);
484 retval = npending;
485 if (WORDACCESS_UNSAFE) X_UNLOCK (reqlock);
486
487 return retval;
488}
489
490static unsigned int
491etp_nthreads (void)
492{
493 unsigned int retval;
494
495 if (WORDACCESS_UNSAFE) X_LOCK (reqlock);
496 retval = started;
497 if (WORDACCESS_UNSAFE) X_UNLOCK (reqlock);
498
499 return retval;
500}
501
502/*
503 * a somewhat faster data structure might be nice, but
504 * with 8 priorities this actually needs <20 insns
505 * per shift, the most expensive operation.
506 */
507typedef struct {
508 ETP_REQ *qs[ETP_NUM_PRI], *qe[ETP_NUM_PRI]; /* qstart, qend */
509 int size;
510} etp_reqq;
511
512static etp_reqq req_queue;
513static etp_reqq res_queue;
514
515static void ecb_noinline ecb_cold
516reqq_init (etp_reqq *q)
517{
518 int pri;
519
520 for (pri = 0; pri < ETP_NUM_PRI; ++pri)
521 q->qs[pri] = q->qe[pri] = 0;
522
523 q->size = 0;
524}
525
526static int ecb_noinline
527reqq_push (etp_reqq *q, ETP_REQ *req)
528{
529 int pri = req->pri;
530 req->next = 0;
531
532 if (q->qe[pri])
533 {
534 q->qe[pri]->next = req;
535 q->qe[pri] = req;
536 }
537 else
538 q->qe[pri] = q->qs[pri] = req;
539
540 return q->size++;
541}
542
543static ETP_REQ * ecb_noinline
544reqq_shift (etp_reqq *q)
545{
546 int pri;
547
548 if (!q->size)
549 return 0;
550
551 --q->size;
552
553 for (pri = ETP_NUM_PRI; pri--; )
554 {
555 eio_req *req = q->qs[pri];
556
557 if (req)
558 {
559 if (!(q->qs[pri] = (eio_req *)req->next))
560 q->qe[pri] = 0;
561
562 return req;
563 }
564 }
565
566 abort ();
567}
568
569static int ecb_cold
570etp_init (void (*want_poll)(void), void (*done_poll)(void))
571{
572 X_MUTEX_CREATE (wrklock);
573 X_MUTEX_CREATE (reslock);
574 X_MUTEX_CREATE (reqlock);
575 X_COND_CREATE (reqwait);
576
577 reqq_init (&req_queue);
578 reqq_init (&res_queue);
579
580 wrk_first.next =
581 wrk_first.prev = &wrk_first;
582
583 started = 0;
584 idle = 0;
585 nreqs = 0;
586 nready = 0;
587 npending = 0;
588
589 want_poll_cb = want_poll;
590 done_poll_cb = done_poll;
591
592 return 0;
593}
594
595X_THREAD_PROC (etp_proc);
596
597static void ecb_cold
598etp_start_thread (void)
599{
600 etp_worker *wrk = calloc (1, sizeof (etp_worker));
601
602 /*TODO*/
603 assert (("unable to allocate worker thread data", wrk));
604
605 X_LOCK (wrklock);
606
607 if (xthread_create (&wrk->tid, etp_proc, (void *)wrk))
608 {
609 wrk->prev = &wrk_first;
610 wrk->next = wrk_first.next;
611 wrk_first.next->prev = wrk;
612 wrk_first.next = wrk;
613 ++started;
614 }
615 else
616 free (wrk);
617
618 X_UNLOCK (wrklock);
619}
620
621static void
622etp_maybe_start_thread (void)
623{
624 if (ecb_expect_true (etp_nthreads () >= wanted))
625 return;
626
627 /* todo: maybe use idle here, but might be less exact */
628 if (ecb_expect_true (0 <= (int)etp_nthreads () + (int)etp_npending () - (int)etp_nreqs ()))
629 return;
630
631 etp_start_thread ();
632}
633
634static void ecb_cold
635etp_end_thread (void)
636{
637 eio_req *req = calloc (1, sizeof (eio_req)); /* will be freed by worker */
638
639 req->type = -1;
640 req->pri = ETP_PRI_MAX - ETP_PRI_MIN;
641
642 X_LOCK (reqlock);
643 reqq_push (&req_queue, req);
644 X_COND_SIGNAL (reqwait);
645 X_UNLOCK (reqlock);
646
647 X_LOCK (wrklock);
648 --started;
649 X_UNLOCK (wrklock);
650}
651
652static int
653etp_poll (void)
654{
655 unsigned int maxreqs;
656 unsigned int maxtime;
657 struct timeval tv_start, tv_now;
658
659 X_LOCK (reslock);
660 maxreqs = max_poll_reqs;
661 maxtime = max_poll_time;
662 X_UNLOCK (reslock);
663
664 if (maxtime)
665 gettimeofday (&tv_start, 0);
666
667 for (;;)
668 {
669 ETP_REQ *req;
670
671 etp_maybe_start_thread ();
672
673 X_LOCK (reslock);
674 req = reqq_shift (&res_queue);
675
676 if (req)
677 {
678 --npending;
679
680 if (!res_queue.size && done_poll_cb)
681 done_poll_cb ();
682 }
683
684 X_UNLOCK (reslock);
685
686 if (!req)
687 return 0;
688
689 X_LOCK (reqlock);
690 --nreqs;
691 X_UNLOCK (reqlock);
692
693 if (ecb_expect_false (req->type == EIO_GROUP && req->size))
694 {
695 req->int1 = 1; /* mark request as delayed */
696 continue;
697 }
698 else
699 {
700 int res = ETP_FINISH (req);
701 if (ecb_expect_false (res))
702 return res;
703 }
704
705 if (ecb_expect_false (maxreqs && !--maxreqs))
706 break;
707
708 if (maxtime)
709 {
710 gettimeofday (&tv_now, 0);
711
712 if (tvdiff (&tv_start, &tv_now) >= maxtime)
713 break;
714 }
715 }
716
717 errno = EAGAIN;
718 return -1;
719}
720
721static void
722etp_cancel (ETP_REQ *req)
723{
724 req->cancelled = 1;
725
726 eio_grp_cancel (req);
727}
728
729static void
730etp_submit (ETP_REQ *req)
731{
732 req->pri -= ETP_PRI_MIN;
733
734 if (ecb_expect_false (req->pri < ETP_PRI_MIN - ETP_PRI_MIN)) req->pri = ETP_PRI_MIN - ETP_PRI_MIN;
735 if (ecb_expect_false (req->pri > ETP_PRI_MAX - ETP_PRI_MIN)) req->pri = ETP_PRI_MAX - ETP_PRI_MIN;
736
737 if (ecb_expect_false (req->type == EIO_GROUP))
738 {
739 /* I hope this is worth it :/ */
740 X_LOCK (reqlock);
741 ++nreqs;
742 X_UNLOCK (reqlock);
743
744 X_LOCK (reslock);
745
746 ++npending;
747
748 if (!reqq_push (&res_queue, req) && want_poll_cb)
749 want_poll_cb ();
750
751 X_UNLOCK (reslock);
752 }
753 else
754 {
755 X_LOCK (reqlock);
756 ++nreqs;
757 ++nready;
758 reqq_push (&req_queue, req);
759 X_COND_SIGNAL (reqwait);
760 X_UNLOCK (reqlock);
761
762 etp_maybe_start_thread ();
763 }
764}
765
766static void ecb_cold
767etp_set_max_poll_time (double nseconds)
768{
769 if (WORDACCESS_UNSAFE) X_LOCK (reslock);
770 max_poll_time = nseconds * EIO_TICKS;
771 if (WORDACCESS_UNSAFE) X_UNLOCK (reslock);
772}
773
774static void ecb_cold
775etp_set_max_poll_reqs (unsigned int maxreqs)
776{
777 if (WORDACCESS_UNSAFE) X_LOCK (reslock);
778 max_poll_reqs = maxreqs;
779 if (WORDACCESS_UNSAFE) X_UNLOCK (reslock);
780}
781
782static void ecb_cold
783etp_set_max_idle (unsigned int nthreads)
784{
785 if (WORDACCESS_UNSAFE) X_LOCK (reqlock);
786 max_idle = nthreads;
787 if (WORDACCESS_UNSAFE) X_UNLOCK (reqlock);
788}
789
790static void ecb_cold
791etp_set_idle_timeout (unsigned int seconds)
792{
793 if (WORDACCESS_UNSAFE) X_LOCK (reqlock);
794 idle_timeout = seconds;
795 if (WORDACCESS_UNSAFE) X_UNLOCK (reqlock);
796}
797
798static void ecb_cold
799etp_set_min_parallel (unsigned int nthreads)
800{
801 if (wanted < nthreads)
802 wanted = nthreads;
803}
804
805static void ecb_cold
806etp_set_max_parallel (unsigned int nthreads)
807{
808 if (wanted > nthreads)
809 wanted = nthreads;
810
811 while (started > wanted)
812 etp_end_thread ();
813}
814 390
815/*****************************************************************************/ 391/*****************************************************************************/
816 392
817static void 393static void
818grp_try_feed (eio_req *grp) 394grp_try_feed (eio_req *grp)
819{ 395{
820 while (grp->size < grp->int2 && !EIO_CANCELLED (grp)) 396 while (grp->size < grp->int2 && !EIO_CANCELLED (grp))
821 { 397 {
822 grp->flags &= ~EIO_FLAG_GROUPADD; 398 grp->flags &= ~ETP_FLAG_GROUPADD;
823 399
824 EIO_FEED (grp); 400 EIO_FEED (grp);
825 401
826 /* stop if no progress has been made */ 402 /* stop if no progress has been made */
827 if (!(grp->flags & EIO_FLAG_GROUPADD)) 403 if (!(grp->flags & ETP_FLAG_GROUPADD))
828 { 404 {
829 grp->feed = 0; 405 grp->feed = 0;
830 break; 406 break;
831 } 407 }
832 } 408 }
839 415
840 /* call feeder, if applicable */ 416 /* call feeder, if applicable */
841 grp_try_feed (grp); 417 grp_try_feed (grp);
842 418
843 /* finish, if done */ 419 /* finish, if done */
844 if (!grp->size && grp->int1) 420 if (!grp->size && grp->flags & ETP_FLAG_DELAYED)
845 return eio_finish (grp); 421 return eio_finish (grp);
846 else 422 else
847 return 0; 423 return 0;
848} 424}
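
For context on the group bookkeeping above: a group request only finishes once its size has dropped back to zero, and the ETP_FLAG_GROUPADD/ETP_FLAG_DELAYED bits replace the old EIO_FLAG_GROUPADD flag and int1 marker. A hedged usage sketch of the public group API; eio_grp, eio_nop and EIO_PRI_DEFAULT are assumed from eio.h, and the callback bodies are illustrative:

#include "eio.h"

static int member_done (eio_req *req) { return 0; }
static int group_done  (eio_req *grp) { return 0; /* runs once all members have finished */ }

static void
submit_group_sketch (void)
{
  eio_req *grp = eio_grp (group_done, 0);

  /* each added request bumps grp->size; the group completes when it reaches 0 again */
  eio_grp_add (grp, eio_nop (EIO_PRI_DEFAULT, member_done, 0));
  eio_grp_add (grp, eio_nop (EIO_PRI_DEFAULT, member_done, 0));
}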
849 425
885} 461}
886 462
887void 463void
888eio_grp_cancel (eio_req *grp) 464eio_grp_cancel (eio_req *grp)
889{ 465{
890 for (grp = grp->grp_first; grp; grp = grp->grp_next) 466 etp_grp_cancel (EIO_POOL, grp);
891 eio_cancel (grp);
892} 467}
893 468
894void 469void
895eio_cancel (eio_req *req) 470eio_cancel (eio_req *req)
896{ 471{
897 etp_cancel (req); 472 etp_cancel (EIO_POOL, req);
898} 473}
899 474
900void 475void
901eio_submit (eio_req *req) 476eio_submit (eio_req *req)
902{ 477{
903 etp_submit (req); 478 etp_submit (EIO_POOL, req);
904} 479}
905 480
906unsigned int 481unsigned int
907eio_nreqs (void) 482eio_nreqs (void)
908{ 483{
909 return etp_nreqs (); 484 return etp_nreqs (EIO_POOL);
910} 485}
911 486
912unsigned int 487unsigned int
913eio_nready (void) 488eio_nready (void)
914{ 489{
915 return etp_nready (); 490 return etp_nready (EIO_POOL);
916} 491}
917 492
918unsigned int 493unsigned int
919eio_npending (void) 494eio_npending (void)
920{ 495{
921 return etp_npending (); 496 return etp_npending (EIO_POOL);
922} 497}
923 498
924unsigned int ecb_cold 499unsigned int ecb_cold
925eio_nthreads (void) 500eio_nthreads (void)
926{ 501{
927 return etp_nthreads (); 502 return etp_nthreads (EIO_POOL);
928} 503}
929 504
930void ecb_cold 505void ecb_cold
931eio_set_max_poll_time (double nseconds) 506eio_set_max_poll_time (double nseconds)
932{ 507{
933 etp_set_max_poll_time (nseconds); 508 etp_set_max_poll_time (EIO_POOL, nseconds);
934} 509}
935 510
936void ecb_cold 511void ecb_cold
937eio_set_max_poll_reqs (unsigned int maxreqs) 512eio_set_max_poll_reqs (unsigned int maxreqs)
938{ 513{
939 etp_set_max_poll_reqs (maxreqs); 514 etp_set_max_poll_reqs (EIO_POOL, maxreqs);
940} 515}
941 516
942void ecb_cold 517void ecb_cold
943eio_set_max_idle (unsigned int nthreads) 518eio_set_max_idle (unsigned int nthreads)
944{ 519{
945 etp_set_max_idle (nthreads); 520 etp_set_max_idle (EIO_POOL, nthreads);
946} 521}
947 522
948void ecb_cold 523void ecb_cold
949eio_set_idle_timeout (unsigned int seconds) 524eio_set_idle_timeout (unsigned int seconds)
950{ 525{
951 etp_set_idle_timeout (seconds); 526 etp_set_idle_timeout (EIO_POOL, seconds);
952} 527}
953 528
954void ecb_cold 529void ecb_cold
955eio_set_min_parallel (unsigned int nthreads) 530eio_set_min_parallel (unsigned int nthreads)
956{ 531{
957 etp_set_min_parallel (nthreads); 532 etp_set_min_parallel (EIO_POOL, nthreads);
958} 533}
959 534
960void ecb_cold 535void ecb_cold
961eio_set_max_parallel (unsigned int nthreads) 536eio_set_max_parallel (unsigned int nthreads)
962{ 537{
963 etp_set_max_parallel (nthreads); 538 etp_set_max_parallel (EIO_POOL, nthreads);
964} 539}
965 540
966int eio_poll (void) 541int eio_poll (void)
967{ 542{
968 return etp_poll (); 543 return etp_poll (EIO_POOL);
969} 544}
970 545
971/*****************************************************************************/ 546/*****************************************************************************/
972/* work around various missing functions */ 547/* work around various missing functions */
973 548
1383 req->result = req->offs == (off_t)-1 ? -1 : 0; 958 req->result = req->offs == (off_t)-1 ? -1 : 0;
1384} 959}
1385 960
1386/* result will always end up in tmpbuf, there is always space for adding a 0-byte */ 961/* result will always end up in tmpbuf, there is always space for adding a 0-byte */
1387static int 962static int
1388eio__realpath (struct tmpbuf *tmpbuf, eio_wd wd, const char *path) 963eio__realpath (struct etp_tmpbuf *tmpbuf, eio_wd wd, const char *path)
1389{ 964{
965 char *res;
1390 const char *rel = path; 966 const char *rel = path;
1391 char *res;
1392 char *tmp1, *tmp2; 967 char *tmp1, *tmp2;
1393#if SYMLOOP_MAX > 32 968#if SYMLOOP_MAX > 32
1394 int symlinks = SYMLOOP_MAX; 969 int symlinks = SYMLOOP_MAX;
1395#else 970#else
1396 int symlinks = 32; 971 int symlinks = 32;
1402 977
1403 errno = ENOENT; 978 errno = ENOENT;
1404 if (!*rel) 979 if (!*rel)
1405 return -1; 980 return -1;
1406 981
1407 res = tmpbuf_get (tmpbuf, PATH_MAX * 3); 982 res = etp_tmpbuf_get (tmpbuf, PATH_MAX * 3);
983#ifdef _WIN32
984 if (_access (rel, 4) != 0)
985 return -1;
986
987 symlinks = GetFullPathName (rel, PATH_MAX * 3, res, 0);
988
989 errno = ENAMETOOLONG;
990 if (symlinks >= PATH_MAX * 3)
991 return -1;
992
993 errno = EIO;
994 if (symlinks <= 0)
995 return -1;
996
997 return symlinks;
998
999#else
1408 tmp1 = res + PATH_MAX; 1000 tmp1 = res + PATH_MAX;
1409 tmp2 = tmp1 + PATH_MAX; 1001 tmp2 = tmp1 + PATH_MAX;
1410 1002
1411#if 0 /* disabled, the musl way to do things is just too racy */ 1003#if 0 /* disabled, the musl way to do things is just too racy */
1412#if __linux && defined(O_NONBLOCK) && defined(O_NOATIME) 1004#if __linux && defined(O_NONBLOCK) && defined(O_NOATIME)
1416 1008
1417 if (fd >= 0) 1009 if (fd >= 0)
1418 { 1010 {
1419 sprintf (tmp1, "/proc/self/fd/%d", fd); 1011 sprintf (tmp1, "/proc/self/fd/%d", fd);
1420 req->result = readlink (tmp1, res, PATH_MAX); 1012 req->result = readlink (tmp1, res, PATH_MAX);
1013 /* here we should probably stat the open file and the disk file, to make sure they still match */
1421 close (fd); 1014 close (fd);
1422
1423 /* here we should probably stat the open file and the disk file, to make sure they still match */
1424 1015
1425 if (req->result > 0) 1016 if (req->result > 0)
1426 goto done; 1017 goto done;
1427 } 1018 }
1428 else if (errno == ELOOP || errno == ENAMETOOLONG || errno == ENOENT || errno == ENOTDIR || errno == EIO) 1019 else if (errno == ELOOP || errno == ENAMETOOLONG || errno == ENOENT || errno == ENOTDIR || errno == EIO)
1429 return; 1020 return -1;
1430 } 1021 }
1431#endif 1022#endif
1432#endif 1023#endif
1433 1024
1434 if (*rel != '/') 1025 if (*rel != '/')
1536 /* special case for the lone root path */ 1127 /* special case for the lone root path */
1537 if (res == tmpbuf->ptr) 1128 if (res == tmpbuf->ptr)
1538 *res++ = '/'; 1129 *res++ = '/';
1539 1130
1540 return res - (char *)tmpbuf->ptr; 1131 return res - (char *)tmpbuf->ptr;
1132#endif
1541} 1133}
1542 1134
1543static signed char 1135static signed char
1544eio_dent_cmp (const eio_dirent *a, const eio_dirent *b) 1136eio_dent_cmp (const eio_dirent *a, const eio_dirent *b)
1545{ 1137{
2004#if !HAVE_AT 1596#if !HAVE_AT
2005 1597
 2006/* a bit like realpath, but usually faster because it doesn't have to return */ 1598/* a bit like realpath, but usually faster because it doesn't have to return */
2007/* an absolute or canonical path */ 1599/* an absolute or canonical path */
2008static const char * 1600static const char *
2009wd_expand (struct tmpbuf *tmpbuf, eio_wd wd, const char *path) 1601wd_expand (struct etp_tmpbuf *tmpbuf, eio_wd wd, const char *path)
2010{ 1602{
2011 if (!wd || *path == '/') 1603 if (!wd || *path == '/')
2012 return path; 1604 return path;
2013 1605
2014 if (path [0] == '.' && !path [1]) 1606 if (path [0] == '.' && !path [1])
2016 1608
2017 { 1609 {
2018 int l1 = wd->len; 1610 int l1 = wd->len;
2019 int l2 = strlen (path); 1611 int l2 = strlen (path);
2020 1612
2021 char *res = tmpbuf_get (tmpbuf, l1 + l2 + 2); 1613 char *res = etp_tmpbuf_get (tmpbuf, l1 + l2 + 2);
2022 1614
2023 memcpy (res, wd->str, l1); 1615 memcpy (res, wd->str, l1);
2024 res [l1] = '/'; 1616 res [l1] = '/';
2025 memcpy (res + l1 + 1, path, l2 + 1); 1617 memcpy (res + l1 + 1, path, l2 + 1);
2026 1618
2029} 1621}
2030 1622
2031#endif 1623#endif
2032 1624
2033static eio_wd 1625static eio_wd
2034eio__wd_open_sync (struct tmpbuf *tmpbuf, eio_wd wd, const char *path) 1626eio__wd_open_sync (struct etp_tmpbuf *tmpbuf, eio_wd wd, const char *path)
2035{ 1627{
2036 int fd; 1628 int fd;
2037 eio_wd res; 1629 eio_wd res;
2038 int len = eio__realpath (tmpbuf, wd, path); 1630 int len = eio__realpath (tmpbuf, wd, path);
2039 1631
2061} 1653}
2062 1654
2063eio_wd 1655eio_wd
2064eio_wd_open_sync (eio_wd wd, const char *path) 1656eio_wd_open_sync (eio_wd wd, const char *path)
2065{ 1657{
2066 struct tmpbuf tmpbuf = { 0 }; 1658 struct etp_tmpbuf tmpbuf = { };
2067 wd = eio__wd_open_sync (&tmpbuf, wd, path); 1659 wd = eio__wd_open_sync (&tmpbuf, wd, path);
2068 free (tmpbuf.ptr); 1660 free (tmpbuf.ptr);
2069 1661
2070 return wd; 1662 return wd;
2071} 1663}
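
eio_wd_open_sync above resolves a path into a canonical working-directory handle using a throwaway tmpbuf. A hedged usage sketch; EIO_INVALID_WD and eio_wd_close_sync are assumed from eio.h:

#include "eio.h"

static void
wd_sketch (void)
{
  eio_wd wd = eio_wd_open_sync (0, "/var/tmp");

  if (wd == EIO_INVALID_WD)
    return; /* path could not be resolved */

  /* requests carrying this handle (req->wd) resolve relative paths against it */

  eio_wd_close_sync (wd);
}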
2120/*****************************************************************************/ 1712/*****************************************************************************/
2121 1713
2122#define ALLOC(len) \ 1714#define ALLOC(len) \
2123 if (!req->ptr2) \ 1715 if (!req->ptr2) \
2124 { \ 1716 { \
2125 X_LOCK (wrklock); \ 1717 X_LOCK (EIO_POOL->wrklock); \
2126 req->flags |= EIO_FLAG_PTR2_FREE; \ 1718 req->flags |= EIO_FLAG_PTR2_FREE; \
2127 X_UNLOCK (wrklock); \ 1719 X_UNLOCK (EIO_POOL->wrklock); \
2128 req->ptr2 = malloc (len); \ 1720 req->ptr2 = malloc (len); \
2129 if (!req->ptr2) \ 1721 if (!req->ptr2) \
2130 { \ 1722 { \
2131 errno = ENOMEM; \ 1723 errno = ENOMEM; \
2132 req->result = -1; \ 1724 req->result = -1; \
2133 break; \ 1725 break; \
2134 } \ 1726 } \
2135 } 1727 }
2136 1728
2137static void ecb_noinline ecb_cold
2138etp_proc_init (void)
2139{
2140#if HAVE_PRCTL_SET_NAME
2141 /* provide a more sensible "thread name" */
2142 char name[16 + 1];
2143 const int namelen = sizeof (name) - 1;
2144 int len;
2145
2146 prctl (PR_GET_NAME, (unsigned long)name, 0, 0, 0);
2147 name [namelen] = 0;
2148 len = strlen (name);
2149 strcpy (name + (len <= namelen - 4 ? len : namelen - 4), "/eio");
2150 prctl (PR_SET_NAME, (unsigned long)name, 0, 0, 0);
2151#endif
2152}
2153
2154X_THREAD_PROC (etp_proc)
2155{
2156 ETP_REQ *req;
2157 struct timespec ts;
2158 etp_worker *self = (etp_worker *)thr_arg;
2159
2160 etp_proc_init ();
2161
2162 /* try to distribute timeouts somewhat evenly */
2163 ts.tv_nsec = ((unsigned long)self & 1023UL) * (1000000000UL / 1024UL);
2164
2165 for (;;)
2166 {
2167 ts.tv_sec = 0;
2168
2169 X_LOCK (reqlock);
2170
2171 for (;;)
2172 {
2173 req = reqq_shift (&req_queue);
2174
2175 if (req)
2176 break;
2177
2178 if (ts.tv_sec == 1) /* no request, but timeout detected, let's quit */
2179 {
2180 X_UNLOCK (reqlock);
2181 X_LOCK (wrklock);
2182 --started;
2183 X_UNLOCK (wrklock);
2184 goto quit;
2185 }
2186
2187 ++idle;
2188
2189 if (idle <= max_idle)
2190 /* we are allowed to idle, so do so without any timeout */
2191 X_COND_WAIT (reqwait, reqlock);
2192 else
2193 {
2194 /* initialise timeout once */
2195 if (!ts.tv_sec)
2196 ts.tv_sec = time (0) + idle_timeout;
2197
2198 if (X_COND_TIMEDWAIT (reqwait, reqlock, ts) == ETIMEDOUT)
2199 ts.tv_sec = 1; /* assuming this is not a value computed above.,.. */
2200 }
2201
2202 --idle;
2203 }
2204
2205 --nready;
2206
2207 X_UNLOCK (reqlock);
2208
2209 if (req->type < 0)
2210 goto quit;
2211
2212 ETP_EXECUTE (self, req);
2213
2214 X_LOCK (reslock);
2215
2216 ++npending;
2217
2218 if (!reqq_push (&res_queue, req) && want_poll_cb)
2219 want_poll_cb ();
2220
2221 etp_worker_clear (self);
2222
2223 X_UNLOCK (reslock);
2224 }
2225
2226quit:
2227 free (req);
2228
2229 X_LOCK (wrklock);
2230 etp_worker_free (self);
2231 X_UNLOCK (wrklock);
2232
2233 return 0;
2234}
2235
2236/*****************************************************************************/ 1729/*****************************************************************************/
2237 1730
2238int ecb_cold 1731int ecb_cold
2239eio_init (void (*want_poll)(void), void (*done_poll)(void)) 1732eio_init (void (*want_poll)(void), void (*done_poll)(void))
2240{ 1733{
2241 return etp_init (want_poll, done_poll); 1734 eio_want_poll_cb = want_poll;
1735 eio_done_poll_cb = done_poll;
1736
1737 return etp_init (EIO_POOL, 0, 0, 0);
2242} 1738}
2243 1739
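
The reworked eio_init above just stores the embedder's callbacks and initialises the shared EIO_POOL. A minimal, hedged embedding sketch: want_poll fires when results become pending (possibly from another thread) and done_poll when the result queue drains, so a real embedder would wake its event loop rather than print and spin as this example does:

#include <stdio.h>
#include "eio.h"

static void my_want_poll (void) { puts ("want_poll: results pending"); }
static void my_done_poll (void) { puts ("done_poll: result queue empty"); }

int
main (void)
{
  if (eio_init (my_want_poll, my_done_poll))
    return 1;

  /* submit requests here, then call eio_poll () whenever want_poll fired;
     eio_poll () returns -1 with errno == EAGAIN while results remain */
  while (eio_nreqs ())
    eio_poll ();

  return 0;
}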
2244ecb_inline void 1740ecb_inline void
2245eio_api_destroy (eio_req *req) 1741eio_api_destroy (eio_req *req)
2246{ 1742{
2269 { \ 1765 { \
2270 eio_api_destroy (req); \ 1766 eio_api_destroy (req); \
2271 return 0; \ 1767 return 0; \
2272 } 1768 }
2273 1769
1770#define SINGLEDOT(ptr) (0[(char *)(ptr)] == '.' && !1[(char *)(ptr)])
1771
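
SINGLEDOT tests whether a path is exactly ".": 0[(char *)p] is the same as ((char *)p)[0], so the macro checks that the first byte is '.' and the second byte is the terminating NUL. A small self-check of that property (the macro is repeated here verbatim to keep the example standalone):

#include <assert.h>

#define SINGLEDOT(ptr) (0[(char *)(ptr)] == '.' && !1[(char *)(ptr)])

int
main (void)
{
  assert ( SINGLEDOT ("."));
  assert (!SINGLEDOT ("./"));
  assert (!SINGLEDOT (".."));
  assert (!SINGLEDOT ("x"));
  return 0;
}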
2274static void 1772static void
2275eio_execute (etp_worker *self, eio_req *req) 1773eio_execute (etp_worker *self, eio_req *req)
2276{ 1774{
2277#if HAVE_AT 1775#if HAVE_AT
2278 int dirfd; 1776 int dirfd;
2333 case EIO_CHMOD: req->result = fchmodat (dirfd, req->ptr1, (mode_t)req->int2, 0); break; 1831 case EIO_CHMOD: req->result = fchmodat (dirfd, req->ptr1, (mode_t)req->int2, 0); break;
2334 case EIO_TRUNCATE: req->result = eio__truncateat (dirfd, req->ptr1, req->offs); break; 1832 case EIO_TRUNCATE: req->result = eio__truncateat (dirfd, req->ptr1, req->offs); break;
2335 case EIO_OPEN: req->result = openat (dirfd, req->ptr1, req->int1, (mode_t)req->int2); break; 1833 case EIO_OPEN: req->result = openat (dirfd, req->ptr1, req->int1, (mode_t)req->int2); break;
2336 1834
2337 case EIO_UNLINK: req->result = unlinkat (dirfd, req->ptr1, 0); break; 1835 case EIO_UNLINK: req->result = unlinkat (dirfd, req->ptr1, 0); break;
2338 case EIO_RMDIR: req->result = unlinkat (dirfd, req->ptr1, AT_REMOVEDIR); break; 1836 case EIO_RMDIR: /* complications arise because "." cannot be removed, so we might have to expand */
1837 req->result = req->wd && SINGLEDOT (req->ptr1)
1838 ? rmdir (req->wd->str)
1839 : unlinkat (dirfd, req->ptr1, AT_REMOVEDIR); break;
2339 case EIO_MKDIR: req->result = mkdirat (dirfd, req->ptr1, (mode_t)req->int2); break; 1840 case EIO_MKDIR: req->result = mkdirat (dirfd, req->ptr1, (mode_t)req->int2); break;
2340 case EIO_RENAME: req->result = renameat (dirfd, req->ptr1, WD2FD ((eio_wd)req->int3), req->ptr2); break; 1841 case EIO_RENAME: /* complications arise because "." cannot be renamed, so we might have to expand */
1842 req->result = req->wd && SINGLEDOT (req->ptr1)
1843 ? rename (req->wd->str, req->ptr2)
1844 : renameat (dirfd, req->ptr1, WD2FD ((eio_wd)req->int3), req->ptr2); break;
2341 case EIO_LINK: req->result = linkat (dirfd, req->ptr1, WD2FD ((eio_wd)req->int3), req->ptr2, 0); break; 1845 case EIO_LINK: req->result = linkat (dirfd, req->ptr1, WD2FD ((eio_wd)req->int3), req->ptr2, 0); break;
2342 case EIO_SYMLINK: req->result = symlinkat (req->ptr1, dirfd, req->ptr2); break; 1846 case EIO_SYMLINK: req->result = symlinkat (req->ptr1, dirfd, req->ptr2); break;
2343 case EIO_MKNOD: req->result = mknodat (dirfd, req->ptr1, (mode_t)req->int2, (dev_t)req->offs); break; 1847 case EIO_MKNOD: req->result = mknodat (dirfd, req->ptr1, (mode_t)req->int2, (dev_t)req->offs); break;
2344 case EIO_READLINK: ALLOC (PATH_MAX); 1848 case EIO_READLINK: ALLOC (PATH_MAX);
2345 req->result = readlinkat (dirfd, req->ptr1, req->ptr2, PATH_MAX); break; 1849 req->result = readlinkat (dirfd, req->ptr1, req->ptr2, PATH_MAX); break;
2463 req->result = select (0, 0, 0, 0, &tv); 1967 req->result = select (0, 0, 0, 0, &tv);
2464 } 1968 }
2465#endif 1969#endif
2466 break; 1970 break;
2467 1971
1972#if 0
2468 case EIO_GROUP: 1973 case EIO_GROUP:
2469 abort (); /* handled in eio_request */ 1974 abort (); /* handled in eio_request */
1975#endif
2470 1976
2471 case EIO_NOP: 1977 case EIO_NOP:
2472 req->result = 0; 1978 req->result = 0;
2473 break; 1979 break;
2474 1980
2774void 2280void
2775eio_grp_add (eio_req *grp, eio_req *req) 2281eio_grp_add (eio_req *grp, eio_req *req)
2776{ 2282{
2777 assert (("cannot add requests to IO::AIO::GRP after the group finished", grp->int1 != 2)); 2283 assert (("cannot add requests to IO::AIO::GRP after the group finished", grp->int1 != 2));
2778 2284
2779 grp->flags |= EIO_FLAG_GROUPADD; 2285 grp->flags |= ETP_FLAG_GROUPADD;
2780 2286
2781 ++grp->size; 2287 ++grp->size;
2782 req->grp = grp; 2288 req->grp = grp;
2783 2289
2784 req->grp_prev = 0; 2290 req->grp_prev = 0;
