/cvs/libeio/eio.c

Comparing libeio/eio.c (file contents):
Revision 1.110 by root, Tue Sep 27 12:36:19 2011 UTC vs.
Revision 1.139 by root, Thu Jun 25 18:14:19 2015 UTC

1/* 1/*
2 * libeio implementation 2 * libeio implementation
3 * 3 *
4 * Copyright (c) 2007,2008,2009,2010,2011 Marc Alexander Lehmann <libeio@schmorp.de> 4 * Copyright (c) 2007,2008,2009,2010,2011,2012,2013 Marc Alexander Lehmann <libeio@schmorp.de>
5 * All rights reserved. 5 * All rights reserved.
6 * 6 *
7 * Redistribution and use in source and binary forms, with or without modifica- 7 * Redistribution and use in source and binary forms, with or without modifica-
8 * tion, are permitted provided that the following conditions are met: 8 * tion, are permitted provided that the following conditions are met:
9 * 9 *
10 * 1. Redistributions of source code must retain the above copyright notice, 10 * 1. Redistributions of source code must retain the above copyright notice,
11 * this list of conditions and the following disclaimer. 11 * this list of conditions and the following disclaimer.
12 * 12 *
13 * 2. Redistributions in binary form must reproduce the above copyright 13 * 2. Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in the 14 * notice, this list of conditions and the following disclaimer in the
15 * documentation and/or other materials provided with the distribution. 15 * documentation and/or other materials provided with the distribution.
16 * 16 *
17 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED 17 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
18 * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MER- 18 * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MER-
19 * CHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO 19 * CHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
20 * EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPE- 20 * EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPE-
21 * CIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, 21 * CIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
120 #define link(old,neu) (CreateHardLink (neu, old, 0) ? 0 : EIO_ERRNO (ENOENT, -1)) 120 #define link(old,neu) (CreateHardLink (neu, old, 0) ? 0 : EIO_ERRNO (ENOENT, -1))
121 121
122 #define chmod(path,mode) _chmod (path, mode) 122 #define chmod(path,mode) _chmod (path, mode)
123 #define dup(fd) _dup (fd) 123 #define dup(fd) _dup (fd)
124 #define dup2(fd1,fd2) _dup2 (fd1, fd2) 124 #define dup2(fd1,fd2) _dup2 (fd1, fd2)
125 #define pipe(fds) _pipe (fds, 4096, O_BINARY)
125 126
126 #define fchmod(fd,mode) EIO_ENOSYS () 127 #define fchmod(fd,mode) EIO_ENOSYS ()
127 #define chown(path,uid,gid) EIO_ENOSYS () 128 #define chown(path,uid,gid) EIO_ENOSYS ()
128 #define fchown(fd,uid,gid) EIO_ENOSYS () 129 #define fchown(fd,uid,gid) EIO_ENOSYS ()
129 #define truncate(path,offs) EIO_ENOSYS () /* far-miss: SetEndOfFile */ 130 #define truncate(path,offs) EIO_ENOSYS () /* far-miss: SetEndOfFile */
131 #define mknod(path,mode,dev) EIO_ENOSYS () 132 #define mknod(path,mode,dev) EIO_ENOSYS ()
132 #define sync() EIO_ENOSYS () 133 #define sync() EIO_ENOSYS ()
133 #define readlink(path,buf,s) EIO_ENOSYS () 134 #define readlink(path,buf,s) EIO_ENOSYS ()
134 #define statvfs(path,buf) EIO_ENOSYS () 135 #define statvfs(path,buf) EIO_ENOSYS ()
135 #define fstatvfs(fd,buf) EIO_ENOSYS () 136 #define fstatvfs(fd,buf) EIO_ENOSYS ()
137
138 #define pread(fd,buf,count,offset) eio__pread (fd, buf, count, offset)
139 #define pwrite(fd,buf,count,offset) eio__pwrite (fd, buf, count, offset)
140
141 #if __GNUC__
142 typedef long long eio_off_t; /* signed for compatibility to msvc */
143 #else
144 typedef __int64 eio_off_t; /* unsigned not supported by msvc */
145 #endif
146
147 static eio_ssize_t
148 eio__pread (int fd, void *buf, eio_ssize_t count, eio_off_t offset)
149 {
150 OVERLAPPED o = { 0 };
151 DWORD got;
152
153 o.Offset = offset;
154 o.OffsetHigh = offset >> 32;
155
156 return ReadFile ((HANDLE)EIO_FD_TO_WIN32_HANDLE (fd), buf, count, &got, &o)
157 ? got : -1;
158 }
159
160 static eio_ssize_t
161 eio__pwrite (int fd, void *buf, eio_ssize_t count, eio_off_t offset)
162 {
163 OVERLAPPED o = { 0 };
164 DWORD got;
165
166 o.Offset = offset;
167 o.OffsetHigh = offset >> 32;
168
169 return WriteFile ((HANDLE)EIO_FD_TO_WIN32_HANDLE (fd), buf, count, &got, &o)
170 ? got : -1;
171 }
136 172
137 /* rename() uses MoveFile, which fails to overwrite */ 173 /* rename() uses MoveFile, which fails to overwrite */
138 #define rename(old,neu) eio__rename (old, neu) 174 #define rename(old,neu) eio__rename (old, neu)
139 175
140 static int 176 static int
178 #endif 214 #endif
179 215
180 return EIO_ERRNO (ENOENT, -1); 216 return EIO_ERRNO (ENOENT, -1);
181 } 217 }
182 218
183 /* POSIX API only */ 219 /* POSIX API only, causing trouble for win32 apps */
184 #define CreateHardLink(neu,old,flags) 0 220 #define CreateHardLink(neu,old,flags) 0 /* not really creating hardlink, still using relative paths? */
185 #define CreateSymbolicLink(neu,old,flags) 0 221 #define CreateSymbolicLink(neu,old,flags) 0 /* vista+ only */
186 222
187 struct statvfs 223 struct statvfs
188 { 224 {
189 int dummy; 225 int dummy;
190 }; 226 };
196 232
197#else 233#else
198 234
199 #include <sys/time.h> 235 #include <sys/time.h>
200 #include <sys/select.h> 236 #include <sys/select.h>
201 #include <sys/statvfs.h>
202 #include <unistd.h> 237 #include <unistd.h>
203 #include <signal.h> 238 #include <signal.h>
204 #include <dirent.h> 239 #include <dirent.h>
205 240
241 #ifdef ANDROID
242 #include <sys/vfs.h>
243 #define statvfs statfs
244 #define fstatvfs fstatfs
245 #include <asm/page.h> /* supposedly limits.h does #define PAGESIZE PAGESIZE */
246 #else
247 #include <sys/statvfs.h>
248 #endif
249
206 #if _POSIX_MEMLOCK || _POSIX_MEMLOCK_RANGE || _POSIX_MAPPED_FILES 250 #if _POSIX_MEMLOCK || _POSIX_MEMLOCK_RANGE || _POSIX_MAPPED_FILES
207 #include <sys/mman.h> 251 #include <sys/mman.h>
208 #endif 252 #endif
209 253
210 #define D_NAME(entp) entp->d_name 254 #define D_NAME(entp) entp->d_name
211 255
212 /* POSIX_SOURCE is useless on bsd's, and XOPEN_SOURCE is unreliable there, too */ 256 /* POSIX_SOURCE is useless on bsd's, and XOPEN_SOURCE is unreliable there, too */
213 #if __FreeBSD__ || defined __NetBSD__ || defined __OpenBSD__ 257 #if __FreeBSD__ || __NetBSD__ || __OpenBSD__
214 #define _DIRENT_HAVE_D_TYPE /* sigh */ 258 #define _DIRENT_HAVE_D_TYPE /* sigh */
215 #define D_INO(de) (de)->d_fileno 259 #define D_INO(de) (de)->d_fileno
216 #define D_NAMLEN(de) (de)->d_namlen 260 #define D_NAMLEN(de) (de)->d_namlen
217 #elif __linux || defined d_ino || _XOPEN_SOURCE >= 600 261 #elif __linux || defined d_ino || _XOPEN_SOURCE >= 600
218 #define D_INO(de) (de)->d_ino 262 #define D_INO(de) (de)->d_ino
281#endif 325#endif
282 326
283/* buffer size for various temporary buffers */ 327/* buffer size for various temporary buffers */
284#define EIO_BUFSIZE 65536 328#define EIO_BUFSIZE 65536
285 329
286#define dBUF \ 330#define dBUF \
287 char *eio_buf = malloc (EIO_BUFSIZE); \ 331 char *eio_buf = malloc (EIO_BUFSIZE); \
288 errno = ENOMEM; \ 332 errno = ENOMEM; \
289 if (!eio_buf) \ 333 if (!eio_buf) \
290 return -1 334 return -1
291 335
292#define FUBd \ 336#define FUBd \
293 free (eio_buf) 337 free (eio_buf)
294 338
295#define EIO_TICKS ((1000000 + 1023) >> 10)
296
297/*****************************************************************************/ 339/*****************************************************************************/
298 340
299struct tmpbuf
300{
301 void *ptr;
302 int len;
303};
304
305static void *
306tmpbuf_get (struct tmpbuf *buf, int len)
307{
308 if (buf->len < len)
309 {
310 free (buf->ptr);
311 buf->ptr = malloc (buf->len = len);
312 }
313
314 return buf->ptr;
315}
316
317struct tmpbuf; 341struct etp_tmpbuf;
318 342
319#if _POSIX_VERSION >= 200809L 343#if _POSIX_VERSION >= 200809L
320 #define HAVE_AT 1 344 #define HAVE_AT 1
321 #define WD2FD(wd) ((wd) ? (wd)->fd : AT_FDCWD) 345 #define WD2FD(wd) ((wd) ? (wd)->fd : AT_FDCWD)
322 #ifndef O_SEARCH 346 #ifndef O_SEARCH
323 #define O_SEARCH O_RDONLY 347 #define O_SEARCH O_RDONLY
324 #endif 348 #endif
325#else 349#else
326 #define HAVE_AT 0 350 #define HAVE_AT 0
327 static const char *wd_expand (struct tmpbuf *tmpbuf, eio_wd wd, const char *path); 351 static const char *wd_expand (struct etp_tmpbuf *tmpbuf, eio_wd wd, const char *path);
328#endif 352#endif
329 353
330struct eio_pwd 354struct eio_pwd
331{ 355{
332#if HAVE_AT 356#if HAVE_AT
339/*****************************************************************************/ 363/*****************************************************************************/
340 364
341#define ETP_PRI_MIN EIO_PRI_MIN 365#define ETP_PRI_MIN EIO_PRI_MIN
342#define ETP_PRI_MAX EIO_PRI_MAX 366#define ETP_PRI_MAX EIO_PRI_MAX
343 367
368#define ETP_TYPE_QUIT -1
369#define ETP_TYPE_GROUP EIO_GROUP
370
371static void eio_nop_callback (void) { }
372static void (*eio_want_poll_cb)(void) = eio_nop_callback;
373static void (*eio_done_poll_cb)(void) = eio_nop_callback;
374
375#define ETP_WANT_POLL(pool) eio_want_poll_cb ()
376#define ETP_DONE_POLL(pool) eio_done_poll_cb ()
377
344struct etp_worker; 378struct etp_worker;
345
346#define ETP_REQ eio_req 379#define ETP_REQ eio_req
347#define ETP_DESTROY(req) eio_destroy (req) 380#define ETP_DESTROY(req) eio_destroy (req)
348static int eio_finish (eio_req *req); 381static int eio_finish (eio_req *req);
349#define ETP_FINISH(req) eio_finish (req) 382#define ETP_FINISH(req) eio_finish (req)
350static void eio_execute (struct etp_worker *self, eio_req *req); 383static void eio_execute (struct etp_worker *self, eio_req *req);
351#define ETP_EXECUTE(wrk,req) eio_execute (wrk,req) 384#define ETP_EXECUTE(wrk,req) eio_execute (wrk, req)
352 385
353/*****************************************************************************/ 386#include "etp.c"
354 387
355#define ETP_NUM_PRI (ETP_PRI_MAX - ETP_PRI_MIN + 1) 388static struct etp_pool eio_pool;
356 389#define EIO_POOL (&eio_pool)
357/* calculate time difference in ~1/EIO_TICKS of a second */
358ecb_inline int
359tvdiff (struct timeval *tv1, struct timeval *tv2)
360{
361 return (tv2->tv_sec - tv1->tv_sec ) * EIO_TICKS
362 + ((tv2->tv_usec - tv1->tv_usec) >> 10);
363}
364
365static unsigned int started, idle, wanted = 4;
366
367static void (*want_poll_cb) (void);
368static void (*done_poll_cb) (void);
369
370static unsigned int max_poll_time; /* reslock */
371static unsigned int max_poll_reqs; /* reslock */
372
373static unsigned int nreqs; /* reqlock */
374static unsigned int nready; /* reqlock */
375static unsigned int npending; /* reqlock */
376static unsigned int max_idle = 4; /* maximum number of threads that can idle indefinitely */
377static unsigned int idle_timeout = 10; /* number of seconds after which an idle threads exit */
378
379static xmutex_t wrklock;
380static xmutex_t reslock;
381static xmutex_t reqlock;
382static xcond_t reqwait;
383
384#if !HAVE_PREADWRITE
385/*
386 * make our pread/pwrite emulation safe against themselves, but not against
387 * normal read/write by using a mutex. slows down execution a lot,
388 * but that's your problem, not mine.
389 */
390static xmutex_t preadwritelock;
391#endif
392
393typedef struct etp_worker
394{
395 struct tmpbuf tmpbuf;
396
397 /* locked by wrklock */
398 struct etp_worker *prev, *next;
399
400 xthread_t tid;
401
402#ifdef ETP_WORKER_COMMON
403 ETP_WORKER_COMMON
404#endif
405} etp_worker;
406
407static etp_worker wrk_first; /* NOT etp */
408
409#define ETP_WORKER_LOCK(wrk) X_LOCK (wrklock)
410#define ETP_WORKER_UNLOCK(wrk) X_UNLOCK (wrklock)
411
412/* worker threads management */
413
414static void
415etp_worker_clear (etp_worker *wrk)
416{
417}
418
419static void ecb_cold
420etp_worker_free (etp_worker *wrk)
421{
422 free (wrk->tmpbuf.ptr);
423
424 wrk->next->prev = wrk->prev;
425 wrk->prev->next = wrk->next;
426
427 free (wrk);
428}
429
430static unsigned int
431etp_nreqs (void)
432{
433 int retval;
434 if (WORDACCESS_UNSAFE) X_LOCK (reqlock);
435 retval = nreqs;
436 if (WORDACCESS_UNSAFE) X_UNLOCK (reqlock);
437 return retval;
438}
439
440static unsigned int
441etp_nready (void)
442{
443 unsigned int retval;
444
445 if (WORDACCESS_UNSAFE) X_LOCK (reqlock);
446 retval = nready;
447 if (WORDACCESS_UNSAFE) X_UNLOCK (reqlock);
448
449 return retval;
450}
451
452static unsigned int
453etp_npending (void)
454{
455 unsigned int retval;
456
457 if (WORDACCESS_UNSAFE) X_LOCK (reqlock);
458 retval = npending;
459 if (WORDACCESS_UNSAFE) X_UNLOCK (reqlock);
460
461 return retval;
462}
463
464static unsigned int
465etp_nthreads (void)
466{
467 unsigned int retval;
468
469 if (WORDACCESS_UNSAFE) X_LOCK (reqlock);
470 retval = started;
471 if (WORDACCESS_UNSAFE) X_UNLOCK (reqlock);
472
473 return retval;
474}
475
476/*
477 * a somewhat faster data structure might be nice, but
478 * with 8 priorities this actually needs <20 insns
479 * per shift, the most expensive operation.
480 */
481typedef struct {
482 ETP_REQ *qs[ETP_NUM_PRI], *qe[ETP_NUM_PRI]; /* qstart, qend */
483 int size;
484} etp_reqq;
485
486static etp_reqq req_queue;
487static etp_reqq res_queue;
488
489static void ecb_noinline ecb_cold
490reqq_init (etp_reqq *q)
491{
492 int pri;
493
494 for (pri = 0; pri < ETP_NUM_PRI; ++pri)
495 q->qs[pri] = q->qe[pri] = 0;
496
497 q->size = 0;
498}
499
500static int ecb_noinline
501reqq_push (etp_reqq *q, ETP_REQ *req)
502{
503 int pri = req->pri;
504 req->next = 0;
505
506 if (q->qe[pri])
507 {
508 q->qe[pri]->next = req;
509 q->qe[pri] = req;
510 }
511 else
512 q->qe[pri] = q->qs[pri] = req;
513
514 return q->size++;
515}
516
517static ETP_REQ * ecb_noinline
518reqq_shift (etp_reqq *q)
519{
520 int pri;
521
522 if (!q->size)
523 return 0;
524
525 --q->size;
526
527 for (pri = ETP_NUM_PRI; pri--; )
528 {
529 eio_req *req = q->qs[pri];
530
531 if (req)
532 {
533 if (!(q->qs[pri] = (eio_req *)req->next))
534 q->qe[pri] = 0;
535
536 return req;
537 }
538 }
539
540 abort ();
541}
542
543static int ecb_cold
544etp_init (void (*want_poll)(void), void (*done_poll)(void))
545{
546 X_MUTEX_CREATE (wrklock);
547 X_MUTEX_CREATE (reslock);
548 X_MUTEX_CREATE (reqlock);
549 X_COND_CREATE (reqwait);
550
551 reqq_init (&req_queue);
552 reqq_init (&res_queue);
553
554 wrk_first.next =
555 wrk_first.prev = &wrk_first;
556
557 started = 0;
558 idle = 0;
559 nreqs = 0;
560 nready = 0;
561 npending = 0;
562
563 want_poll_cb = want_poll;
564 done_poll_cb = done_poll;
565
566 return 0;
567}
568
569X_THREAD_PROC (etp_proc);
570
571static void ecb_cold
572etp_start_thread (void)
573{
574 etp_worker *wrk = calloc (1, sizeof (etp_worker));
575
576 /*TODO*/
577 assert (("unable to allocate worker thread data", wrk));
578
579 X_LOCK (wrklock);
580
581 if (thread_create (&wrk->tid, etp_proc, (void *)wrk))
582 {
583 wrk->prev = &wrk_first;
584 wrk->next = wrk_first.next;
585 wrk_first.next->prev = wrk;
586 wrk_first.next = wrk;
587 ++started;
588 }
589 else
590 free (wrk);
591
592 X_UNLOCK (wrklock);
593}
594
595static void
596etp_maybe_start_thread (void)
597{
598 if (ecb_expect_true (etp_nthreads () >= wanted))
599 return;
600
601 /* todo: maybe use idle here, but might be less exact */
602 if (ecb_expect_true (0 <= (int)etp_nthreads () + (int)etp_npending () - (int)etp_nreqs ()))
603 return;
604
605 etp_start_thread ();
606}
607
608static void ecb_cold
609etp_end_thread (void)
610{
611 eio_req *req = calloc (1, sizeof (eio_req)); /* will be freed by worker */
612
613 req->type = -1;
614 req->pri = ETP_PRI_MAX - ETP_PRI_MIN;
615
616 X_LOCK (reqlock);
617 reqq_push (&req_queue, req);
618 X_COND_SIGNAL (reqwait);
619 X_UNLOCK (reqlock);
620
621 X_LOCK (wrklock);
622 --started;
623 X_UNLOCK (wrklock);
624}
625
626static int
627etp_poll (void)
628{
629 unsigned int maxreqs;
630 unsigned int maxtime;
631 struct timeval tv_start, tv_now;
632
633 X_LOCK (reslock);
634 maxreqs = max_poll_reqs;
635 maxtime = max_poll_time;
636 X_UNLOCK (reslock);
637
638 if (maxtime)
639 gettimeofday (&tv_start, 0);
640
641 for (;;)
642 {
643 ETP_REQ *req;
644
645 etp_maybe_start_thread ();
646
647 X_LOCK (reslock);
648 req = reqq_shift (&res_queue);
649
650 if (req)
651 {
652 --npending;
653
654 if (!res_queue.size && done_poll_cb)
655 done_poll_cb ();
656 }
657
658 X_UNLOCK (reslock);
659
660 if (!req)
661 return 0;
662
663 X_LOCK (reqlock);
664 --nreqs;
665 X_UNLOCK (reqlock);
666
667 if (ecb_expect_false (req->type == EIO_GROUP && req->size))
668 {
669 req->int1 = 1; /* mark request as delayed */
670 continue;
671 }
672 else
673 {
674 int res = ETP_FINISH (req);
675 if (ecb_expect_false (res))
676 return res;
677 }
678
679 if (ecb_expect_false (maxreqs && !--maxreqs))
680 break;
681
682 if (maxtime)
683 {
684 gettimeofday (&tv_now, 0);
685
686 if (tvdiff (&tv_start, &tv_now) >= maxtime)
687 break;
688 }
689 }
690
691 errno = EAGAIN;
692 return -1;
693}
694
695static void
696etp_cancel (ETP_REQ *req)
697{
698 req->cancelled = 1;
699
700 eio_grp_cancel (req);
701}
702
703static void
704etp_submit (ETP_REQ *req)
705{
706 req->pri -= ETP_PRI_MIN;
707
708 if (ecb_expect_false (req->pri < ETP_PRI_MIN - ETP_PRI_MIN)) req->pri = ETP_PRI_MIN - ETP_PRI_MIN;
709 if (ecb_expect_false (req->pri > ETP_PRI_MAX - ETP_PRI_MIN)) req->pri = ETP_PRI_MAX - ETP_PRI_MIN;
710
711 if (ecb_expect_false (req->type == EIO_GROUP))
712 {
713 /* I hope this is worth it :/ */
714 X_LOCK (reqlock);
715 ++nreqs;
716 X_UNLOCK (reqlock);
717
718 X_LOCK (reslock);
719
720 ++npending;
721
722 if (!reqq_push (&res_queue, req) && want_poll_cb)
723 want_poll_cb ();
724
725 X_UNLOCK (reslock);
726 }
727 else
728 {
729 X_LOCK (reqlock);
730 ++nreqs;
731 ++nready;
732 reqq_push (&req_queue, req);
733 X_COND_SIGNAL (reqwait);
734 X_UNLOCK (reqlock);
735
736 etp_maybe_start_thread ();
737 }
738}
739
740static void ecb_cold
741etp_set_max_poll_time (double nseconds)
742{
743 if (WORDACCESS_UNSAFE) X_LOCK (reslock);
744 max_poll_time = nseconds * EIO_TICKS;
745 if (WORDACCESS_UNSAFE) X_UNLOCK (reslock);
746}
747
748static void ecb_cold
749etp_set_max_poll_reqs (unsigned int maxreqs)
750{
751 if (WORDACCESS_UNSAFE) X_LOCK (reslock);
752 max_poll_reqs = maxreqs;
753 if (WORDACCESS_UNSAFE) X_UNLOCK (reslock);
754}
755
756static void ecb_cold
757etp_set_max_idle (unsigned int nthreads)
758{
759 if (WORDACCESS_UNSAFE) X_LOCK (reqlock);
760 max_idle = nthreads;
761 if (WORDACCESS_UNSAFE) X_UNLOCK (reqlock);
762}
763
764static void ecb_cold
765etp_set_idle_timeout (unsigned int seconds)
766{
767 if (WORDACCESS_UNSAFE) X_LOCK (reqlock);
768 idle_timeout = seconds;
769 if (WORDACCESS_UNSAFE) X_UNLOCK (reqlock);
770}
771
772static void ecb_cold
773etp_set_min_parallel (unsigned int nthreads)
774{
775 if (wanted < nthreads)
776 wanted = nthreads;
777}
778
779static void ecb_cold
780etp_set_max_parallel (unsigned int nthreads)
781{
782 if (wanted > nthreads)
783 wanted = nthreads;
784
785 while (started > wanted)
786 etp_end_thread ();
787}
788 390
789/*****************************************************************************/ 391/*****************************************************************************/
790 392
791static void 393static void
792grp_try_feed (eio_req *grp) 394grp_try_feed (eio_req *grp)
793{ 395{
794 while (grp->size < grp->int2 && !EIO_CANCELLED (grp)) 396 while (grp->size < grp->int2 && !EIO_CANCELLED (grp))
795 { 397 {
796 grp->flags &= ~EIO_FLAG_GROUPADD; 398 grp->flags &= ~ETP_FLAG_GROUPADD;
797 399
798 EIO_FEED (grp); 400 EIO_FEED (grp);
799 401
800 /* stop if no progress has been made */ 402 /* stop if no progress has been made */
801 if (!(grp->flags & EIO_FLAG_GROUPADD)) 403 if (!(grp->flags & ETP_FLAG_GROUPADD))
802 { 404 {
803 grp->feed = 0; 405 grp->feed = 0;
804 break; 406 break;
805 } 407 }
806 } 408 }
813 415
814 /* call feeder, if applicable */ 416 /* call feeder, if applicable */
815 grp_try_feed (grp); 417 grp_try_feed (grp);
816 418
817 /* finish, if done */ 419 /* finish, if done */
818 if (!grp->size && grp->int1) 420 if (!grp->size && grp->flags & ETP_FLAG_DELAYED)
819 return eio_finish (grp); 421 return eio_finish (grp);
820 else 422 else
821 return 0; 423 return 0;
822} 424}
823 425
859} 461}
860 462
861void 463void
862eio_grp_cancel (eio_req *grp) 464eio_grp_cancel (eio_req *grp)
863{ 465{
864 for (grp = grp->grp_first; grp; grp = grp->grp_next) 466 etp_grp_cancel (EIO_POOL, grp);
865 eio_cancel (grp);
866} 467}
867 468
868void 469void
869eio_cancel (eio_req *req) 470eio_cancel (eio_req *req)
870{ 471{
871 etp_cancel (req); 472 etp_cancel (EIO_POOL, req);
872} 473}
873 474
874void 475void
875eio_submit (eio_req *req) 476eio_submit (eio_req *req)
876{ 477{
877 etp_submit (req); 478 etp_submit (EIO_POOL, req);
878} 479}
879 480
880unsigned int 481unsigned int
881eio_nreqs (void) 482eio_nreqs (void)
882{ 483{
883 return etp_nreqs (); 484 return etp_nreqs (EIO_POOL);
884} 485}
885 486
886unsigned int 487unsigned int
887eio_nready (void) 488eio_nready (void)
888{ 489{
889 return etp_nready (); 490 return etp_nready (EIO_POOL);
890} 491}
891 492
892unsigned int 493unsigned int
893eio_npending (void) 494eio_npending (void)
894{ 495{
895 return etp_npending (); 496 return etp_npending (EIO_POOL);
896} 497}
897 498
898unsigned int ecb_cold 499unsigned int ecb_cold
899eio_nthreads (void) 500eio_nthreads (void)
900{ 501{
901 return etp_nthreads (); 502 return etp_nthreads (EIO_POOL);
902} 503}
903 504
904void ecb_cold 505void ecb_cold
905eio_set_max_poll_time (double nseconds) 506eio_set_max_poll_time (double nseconds)
906{ 507{
907 etp_set_max_poll_time (nseconds); 508 etp_set_max_poll_time (EIO_POOL, nseconds);
908} 509}
909 510
910void ecb_cold 511void ecb_cold
911eio_set_max_poll_reqs (unsigned int maxreqs) 512eio_set_max_poll_reqs (unsigned int maxreqs)
912{ 513{
913 etp_set_max_poll_reqs (maxreqs); 514 etp_set_max_poll_reqs (EIO_POOL, maxreqs);
914} 515}
915 516
916void ecb_cold 517void ecb_cold
917eio_set_max_idle (unsigned int nthreads) 518eio_set_max_idle (unsigned int nthreads)
918{ 519{
919 etp_set_max_idle (nthreads); 520 etp_set_max_idle (EIO_POOL, nthreads);
920} 521}
921 522
922void ecb_cold 523void ecb_cold
923eio_set_idle_timeout (unsigned int seconds) 524eio_set_idle_timeout (unsigned int seconds)
924{ 525{
925 etp_set_idle_timeout (seconds); 526 etp_set_idle_timeout (EIO_POOL, seconds);
926} 527}
927 528
928void ecb_cold 529void ecb_cold
929eio_set_min_parallel (unsigned int nthreads) 530eio_set_min_parallel (unsigned int nthreads)
930{ 531{
931 etp_set_min_parallel (nthreads); 532 etp_set_min_parallel (EIO_POOL, nthreads);
932} 533}
933 534
934void ecb_cold 535void ecb_cold
935eio_set_max_parallel (unsigned int nthreads) 536eio_set_max_parallel (unsigned int nthreads)
936{ 537{
937 etp_set_max_parallel (nthreads); 538 etp_set_max_parallel (EIO_POOL, nthreads);
938} 539}
939 540
940int eio_poll (void) 541int eio_poll (void)
941{ 542{
942 return etp_poll (); 543 return etp_poll (EIO_POOL);
943} 544}
944 545
945/*****************************************************************************/ 546/*****************************************************************************/
946/* work around various missing functions */ 547/* work around various missing functions */
947
948#if !HAVE_PREADWRITE
949# undef pread
950# undef pwrite
951# define pread eio__pread
952# define pwrite eio__pwrite
953
954static eio_ssize_t
955eio__pread (int fd, void *buf, size_t count, off_t offset)
956{
957 eio_ssize_t res;
958 off_t ooffset;
959
960 X_LOCK (preadwritelock);
961 ooffset = lseek (fd, 0, SEEK_CUR);
962 lseek (fd, offset, SEEK_SET);
963 res = read (fd, buf, count);
964 lseek (fd, ooffset, SEEK_SET);
965 X_UNLOCK (preadwritelock);
966
967 return res;
968}
969
970static eio_ssize_t
971eio__pwrite (int fd, void *buf, size_t count, off_t offset)
972{
973 eio_ssize_t res;
974 off_t ooffset;
975
976 X_LOCK (preadwritelock);
977 ooffset = lseek (fd, 0, SEEK_CUR);
978 lseek (fd, offset, SEEK_SET);
979 res = write (fd, buf, count);
980 lseek (fd, ooffset, SEEK_SET);
981 X_UNLOCK (preadwritelock);
982
983 return res;
984}
985#endif
986 548
987#ifndef HAVE_UTIMES 549#ifndef HAVE_UTIMES
988 550
989# undef utimes 551# undef utimes
990# define utimes(path,times) eio__utimes (path, times) 552# define utimes(path,times) eio__utimes (path, times)
1032 int res; 594 int res;
1033 595
1034#if HAVE_SYS_SYNCFS 596#if HAVE_SYS_SYNCFS
1035 res = (int)syscall (__NR_syncfs, (int)(fd)); 597 res = (int)syscall (__NR_syncfs, (int)(fd));
1036#else 598#else
1037 res = -1; 599 res = EIO_ENOSYS ();
1038 errno = ENOSYS;
1039#endif 600#endif
1040 601
1041 if (res < 0 && errno == ENOSYS && fd >= 0) 602 if (res < 0 && errno == ENOSYS && fd >= 0)
1042 sync (); 603 sync ();
1043 604
1073} 634}
1074 635
1075static int 636static int
1076eio__fallocate (int fd, int mode, off_t offset, size_t len) 637eio__fallocate (int fd, int mode, off_t offset, size_t len)
1077{ 638{
1078#if HAVE_FALLOCATE 639#if HAVE_LINUX_FALLOCATE
1079 return fallocate (fd, mode, offset, len); 640 return fallocate (fd, mode, offset, len);
1080#else 641#else
1081 errno = ENOSYS; 642 return EIO_ENOSYS ();
1082 return -1;
1083#endif 643#endif
1084} 644}
1085 645
1086#if !HAVE_READAHEAD 646#if !HAVE_READAHEAD
1087# undef readahead 647# undef readahead
1102 todo -= len; 662 todo -= len;
1103 } 663 }
1104 664
1105 FUBd; 665 FUBd;
1106 666
1107 errno = 0; 667 /* linux's readahead basically only fails for EBADF or EINVAL (not mmappable) */
668 /* but not for e.g. EIO or eof, so we also never fail */
1108 return count; 669 return 0;
1109} 670}
1110 671
1111#endif 672#endif
1112 673
1113/* sendfile always needs emulation */ 674/* sendfile always needs emulation */
1148 709
1149 /* according to source inspection, this is correct, and useful behaviour */ 710 /* according to source inspection, this is correct, and useful behaviour */
1150 if (sbytes) 711 if (sbytes)
1151 res = sbytes; 712 res = sbytes;
1152 713
1153# elif defined (__APPLE__) 714# elif defined __APPLE__
1154 off_t sbytes = count; 715 off_t sbytes = count;
1155 res = sendfile (ifd, ofd, offset, &sbytes, 0, 0); 716 res = sendfile (ifd, ofd, offset, &sbytes, 0, 0);
1156 717
1157 /* according to the manpage, sbytes is always valid */ 718 /* according to the manpage, sbytes is always valid */
1158 if (sbytes) 719 if (sbytes)
1185 HANDLE h = TO_SOCKET (ifd); 746 HANDLE h = TO_SOCKET (ifd);
1186 SetFilePointer (h, offset, 0, FILE_BEGIN); 747 SetFilePointer (h, offset, 0, FILE_BEGIN);
1187 res = TransmitFile (TO_SOCKET (ofd), h, count, 0, 0, 0, 0); 748 res = TransmitFile (TO_SOCKET (ofd), h, count, 0, 0, 0, 0);
1188 749
1189#else 750#else
1190 res = -1; 751 res = EIO_ENOSYS ();
1191 errno = ENOSYS;
1192#endif 752#endif
1193 753
1194 /* we assume sendfile can copy at least 128mb in one go */ 754 /* we assume sendfile can copy at least 128mb in one go */
1195 if (res <= 128 * 1024 * 1024) 755 if (res <= 128 * 1024 * 1024)
1196 { 756 {
1382} 942}
1383 943
1384/*****************************************************************************/ 944/*****************************************************************************/
1385/* requests implemented outside eio_execute, because they are so large */ 945/* requests implemented outside eio_execute, because they are so large */
1386 946
947static void
948eio__lseek (eio_req *req)
949{
950 /* this usually gets optimised away completely, or your compiler sucks, */
951 /* or the whence constants really are not 0, 1, 2 */
952 int whence = req->int2 == EIO_SEEK_SET ? SEEK_SET
953 : req->int2 == EIO_SEEK_CUR ? SEEK_CUR
954 : req->int2 == EIO_SEEK_END ? SEEK_END
955 : req->int2;
956
957 req->offs = lseek (req->int1, req->offs, whence);
958 req->result = req->offs == (off_t)-1 ? -1 : 0;
959}
960
1387/* result will always end up in tmpbuf, there is always space for adding a 0-byte */ 961/* result will always end up in tmpbuf, there is always space for adding a 0-byte */
1388static int 962static int
1389eio__realpath (struct tmpbuf *tmpbuf, eio_wd wd, const char *path) 963eio__realpath (struct etp_tmpbuf *tmpbuf, eio_wd wd, const char *path)
1390{ 964{
965 char *res;
1391 const char *rel = path; 966 const char *rel = path;
1392 char *res;
1393 char *tmp1, *tmp2; 967 char *tmp1, *tmp2;
1394#if SYMLOOP_MAX > 32 968#if SYMLOOP_MAX > 32
1395 int symlinks = SYMLOOP_MAX; 969 int symlinks = SYMLOOP_MAX;
1396#else 970#else
1397 int symlinks = 32; 971 int symlinks = 32;
1403 977
1404 errno = ENOENT; 978 errno = ENOENT;
1405 if (!*rel) 979 if (!*rel)
1406 return -1; 980 return -1;
1407 981
1408 res = tmpbuf_get (tmpbuf, PATH_MAX * 3); 982 res = etp_tmpbuf_get (tmpbuf, PATH_MAX * 3);
983#ifdef _WIN32
984 if (_access (rel, 4) != 0)
985 return -1;
986
987 symlinks = GetFullPathName (rel, PATH_MAX * 3, res, 0);
988
989 errno = ENAMETOOLONG;
990 if (symlinks >= PATH_MAX * 3)
991 return -1;
992
993 errno = EIO;
994 if (symlinks <= 0)
995 return -1;
996
997 return symlinks;
998
999#else
1409 tmp1 = res + PATH_MAX; 1000 tmp1 = res + PATH_MAX;
1410 tmp2 = tmp1 + PATH_MAX; 1001 tmp2 = tmp1 + PATH_MAX;
1411 1002
1412#if 0 /* disabled, the musl way to do things is just too racy */ 1003#if 0 /* disabled, the musl way to do things is just too racy */
1413#if __linux && defined(O_NONBLOCK) && defined(O_NOATIME) 1004#if __linux && defined(O_NONBLOCK) && defined(O_NOATIME)
1417 1008
1418 if (fd >= 0) 1009 if (fd >= 0)
1419 { 1010 {
1420 sprintf (tmp1, "/proc/self/fd/%d", fd); 1011 sprintf (tmp1, "/proc/self/fd/%d", fd);
1421 req->result = readlink (tmp1, res, PATH_MAX); 1012 req->result = readlink (tmp1, res, PATH_MAX);
1013 /* here we should probably stat the open file and the disk file, to make sure they still match */
1422 close (fd); 1014 close (fd);
1423
1424 /* here we should probably stat the open file and the disk file, to make sure they still match */
1425 1015
1426 if (req->result > 0) 1016 if (req->result > 0)
1427 goto done; 1017 goto done;
1428 } 1018 }
1429 else if (errno == ELOOP || errno == ENAMETOOLONG || errno == ENOENT || errno == ENOTDIR || errno == EIO) 1019 else if (errno == ELOOP || errno == ENAMETOOLONG || errno == ENOENT || errno == ENOTDIR || errno == EIO)
1430 return; 1020 return -1;
1431 } 1021 }
1432#endif 1022#endif
1433#endif 1023#endif
1434 1024
1435 if (*rel != '/') 1025 if (*rel != '/')
1487 } 1077 }
1488 } 1078 }
1489 1079
1490 errno = ENAMETOOLONG; 1080 errno = ENAMETOOLONG;
1491 if (res + 1 + len + 1 >= tmp1) 1081 if (res + 1 + len + 1 >= tmp1)
1492 return; 1082 return -1;
1493 1083
1494 /* copy one component */ 1084 /* copy one component */
1495 *res = '/'; 1085 *res = '/';
1496 memcpy (res + 1, beg, len); 1086 memcpy (res + 1, beg, len);
1497 1087
1537 /* special case for the lone root path */ 1127 /* special case for the lone root path */
1538 if (res == tmpbuf->ptr) 1128 if (res == tmpbuf->ptr)
1539 *res++ = '/'; 1129 *res++ = '/';
1540 1130
1541 return res - (char *)tmpbuf->ptr; 1131 return res - (char *)tmpbuf->ptr;
1132#endif
1542} 1133}
1543 1134
1544static signed char 1135static signed char
1545eio_dent_cmp (const eio_dirent *a, const eio_dirent *b) 1136eio_dent_cmp (const eio_dirent *a, const eio_dirent *b)
1546{ 1137{
1919 #ifdef DT_FIFO 1510 #ifdef DT_FIFO
1920 case DT_FIFO: ent->type = EIO_DT_FIFO; break; 1511 case DT_FIFO: ent->type = EIO_DT_FIFO; break;
1921 #endif 1512 #endif
1922 #ifdef DT_CHR 1513 #ifdef DT_CHR
1923 case DT_CHR: ent->type = EIO_DT_CHR; break; 1514 case DT_CHR: ent->type = EIO_DT_CHR; break;
1924 #endif 1515 #endif
1925 #ifdef DT_MPC 1516 #ifdef DT_MPC
1926 case DT_MPC: ent->type = EIO_DT_MPC; break; 1517 case DT_MPC: ent->type = EIO_DT_MPC; break;
1927 #endif 1518 #endif
1928 #ifdef DT_DIR 1519 #ifdef DT_DIR
1929 case DT_DIR: ent->type = EIO_DT_DIR; break; 1520 case DT_DIR: ent->type = EIO_DT_DIR; break;
1930 #endif 1521 #endif
1931 #ifdef DT_NAM 1522 #ifdef DT_NAM
1932 case DT_NAM: ent->type = EIO_DT_NAM; break; 1523 case DT_NAM: ent->type = EIO_DT_NAM; break;
1933 #endif 1524 #endif
1934 #ifdef DT_BLK 1525 #ifdef DT_BLK
1935 case DT_BLK: ent->type = EIO_DT_BLK; break; 1526 case DT_BLK: ent->type = EIO_DT_BLK; break;
1936 #endif 1527 #endif
1937 #ifdef DT_MPB 1528 #ifdef DT_MPB
1938 case DT_MPB: ent->type = EIO_DT_MPB; break; 1529 case DT_MPB: ent->type = EIO_DT_MPB; break;
1939 #endif 1530 #endif
1940 #ifdef DT_REG 1531 #ifdef DT_REG
1941 case DT_REG: ent->type = EIO_DT_REG; break; 1532 case DT_REG: ent->type = EIO_DT_REG; break;
1942 #endif 1533 #endif
1943 #ifdef DT_NWK 1534 #ifdef DT_NWK
1944 case DT_NWK: ent->type = EIO_DT_NWK; break; 1535 case DT_NWK: ent->type = EIO_DT_NWK; break;
1945 #endif 1536 #endif
1946 #ifdef DT_CMP 1537 #ifdef DT_CMP
1947 case DT_CMP: ent->type = EIO_DT_CMP; break; 1538 case DT_CMP: ent->type = EIO_DT_CMP; break;
1948 #endif 1539 #endif
1949 #ifdef DT_LNK 1540 #ifdef DT_LNK
1950 case DT_LNK: ent->type = EIO_DT_LNK; break; 1541 case DT_LNK: ent->type = EIO_DT_LNK; break;
1951 #endif 1542 #endif
1952 #ifdef DT_SOCK 1543 #ifdef DT_SOCK
1953 case DT_SOCK: ent->type = EIO_DT_SOCK; break; 1544 case DT_SOCK: ent->type = EIO_DT_SOCK; break;
1966 { 1557 {
1967 if (ent->type == EIO_DT_UNKNOWN) 1558 if (ent->type == EIO_DT_UNKNOWN)
1968 { 1559 {
1969 if (*name == '.') /* leading dots are likely directories, and, in any case, rare */ 1560 if (*name == '.') /* leading dots are likely directories, and, in any case, rare */
1970 ent->score = 1; 1561 ent->score = 1;
1971 else if (!strchr (name, '.')) /* absense of dots indicate likely dirs */ 1562 else if (!strchr (name, '.')) /* absence of dots indicate likely dirs */
1972 ent->score = len <= 2 ? 4 - len : len <= 4 ? 4 : len <= 7 ? 5 : 6; /* shorter == more likely dir, but avoid too many classes */ 1563 ent->score = len <= 2 ? 4 - len : len <= 4 ? 4 : len <= 7 ? 5 : 6; /* shorter == more likely dir, but avoid too many classes */
1973 } 1564 }
1974 else if (ent->type == EIO_DT_DIR) 1565 else if (ent->type == EIO_DT_DIR)
1975 ent->score = 0; 1566 ent->score = 0;
1976 } 1567 }
2005#if !HAVE_AT 1596#if !HAVE_AT
2006 1597
2007 /* a bit like realpath, but usually faster because it doesn't have to return */ 1598 /* a bit like realpath, but usually faster because it doesn't have to return */
2008/* an absolute or canonical path */ 1599/* an absolute or canonical path */
2009static const char * 1600static const char *
2010wd_expand (struct tmpbuf *tmpbuf, eio_wd wd, const char *path) 1601wd_expand (struct etp_tmpbuf *tmpbuf, eio_wd wd, const char *path)
2011{ 1602{
2012 if (!wd || *path == '/') 1603 if (!wd || *path == '/')
2013 return path; 1604 return path;
2014 1605
2015 if (path [0] == '.' && !path [1]) 1606 if (path [0] == '.' && !path [1])
2017 1608
2018 { 1609 {
2019 int l1 = wd->len; 1610 int l1 = wd->len;
2020 int l2 = strlen (path); 1611 int l2 = strlen (path);
2021 1612
2022 char *res = tmpbuf_get (tmpbuf, l1 + l2 + 2); 1613 char *res = etp_tmpbuf_get (tmpbuf, l1 + l2 + 2);
2023 1614
2024 memcpy (res, wd->str, l1); 1615 memcpy (res, wd->str, l1);
2025 res [l1] = '/'; 1616 res [l1] = '/';
2026 memcpy (res + l1 + 1, path, l2 + 1); 1617 memcpy (res + l1 + 1, path, l2 + 1);
2027 1618
2030} 1621}
2031 1622
2032#endif 1623#endif
2033 1624
2034static eio_wd 1625static eio_wd
2035eio__wd_open_sync (struct tmpbuf *tmpbuf, eio_wd wd, const char *path) 1626eio__wd_open_sync (struct etp_tmpbuf *tmpbuf, eio_wd wd, const char *path)
2036{ 1627{
2037 int fd; 1628 int fd;
2038 eio_wd res; 1629 eio_wd res;
2039 int len = eio__realpath (tmpbuf, wd, path); 1630 int len = eio__realpath (tmpbuf, wd, path);
2040 1631
2062} 1653}
2063 1654
2064eio_wd 1655eio_wd
2065eio_wd_open_sync (eio_wd wd, const char *path) 1656eio_wd_open_sync (eio_wd wd, const char *path)
2066{ 1657{
2067 struct tmpbuf tmpbuf = { 0 }; 1658 struct etp_tmpbuf tmpbuf = { };
2068 wd = eio__wd_open_sync (&tmpbuf, wd, path); 1659 wd = eio__wd_open_sync (&tmpbuf, wd, path);
2069 free (tmpbuf.ptr); 1660 free (tmpbuf.ptr);
2070 1661
2071 return wd; 1662 return wd;
2072} 1663}
2121/*****************************************************************************/ 1712/*****************************************************************************/
2122 1713
2123#define ALLOC(len) \ 1714#define ALLOC(len) \
2124 if (!req->ptr2) \ 1715 if (!req->ptr2) \
2125 { \ 1716 { \
2126 X_LOCK (wrklock); \ 1717 X_LOCK (EIO_POOL->wrklock); \
2127 req->flags |= EIO_FLAG_PTR2_FREE; \ 1718 req->flags |= EIO_FLAG_PTR2_FREE; \
2128 X_UNLOCK (wrklock); \ 1719 X_UNLOCK (EIO_POOL->wrklock); \
2129 req->ptr2 = malloc (len); \ 1720 req->ptr2 = malloc (len); \
2130 if (!req->ptr2) \ 1721 if (!req->ptr2) \
2131 { \ 1722 { \
2132 errno = ENOMEM; \ 1723 errno = ENOMEM; \
2133 req->result = -1; \ 1724 req->result = -1; \
2134 break; \ 1725 break; \
2135 } \ 1726 } \
2136 } 1727 }
2137 1728
2138X_THREAD_PROC (etp_proc)
2139{
2140 ETP_REQ *req;
2141 struct timespec ts;
2142 etp_worker *self = (etp_worker *)thr_arg;
2143
2144#if HAVE_PRCTL_SET_NAME
2145 prctl (PR_SET_NAME, (unsigned long)"eio_thread", 0, 0, 0);
2146#endif
2147
2148 /* try to distribute timeouts somewhat evenly */
2149 ts.tv_nsec = ((unsigned long)self & 1023UL) * (1000000000UL / 1024UL);
2150
2151 for (;;)
2152 {
2153 ts.tv_sec = 0;
2154
2155 X_LOCK (reqlock);
2156
2157 for (;;)
2158 {
2159 req = reqq_shift (&req_queue);
2160
2161 if (req)
2162 break;
2163
2164 if (ts.tv_sec == 1) /* no request, but timeout detected, let's quit */
2165 {
2166 X_UNLOCK (reqlock);
2167 X_LOCK (wrklock);
2168 --started;
2169 X_UNLOCK (wrklock);
2170 goto quit;
2171 }
2172
2173 ++idle;
2174
2175 if (idle <= max_idle)
2176 /* we are allowed to idle, so do so without any timeout */
2177 X_COND_WAIT (reqwait, reqlock);
2178 else
2179 {
2180 /* initialise timeout once */
2181 if (!ts.tv_sec)
2182 ts.tv_sec = time (0) + idle_timeout;
2183
2184 if (X_COND_TIMEDWAIT (reqwait, reqlock, ts) == ETIMEDOUT)
2185 ts.tv_sec = 1; /* assuming this is not a value computed above.,.. */
2186 }
2187
2188 --idle;
2189 }
2190
2191 --nready;
2192
2193 X_UNLOCK (reqlock);
2194
2195 if (req->type < 0)
2196 goto quit;
2197
2198 ETP_EXECUTE (self, req);
2199
2200 X_LOCK (reslock);
2201
2202 ++npending;
2203
2204 if (!reqq_push (&res_queue, req) && want_poll_cb)
2205 want_poll_cb ();
2206
2207 etp_worker_clear (self);
2208
2209 X_UNLOCK (reslock);
2210 }
2211
2212quit:
2213 free (req);
2214
2215 X_LOCK (wrklock);
2216 etp_worker_free (self);
2217 X_UNLOCK (wrklock);
2218
2219 return 0;
2220}
2221
2222/*****************************************************************************/ 1729/*****************************************************************************/
2223 1730
2224int ecb_cold 1731int ecb_cold
2225eio_init (void (*want_poll)(void), void (*done_poll)(void)) 1732eio_init (void (*want_poll)(void), void (*done_poll)(void))
2226{ 1733{
2227#if !HAVE_PREADWRITE 1734 eio_want_poll_cb = want_poll;
2228 X_MUTEX_CREATE (preadwritelock); 1735 eio_done_poll_cb = done_poll;
2229#endif
2230 1736
2231 return etp_init (want_poll, done_poll); 1737 return etp_init (EIO_POOL, 0, 0, 0);
2232} 1738}
2233 1739
2234ecb_inline void 1740ecb_inline void
2235eio_api_destroy (eio_req *req) 1741eio_api_destroy (eio_req *req)
2236{ 1742{
2237 free (req); 1743 free (req);
2238} 1744}
2239 1745
2240#define REQ(rtype) \ 1746#define REQ(rtype) \
2241 eio_req *req; \ 1747 eio_req *req; \
2242 \ 1748 \
2243 req = (eio_req *)calloc (1, sizeof *req); \ 1749 req = (eio_req *)calloc (1, sizeof *req); \
2244 if (!req) \ 1750 if (!req) \
2245 return 0; \ 1751 return 0; \
2259 { \ 1765 { \
2260 eio_api_destroy (req); \ 1766 eio_api_destroy (req); \
2261 return 0; \ 1767 return 0; \
2262 } 1768 }
2263 1769
1770#define SINGLEDOT(ptr) (0[(char *)(ptr)] == '.' && !1[(char *)(ptr)])
1771
2264static void 1772static void
2265eio_execute (etp_worker *self, eio_req *req) 1773eio_execute (etp_worker *self, eio_req *req)
2266{ 1774{
2267#if HAVE_AT 1775#if HAVE_AT
2268 int dirfd; 1776 int dirfd;
2299 req->result = req->wd == EIO_INVALID_WD ? -1 : 0; 1807 req->result = req->wd == EIO_INVALID_WD ? -1 : 0;
2300 break; 1808 break;
2301 case EIO_WD_CLOSE: req->result = 0; 1809 case EIO_WD_CLOSE: req->result = 0;
2302 eio_wd_close_sync (req->wd); break; 1810 eio_wd_close_sync (req->wd); break;
2303 1811
1812 case EIO_SEEK: eio__lseek (req); break;
2304 case EIO_READ: ALLOC (req->size); 1813 case EIO_READ: ALLOC (req->size);
2305 req->result = req->offs >= 0 1814 req->result = req->offs >= 0
2306 ? pread (req->int1, req->ptr2, req->size, req->offs) 1815 ? pread (req->int1, req->ptr2, req->size, req->offs)
2307 : read (req->int1, req->ptr2, req->size); break; 1816 : read (req->int1, req->ptr2, req->size); break;
2308 case EIO_WRITE: req->result = req->offs >= 0 1817 case EIO_WRITE: req->result = req->offs >= 0
2322 case EIO_CHMOD: req->result = fchmodat (dirfd, req->ptr1, (mode_t)req->int2, 0); break; 1831 case EIO_CHMOD: req->result = fchmodat (dirfd, req->ptr1, (mode_t)req->int2, 0); break;
2323 case EIO_TRUNCATE: req->result = eio__truncateat (dirfd, req->ptr1, req->offs); break; 1832 case EIO_TRUNCATE: req->result = eio__truncateat (dirfd, req->ptr1, req->offs); break;
2324 case EIO_OPEN: req->result = openat (dirfd, req->ptr1, req->int1, (mode_t)req->int2); break; 1833 case EIO_OPEN: req->result = openat (dirfd, req->ptr1, req->int1, (mode_t)req->int2); break;
2325 1834
2326 case EIO_UNLINK: req->result = unlinkat (dirfd, req->ptr1, 0); break; 1835 case EIO_UNLINK: req->result = unlinkat (dirfd, req->ptr1, 0); break;
2327 case EIO_RMDIR: req->result = unlinkat (dirfd, req->ptr1, AT_REMOVEDIR); break; 1836 case EIO_RMDIR: /* complications arise because "." cannot be removed, so we might have to expand */
1837 req->result = req->wd && SINGLEDOT (req->ptr1)
1838 ? rmdir (req->wd->str)
1839 : unlinkat (dirfd, req->ptr1, AT_REMOVEDIR); break;
2328 case EIO_MKDIR: req->result = mkdirat (dirfd, req->ptr1, (mode_t)req->int2); break; 1840 case EIO_MKDIR: req->result = mkdirat (dirfd, req->ptr1, (mode_t)req->int2); break;
2329 case EIO_RENAME: req->result = renameat (dirfd, req->ptr1, WD2FD ((eio_wd)req->int3), req->ptr2); break; 1841 case EIO_RENAME: /* complications arise because "." cannot be renamed, so we might have to expand */
1842 req->result = req->wd && SINGLEDOT (req->ptr1)
1843 ? rename (req->wd->str, req->ptr2)
1844 : renameat (dirfd, req->ptr1, WD2FD ((eio_wd)req->int3), req->ptr2); break;
2330 case EIO_LINK: req->result = linkat (dirfd, req->ptr1, WD2FD ((eio_wd)req->int3), req->ptr2, 0); break; 1845 case EIO_LINK: req->result = linkat (dirfd, req->ptr1, WD2FD ((eio_wd)req->int3), req->ptr2, 0); break;
2331 case EIO_SYMLINK: req->result = symlinkat (req->ptr1, dirfd, req->ptr2); break; 1846 case EIO_SYMLINK: req->result = symlinkat (req->ptr1, dirfd, req->ptr2); break;
2332 case EIO_MKNOD: req->result = mknodat (dirfd, req->ptr1, (mode_t)req->int2, (dev_t)req->offs); break; 1847 case EIO_MKNOD: req->result = mknodat (dirfd, req->ptr1, (mode_t)req->int2, (dev_t)req->offs); break;
2333 case EIO_READLINK: ALLOC (PATH_MAX); 1848 case EIO_READLINK: ALLOC (PATH_MAX);
2334 req->result = readlinkat (dirfd, req->ptr1, req->ptr2, PATH_MAX); break; 1849 req->result = readlinkat (dirfd, req->ptr1, req->ptr2, PATH_MAX); break;
2452 req->result = select (0, 0, 0, 0, &tv); 1967 req->result = select (0, 0, 0, 0, &tv);
2453 } 1968 }
2454#endif 1969#endif
2455 break; 1970 break;
2456 1971
1972#if 0
2457 case EIO_GROUP: 1973 case EIO_GROUP:
2458 abort (); /* handled in eio_request */ 1974 abort (); /* handled in eio_request */
1975#endif
2459 1976
2460 case EIO_NOP: 1977 case EIO_NOP:
2461 req->result = 0; 1978 req->result = 0;
2462 break; 1979 break;
2463 1980
2464 case EIO_CUSTOM: 1981 case EIO_CUSTOM:
2465 req->feed (req); 1982 req->feed (req);
2466 break; 1983 break;
2467 1984
2468 default: 1985 default:
2469 errno = ENOSYS;
2470 req->result = -1; 1986 req->result = EIO_ENOSYS ();
2471 break; 1987 break;
2472 } 1988 }
2473 1989
2474 req->errorno = errno; 1990 req->errorno = errno;
2475} 1991}
2552} 2068}
2553 2069
2554eio_req *eio_readahead (int fd, off_t offset, size_t length, int pri, eio_cb cb, void *data) 2070eio_req *eio_readahead (int fd, off_t offset, size_t length, int pri, eio_cb cb, void *data)
2555{ 2071{
2556 REQ (EIO_READAHEAD); req->int1 = fd; req->offs = offset; req->size = length; SEND; 2072 REQ (EIO_READAHEAD); req->int1 = fd; req->offs = offset; req->size = length; SEND;
2073}
2074
2075eio_req *eio_seek (int fd, off_t offset, int whence, int pri, eio_cb cb, void *data)
2076{
2077 REQ (EIO_SEEK); req->int1 = fd; req->offs = offset; req->int2 = whence; SEND;
2557} 2078}
2558 2079
2559eio_req *eio_read (int fd, void *buf, size_t length, off_t offset, int pri, eio_cb cb, void *data) 2080eio_req *eio_read (int fd, void *buf, size_t length, off_t offset, int pri, eio_cb cb, void *data)
2560{ 2081{
2561 REQ (EIO_READ); req->int1 = fd; req->offs = offset; req->size = length; req->ptr2 = buf; SEND; 2082 REQ (EIO_READ); req->int1 = fd; req->offs = offset; req->size = length; req->ptr2 = buf; SEND;
2759void 2280void
2760eio_grp_add (eio_req *grp, eio_req *req) 2281eio_grp_add (eio_req *grp, eio_req *req)
2761{ 2282{
2762 assert (("cannot add requests to IO::AIO::GRP after the group finished", grp->int1 != 2)); 2283 assert (("cannot add requests to IO::AIO::GRP after the group finished", grp->int1 != 2));
2763 2284
2764 grp->flags |= EIO_FLAG_GROUPADD; 2285 grp->flags |= ETP_FLAG_GROUPADD;
2765 2286
2766 ++grp->size; 2287 ++grp->size;
2767 req->grp = grp; 2288 req->grp = grp;
2768 2289
2769 req->grp_prev = 0; 2290 req->grp_prev = 0;
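
eio_grp_add now marks the group with ETP_FLAG_GROUPADD from etp.c rather than the old EIO_FLAG_GROUPADD. A sketch of how a group is typically assembled, assuming the eio_grp () and eio_unlink () constructors from eio.h (neither appears in this hunk; the paths are placeholders):

#include <stdio.h>
#include "eio.h"

/* illustrative only: one callback once every member request has finished */
static int
grp_done (eio_req *grp)
{
  printf ("all group members have finished\n");
  return 0;
}

static void
unlink_many (char *const *paths, int npaths)
{
  eio_req *grp = eio_grp (grp_done, 0);
  int i;

  /* member requests are submitted by their constructors; adding them to
     the group only delays the group callback until they all finish */
  for (i = 0; i < npaths; ++i)
    eio_grp_add (grp, eio_unlink (paths [i], 0, 0, 0));
}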
