/cvs/libeio/eio.c

Comparing libeio/eio.c (file contents):
Revision 1.121 by root, Sat Jun 2 20:13:26 2012 UTC vs.
Revision 1.143 by root, Sat Dec 3 16:33:46 2016 UTC

1/* 1/*
2 * libeio implementation 2 * libeio implementation
3 * 3 *
4 * Copyright (c) 2007,2008,2009,2010,2011,2012 Marc Alexander Lehmann <libeio@schmorp.de> 4 * Copyright (c) 2007,2008,2009,2010,2011,2012,2013,2016 Marc Alexander Lehmann <libeio@schmorp.de>
5 * All rights reserved. 5 * All rights reserved.
6 * 6 *
7 * Redistribution and use in source and binary forms, with or without modifica- 7 * Redistribution and use in source and binary forms, with or without modifica-
8 * tion, are permitted provided that the following conditions are met: 8 * tion, are permitted provided that the following conditions are met:
9 * 9 *
10 * 1. Redistributions of source code must retain the above copyright notice, 10 * 1. Redistributions of source code must retain the above copyright notice,
11 * this list of conditions and the following disclaimer. 11 * this list of conditions and the following disclaimer.
12 * 12 *
13 * 2. Redistributions in binary form must reproduce the above copyright 13 * 2. Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in the 14 * notice, this list of conditions and the following disclaimer in the
15 * documentation and/or other materials provided with the distribution. 15 * documentation and/or other materials provided with the distribution.
16 * 16 *
17 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED 17 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
18 * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MER- 18 * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MER-
19 * CHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO 19 * CHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
20 * EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPE- 20 * EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPE-
21 * CIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, 21 * CIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
42#endif 42#endif
43 43
44#include "eio.h" 44#include "eio.h"
45#include "ecb.h" 45#include "ecb.h"
46 46
47#ifdef EIO_STACKSIZE
48# define X_STACKSIZE EIO_STACKSIZE
49#endif
50#include "xthread.h"
51
52#include <errno.h> 47#include <errno.h>
53#include <stddef.h> 48#include <stddef.h>
54#include <stdlib.h> 49#include <stdlib.h>
55#include <string.h> 50#include <string.h>
56#include <errno.h> 51#include <errno.h>
120 #define link(old,neu) (CreateHardLink (neu, old, 0) ? 0 : EIO_ERRNO (ENOENT, -1)) 115 #define link(old,neu) (CreateHardLink (neu, old, 0) ? 0 : EIO_ERRNO (ENOENT, -1))
121 116
122 #define chmod(path,mode) _chmod (path, mode) 117 #define chmod(path,mode) _chmod (path, mode)
123 #define dup(fd) _dup (fd) 118 #define dup(fd) _dup (fd)
124 #define dup2(fd1,fd2) _dup2 (fd1, fd2) 119 #define dup2(fd1,fd2) _dup2 (fd1, fd2)
120 #define pipe(fds) _pipe (fds, 4096, O_BINARY)
125 121
122 #define fcntl(fd,cmd,arg) EIO_ENOSYS ()
123 #define ioctl(fd,cmd,arg) EIO_ENOSYS ()
126 #define fchmod(fd,mode) EIO_ENOSYS () 124 #define fchmod(fd,mode) EIO_ENOSYS ()
127 #define chown(path,uid,gid) EIO_ENOSYS () 125 #define chown(path,uid,gid) EIO_ENOSYS ()
128 #define fchown(fd,uid,gid) EIO_ENOSYS () 126 #define fchown(fd,uid,gid) EIO_ENOSYS ()
129 #define truncate(path,offs) EIO_ENOSYS () /* far-miss: SetEndOfFile */ 127 #define truncate(path,offs) EIO_ENOSYS () /* far-miss: SetEndOfFile */
130 #define ftruncate(fd,offs) EIO_ENOSYS () /* near-miss: SetEndOfFile */ 128 #define ftruncate(fd,offs) EIO_ENOSYS () /* near-miss: SetEndOfFile */
131 #define mknod(path,mode,dev) EIO_ENOSYS () 129 #define mknod(path,mode,dev) EIO_ENOSYS ()
132 #define sync() EIO_ENOSYS () 130 #define sync() EIO_ENOSYS ()
133 #define readlink(path,buf,s) EIO_ENOSYS () 131 #define readlink(path,buf,s) EIO_ENOSYS ()
134 #define statvfs(path,buf) EIO_ENOSYS () 132 #define statvfs(path,buf) EIO_ENOSYS ()
135 #define fstatvfs(fd,buf) EIO_ENOSYS () 133 #define fstatvfs(fd,buf) EIO_ENOSYS ()
134
135 #define pread(fd,buf,count,offset) eio__pread (fd, buf, count, offset)
136 #define pwrite(fd,buf,count,offset) eio__pwrite (fd, buf, count, offset)
137
138 #if __GNUC__
139 typedef long long eio_off_t; /* signed for compatibility to msvc */
140 #else
141 typedef __int64 eio_off_t; /* unsigned not supported by msvc */
142 #endif
143
144 static eio_ssize_t
145 eio__pread (int fd, void *buf, eio_ssize_t count, eio_off_t offset)
146 {
147 OVERLAPPED o = { 0 };
148 DWORD got;
149
150 o.Offset = offset;
151 o.OffsetHigh = offset >> 32;
152
153 return ReadFile ((HANDLE)EIO_FD_TO_WIN32_HANDLE (fd), buf, count, &got, &o)
154 ? got : -1;
155 }
156
157 static eio_ssize_t
158 eio__pwrite (int fd, void *buf, eio_ssize_t count, eio_off_t offset)
159 {
160 OVERLAPPED o = { 0 };
161 DWORD got;
162
163 o.Offset = offset;
164 o.OffsetHigh = offset >> 32;
165
166 return WriteFile ((HANDLE)EIO_FD_TO_WIN32_HANDLE (fd), buf, count, &got, &o)
167 ? got : -1;
168 }
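
The emulation above splits the 64-bit file offset across the two 32-bit fields of the OVERLAPPED structure. A minimal stand-alone illustration of that split (the offset value is arbitrary and not part of eio.c):

#include <stdint.h>
#include <stdio.h>

int
main (void)
{
  int64_t  offset = 0x123456789abLL;       /* an offset beyond 4 GiB */
  uint32_t lo = (uint32_t)offset;          /* what goes into o.Offset     */
  uint32_t hi = (uint32_t)(offset >> 32);  /* what goes into o.OffsetHigh */

  printf ("Offset=%08x OffsetHigh=%08x\n", (unsigned)lo, (unsigned)hi);
  return 0;
}
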
136 169
137 /* rename() uses MoveFile, which fails to overwrite */ 170 /* rename() uses MoveFile, which fails to overwrite */
138 #define rename(old,neu) eio__rename (old, neu) 171 #define rename(old,neu) eio__rename (old, neu)
139 172
140 static int 173 static int
168 /* we could even stat and see if it exists */ 201 /* we could even stat and see if it exists */
169 static int 202 static int
170 symlink (const char *old, const char *neu) 203 symlink (const char *old, const char *neu)
171 { 204 {
172 #if WINVER >= 0x0600 205 #if WINVER >= 0x0600
206 int flags;
207
208 /* This tries out all combinations of SYMBOLIC_LINK_FLAG_DIRECTORY
209 * and SYMBOLIC_LINK_FLAG_ALLOW_UNPRIVILEGED_CREATE,
210 * with directory first.
211 */
212 for (flags = 3; flags >= 0; --flags)
173 if (CreateSymbolicLink (neu, old, 1)) 213 if (CreateSymbolicLink (neu, old, flags))
174 return 0; 214 return 0;
175
176 if (CreateSymbolicLink (neu, old, 0))
177 return 0;
178 #endif 215 #endif
179 216
180 return EIO_ERRNO (ENOENT, -1); 217 return EIO_ERRNO (ENOENT, -1);
181 } 218 }
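
For reference, the two flag bits the loop above enumerates are, per the Windows SDK (values assumed, not defined in eio.c), SYMBOLIC_LINK_FLAG_DIRECTORY (0x1) and SYMBOLIC_LINK_FLAG_ALLOW_UNPRIVILEGED_CREATE (0x2), so counting flags down from 3 to 0 tries all four combinations:

/* flags = 3: DIRECTORY | ALLOW_UNPRIVILEGED_CREATE
 * flags = 2: ALLOW_UNPRIVILEGED_CREATE
 * flags = 1: DIRECTORY
 * flags = 0: plain (privileged) file symlink
 * the first CreateSymbolicLink () call that succeeds wins */
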
182 219
183 /* POSIX API only */ 220 /* POSIX API only, causing trouble for win32 apps */
184 #define CreateHardLink(neu,old,flags) 0 221 #define CreateHardLink(neu,old,flags) 0 /* not really creating hardlink, still using relative paths? */
185 #define CreateSymbolicLink(neu,old,flags) 0 222 #define CreateSymbolicLink(neu,old,flags) 0 /* vista+ only */
186 223
187 struct statvfs 224 struct statvfs
188 { 225 {
189 int dummy; 226 int dummy;
190 }; 227 };
196 233
197#else 234#else
198 235
199 #include <sys/time.h> 236 #include <sys/time.h>
200 #include <sys/select.h> 237 #include <sys/select.h>
201 #include <sys/statvfs.h>
202 #include <unistd.h> 238 #include <unistd.h>
203 #include <signal.h> 239 #include <signal.h>
204 #include <dirent.h> 240 #include <dirent.h>
241
242 #ifdef ANDROID
243 #include <sys/vfs.h>
244 #define statvfs statfs
245 #define fstatvfs fstatfs
246 #include <asm/page.h> /* supposedly limits.h does #define PAGESIZE PAGESIZE */
247 #else
248 #include <sys/statvfs.h>
249 #endif
205 250
206 #if _POSIX_MEMLOCK || _POSIX_MEMLOCK_RANGE || _POSIX_MAPPED_FILES 251 #if _POSIX_MEMLOCK || _POSIX_MEMLOCK_RANGE || _POSIX_MAPPED_FILES
207 #include <sys/mman.h> 252 #include <sys/mman.h>
208 #endif 253 #endif
209 254
237# include <utime.h> 282# include <utime.h>
238#endif 283#endif
239 284
240#if HAVE_SYS_SYSCALL_H 285#if HAVE_SYS_SYSCALL_H
241# include <sys/syscall.h> 286# include <sys/syscall.h>
242#endif
243
244#if HAVE_SYS_PRCTL_H
245# include <sys/prctl.h>
246#endif 287#endif
247 288
248#if HAVE_SENDFILE 289#if HAVE_SENDFILE
249# if __linux 290# if __linux
250# include <sys/sendfile.h> 291# include <sys/sendfile.h>
281#endif 322#endif
282 323
283/* buffer size for various temporary buffers */ 324/* buffer size for various temporary buffers */
284#define EIO_BUFSIZE 65536 325#define EIO_BUFSIZE 65536
285 326
286#define dBUF \ 327#define dBUF \
287 char *eio_buf = malloc (EIO_BUFSIZE); \ 328 char *eio_buf = malloc (EIO_BUFSIZE); \
288 errno = ENOMEM; \ 329 errno = ENOMEM; \
289 if (!eio_buf) \ 330 if (!eio_buf) \
290 return -1 331 return -1
291 332
292#define FUBd \ 333#define FUBd \
293 free (eio_buf) 334 free (eio_buf)
294 335
295#define EIO_TICKS ((1000000 + 1023) >> 10)
296
297/*****************************************************************************/ 336/*****************************************************************************/
298 337
299struct tmpbuf
300{
301 void *ptr;
302 int len;
303};
304
305static void *
306tmpbuf_get (struct tmpbuf *buf, int len)
307{
308 if (buf->len < len)
309 {
310 free (buf->ptr);
311 buf->ptr = malloc (buf->len = len);
312 }
313
314 return buf->ptr;
315}
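
A usage sketch for this grow-only scratch buffer (illustration only; in the new revision the helper lives on as etp_tmpbuf_get, presumably provided by the included etp.c):

#include <stdlib.h>

static void
tmpbuf_demo (void)
{
  struct tmpbuf buf = { 0 };
  char *p;

  p = tmpbuf_get (&buf, 256); /* first call allocates 256 bytes    */
  p = tmpbuf_get (&buf, 128); /* smaller request reuses that block */
  p = tmpbuf_get (&buf, 512); /* larger request frees and regrows  */
  (void)p;

  free (buf.ptr);             /* the caller owns the final block   */
}
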
316
317struct tmpbuf; 338struct etp_tmpbuf;
318 339
319#if _POSIX_VERSION >= 200809L 340#if _POSIX_VERSION >= 200809L
320 #define HAVE_AT 1 341 #define HAVE_AT 1
321 #define WD2FD(wd) ((wd) ? (wd)->fd : AT_FDCWD) 342 #define WD2FD(wd) ((wd) ? (wd)->fd : AT_FDCWD)
322 #ifndef O_SEARCH 343 #ifndef O_SEARCH
323 #define O_SEARCH O_RDONLY 344 #define O_SEARCH O_RDONLY
324 #endif 345 #endif
325#else 346#else
326 #define HAVE_AT 0 347 #define HAVE_AT 0
327 static const char *wd_expand (struct tmpbuf *tmpbuf, eio_wd wd, const char *path); 348 static const char *wd_expand (struct etp_tmpbuf *tmpbuf, eio_wd wd, const char *path);
328#endif 349#endif
329 350
330struct eio_pwd 351struct eio_pwd
331{ 352{
332#if HAVE_AT 353#if HAVE_AT
339/*****************************************************************************/ 360/*****************************************************************************/
340 361
341#define ETP_PRI_MIN EIO_PRI_MIN 362#define ETP_PRI_MIN EIO_PRI_MIN
342#define ETP_PRI_MAX EIO_PRI_MAX 363#define ETP_PRI_MAX EIO_PRI_MAX
343 364
365#define ETP_TYPE_QUIT -1
366#define ETP_TYPE_GROUP EIO_GROUP
367
368static void eio_nop_callback (void) { }
369static void (*eio_want_poll_cb)(void) = eio_nop_callback;
370static void (*eio_done_poll_cb)(void) = eio_nop_callback;
371
372#define ETP_WANT_POLL(pool) eio_want_poll_cb ()
373#define ETP_DONE_POLL(pool) eio_done_poll_cb ()
374
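The ETP_WANT_POLL/ETP_DONE_POLL hooks ignore their pool argument and forward to the two process-wide callbacks registered through eio_init (). A minimal sketch of when the pool is expected to fire them, modelled on the old inline implementation removed further down (the two helper functions here are hypothetical, not part of etp.c):

/* inside the pool, when a finished request is appended to the result queue */
static void
result_enqueued (struct etp_pool *pool, int was_empty)
{
  if (was_empty)
    ETP_WANT_POLL (pool);   /* queue went non-empty: ask the app to poll */
}

/* inside the poll routine, when the last pending result has been handed out */
static void
result_queue_drained (struct etp_pool *pool)
{
  ETP_DONE_POLL (pool);     /* queue is empty again */
}
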
344struct etp_worker; 375struct etp_worker;
345
346#define ETP_REQ eio_req 376#define ETP_REQ eio_req
347#define ETP_DESTROY(req) eio_destroy (req) 377#define ETP_DESTROY(req) eio_destroy (req)
348static int eio_finish (eio_req *req); 378static int eio_finish (eio_req *req);
349#define ETP_FINISH(req) eio_finish (req) 379#define ETP_FINISH(req) eio_finish (req)
350static void eio_execute (struct etp_worker *self, eio_req *req); 380static void eio_execute (struct etp_worker *self, eio_req *req);
351#define ETP_EXECUTE(wrk,req) eio_execute (wrk,req) 381#define ETP_EXECUTE(wrk,req) eio_execute (wrk, req)
352 382
353/*****************************************************************************/ 383#include "etp.c"
354 384
355#define ETP_NUM_PRI (ETP_PRI_MAX - ETP_PRI_MIN + 1) 385static struct etp_pool eio_pool;
356 386#define EIO_POOL (&eio_pool)
357/* calculate time difference in ~1/EIO_TICKS of a second */
358ecb_inline int
359tvdiff (struct timeval *tv1, struct timeval *tv2)
360{
361 return (tv2->tv_sec - tv1->tv_sec ) * EIO_TICKS
362 + ((tv2->tv_usec - tv1->tv_usec) >> 10);
363}
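
As a worked example of the tick arithmetic being removed here: EIO_TICKS is (1000000 + 1023) >> 10 = 977, so one second is roughly 977 ticks and one tick is about 1.024 ms. For tv1 = 10.000000 s and tv2 = 12.500000 s the function returns (12 - 10) * 977 + (500000 >> 10) = 1954 + 488 = 2442 ticks, which etp_poll () compares against max_poll_time = nseconds * EIO_TICKS, so the units match on both sides.
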
364
365static unsigned int started, idle, wanted = 4;
366
367static void (*want_poll_cb) (void);
368static void (*done_poll_cb) (void);
369
370static unsigned int max_poll_time; /* reslock */
371static unsigned int max_poll_reqs; /* reslock */
372
373static unsigned int nreqs; /* reqlock */
374static unsigned int nready; /* reqlock */
375static unsigned int npending; /* reqlock */
376static unsigned int max_idle = 4; /* maximum number of threads that can idle indefinitely */
 377 static unsigned int idle_timeout = 10; /* number of seconds after which idle threads exit */

378
379static xmutex_t wrklock;
380static xmutex_t reslock;
381static xmutex_t reqlock;
382static xcond_t reqwait;
383
384typedef struct etp_worker
385{
386 struct tmpbuf tmpbuf;
387
388 /* locked by wrklock */
389 struct etp_worker *prev, *next;
390
391 xthread_t tid;
392
393#ifdef ETP_WORKER_COMMON
394 ETP_WORKER_COMMON
395#endif
396} etp_worker;
397
398static etp_worker wrk_first; /* NOT etp */
399
400#define ETP_WORKER_LOCK(wrk) X_LOCK (wrklock)
401#define ETP_WORKER_UNLOCK(wrk) X_UNLOCK (wrklock)
402
403/* worker threads management */
404
405static void
406etp_worker_clear (etp_worker *wrk)
407{
408}
409
410static void ecb_cold
411etp_worker_free (etp_worker *wrk)
412{
413 free (wrk->tmpbuf.ptr);
414
415 wrk->next->prev = wrk->prev;
416 wrk->prev->next = wrk->next;
417
418 free (wrk);
419}
420
421static unsigned int
422etp_nreqs (void)
423{
424 int retval;
425 if (WORDACCESS_UNSAFE) X_LOCK (reqlock);
426 retval = nreqs;
427 if (WORDACCESS_UNSAFE) X_UNLOCK (reqlock);
428 return retval;
429}
430
431static unsigned int
432etp_nready (void)
433{
434 unsigned int retval;
435
436 if (WORDACCESS_UNSAFE) X_LOCK (reqlock);
437 retval = nready;
438 if (WORDACCESS_UNSAFE) X_UNLOCK (reqlock);
439
440 return retval;
441}
442
443static unsigned int
444etp_npending (void)
445{
446 unsigned int retval;
447
448 if (WORDACCESS_UNSAFE) X_LOCK (reqlock);
449 retval = npending;
450 if (WORDACCESS_UNSAFE) X_UNLOCK (reqlock);
451
452 return retval;
453}
454
455static unsigned int
456etp_nthreads (void)
457{
458 unsigned int retval;
459
460 if (WORDACCESS_UNSAFE) X_LOCK (reqlock);
461 retval = started;
462 if (WORDACCESS_UNSAFE) X_UNLOCK (reqlock);
463
464 return retval;
465}
466
467/*
468 * a somewhat faster data structure might be nice, but
469 * with 8 priorities this actually needs <20 insns
470 * per shift, the most expensive operation.
471 */
472typedef struct {
473 ETP_REQ *qs[ETP_NUM_PRI], *qe[ETP_NUM_PRI]; /* qstart, qend */
474 int size;
475} etp_reqq;
476
477static etp_reqq req_queue;
478static etp_reqq res_queue;
479
480static void ecb_noinline ecb_cold
481reqq_init (etp_reqq *q)
482{
483 int pri;
484
485 for (pri = 0; pri < ETP_NUM_PRI; ++pri)
486 q->qs[pri] = q->qe[pri] = 0;
487
488 q->size = 0;
489}
490
491static int ecb_noinline
492reqq_push (etp_reqq *q, ETP_REQ *req)
493{
494 int pri = req->pri;
495 req->next = 0;
496
497 if (q->qe[pri])
498 {
499 q->qe[pri]->next = req;
500 q->qe[pri] = req;
501 }
502 else
503 q->qe[pri] = q->qs[pri] = req;
504
505 return q->size++;
506}
507
508static ETP_REQ * ecb_noinline
509reqq_shift (etp_reqq *q)
510{
511 int pri;
512
513 if (!q->size)
514 return 0;
515
516 --q->size;
517
518 for (pri = ETP_NUM_PRI; pri--; )
519 {
520 eio_req *req = q->qs[pri];
521
522 if (req)
523 {
524 if (!(q->qs[pri] = (eio_req *)req->next))
525 q->qe[pri] = 0;
526
527 return req;
528 }
529 }
530
531 abort ();
532}
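
The queue being removed here (and presumably reincarnated inside etp.c) is one FIFO per priority, scanned from the highest priority downwards on shift. A self-contained model using plain ints instead of eio_req, illustration only:

#include <stdio.h>
#include <stdlib.h>

#define NUM_PRI 4

typedef struct mreq { struct mreq *next; int pri, id; } mreq;
typedef struct { mreq *qs[NUM_PRI], *qe[NUM_PRI]; int size; } mreqq;

static void
mpush (mreqq *q, mreq *r)          /* same shape as reqq_push above */
{
  r->next = 0;

  if (q->qe[r->pri])
    q->qe[r->pri]->next = r;
  else
    q->qs[r->pri] = r;

  q->qe[r->pri] = r;
  ++q->size;
}

static mreq *
mshift (mreqq *q)                  /* same shape as reqq_shift above */
{
  int pri;

  if (!q->size)
    return 0;

  --q->size;

  for (pri = NUM_PRI; pri--; )
    if (q->qs[pri])
      {
        mreq *r = q->qs[pri];

        if (!(q->qs[pri] = r->next))
          q->qe[pri] = 0;

        return r;
      }

  abort ();
}

int
main (void)
{
  mreqq q = { { 0 } };
  mreq a = { 0, 0, 1 }, b = { 0, 3, 2 }, c = { 0, 3, 3 };

  mpush (&q, &a);
  mpush (&q, &b);
  mpush (&q, &c);

  /* prints "2 3 1": highest priority first, FIFO within one priority */
  for (;;)
    {
      mreq *r = mshift (&q);

      if (!r)
        break;

      printf ("%d ", r->id);
    }

  putchar ('\n');
  return 0;
}
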
533
534static int ecb_cold
535etp_init (void (*want_poll)(void), void (*done_poll)(void))
536{
537 X_MUTEX_CREATE (wrklock);
538 X_MUTEX_CREATE (reslock);
539 X_MUTEX_CREATE (reqlock);
540 X_COND_CREATE (reqwait);
541
542 reqq_init (&req_queue);
543 reqq_init (&res_queue);
544
545 wrk_first.next =
546 wrk_first.prev = &wrk_first;
547
548 started = 0;
549 idle = 0;
550 nreqs = 0;
551 nready = 0;
552 npending = 0;
553
554 want_poll_cb = want_poll;
555 done_poll_cb = done_poll;
556
557 return 0;
558}
559
560X_THREAD_PROC (etp_proc);
561
562static void ecb_cold
563etp_start_thread (void)
564{
565 etp_worker *wrk = calloc (1, sizeof (etp_worker));
566
567 /*TODO*/
568 assert (("unable to allocate worker thread data", wrk));
569
570 X_LOCK (wrklock);
571
572 if (thread_create (&wrk->tid, etp_proc, (void *)wrk))
573 {
574 wrk->prev = &wrk_first;
575 wrk->next = wrk_first.next;
576 wrk_first.next->prev = wrk;
577 wrk_first.next = wrk;
578 ++started;
579 }
580 else
581 free (wrk);
582
583 X_UNLOCK (wrklock);
584}
585
586static void
587etp_maybe_start_thread (void)
588{
589 if (ecb_expect_true (etp_nthreads () >= wanted))
590 return;
591
592 /* todo: maybe use idle here, but might be less exact */
593 if (ecb_expect_true (0 <= (int)etp_nthreads () + (int)etp_npending () - (int)etp_nreqs ()))
594 return;
595
596 etp_start_thread ();
597}
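
To make the heuristic concrete: once the wanted thread count is reached the function returns immediately; otherwise, with 2 threads started, 1 result pending and 5 requests outstanding, 2 + 1 - 5 = -2 is negative and a worker is started, whereas at 2 + 1 - 3 = 0 the existing threads are considered sufficient. In other words a thread is only added while there are more outstanding requests than threads plus already-finished results.
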
598
599static void ecb_cold
600etp_end_thread (void)
601{
602 eio_req *req = calloc (1, sizeof (eio_req)); /* will be freed by worker */
603
604 req->type = -1;
605 req->pri = ETP_PRI_MAX - ETP_PRI_MIN;
606
607 X_LOCK (reqlock);
608 reqq_push (&req_queue, req);
609 X_COND_SIGNAL (reqwait);
610 X_UNLOCK (reqlock);
611
612 X_LOCK (wrklock);
613 --started;
614 X_UNLOCK (wrklock);
615}
616
617static int
618etp_poll (void)
619{
620 unsigned int maxreqs;
621 unsigned int maxtime;
622 struct timeval tv_start, tv_now;
623
624 X_LOCK (reslock);
625 maxreqs = max_poll_reqs;
626 maxtime = max_poll_time;
627 X_UNLOCK (reslock);
628
629 if (maxtime)
630 gettimeofday (&tv_start, 0);
631
632 for (;;)
633 {
634 ETP_REQ *req;
635
636 etp_maybe_start_thread ();
637
638 X_LOCK (reslock);
639 req = reqq_shift (&res_queue);
640
641 if (req)
642 {
643 --npending;
644
645 if (!res_queue.size && done_poll_cb)
646 done_poll_cb ();
647 }
648
649 X_UNLOCK (reslock);
650
651 if (!req)
652 return 0;
653
654 X_LOCK (reqlock);
655 --nreqs;
656 X_UNLOCK (reqlock);
657
658 if (ecb_expect_false (req->type == EIO_GROUP && req->size))
659 {
660 req->int1 = 1; /* mark request as delayed */
661 continue;
662 }
663 else
664 {
665 int res = ETP_FINISH (req);
666 if (ecb_expect_false (res))
667 return res;
668 }
669
670 if (ecb_expect_false (maxreqs && !--maxreqs))
671 break;
672
673 if (maxtime)
674 {
675 gettimeofday (&tv_now, 0);
676
677 if (tvdiff (&tv_start, &tv_now) >= maxtime)
678 break;
679 }
680 }
681
682 errno = EAGAIN;
683 return -1;
684}
685
686static void
687etp_cancel (ETP_REQ *req)
688{
689 req->cancelled = 1;
690
691 eio_grp_cancel (req);
692}
693
694static void
695etp_submit (ETP_REQ *req)
696{
697 req->pri -= ETP_PRI_MIN;
698
699 if (ecb_expect_false (req->pri < ETP_PRI_MIN - ETP_PRI_MIN)) req->pri = ETP_PRI_MIN - ETP_PRI_MIN;
700 if (ecb_expect_false (req->pri > ETP_PRI_MAX - ETP_PRI_MIN)) req->pri = ETP_PRI_MAX - ETP_PRI_MIN;
701
702 if (ecb_expect_false (req->type == EIO_GROUP))
703 {
704 /* I hope this is worth it :/ */
705 X_LOCK (reqlock);
706 ++nreqs;
707 X_UNLOCK (reqlock);
708
709 X_LOCK (reslock);
710
711 ++npending;
712
713 if (!reqq_push (&res_queue, req) && want_poll_cb)
714 want_poll_cb ();
715
716 X_UNLOCK (reslock);
717 }
718 else
719 {
720 X_LOCK (reqlock);
721 ++nreqs;
722 ++nready;
723 reqq_push (&req_queue, req);
724 X_COND_SIGNAL (reqwait);
725 X_UNLOCK (reqlock);
726
727 etp_maybe_start_thread ();
728 }
729}
730
731static void ecb_cold
732etp_set_max_poll_time (double nseconds)
733{
734 if (WORDACCESS_UNSAFE) X_LOCK (reslock);
735 max_poll_time = nseconds * EIO_TICKS;
736 if (WORDACCESS_UNSAFE) X_UNLOCK (reslock);
737}
738
739static void ecb_cold
740etp_set_max_poll_reqs (unsigned int maxreqs)
741{
742 if (WORDACCESS_UNSAFE) X_LOCK (reslock);
743 max_poll_reqs = maxreqs;
744 if (WORDACCESS_UNSAFE) X_UNLOCK (reslock);
745}
746
747static void ecb_cold
748etp_set_max_idle (unsigned int nthreads)
749{
750 if (WORDACCESS_UNSAFE) X_LOCK (reqlock);
751 max_idle = nthreads;
752 if (WORDACCESS_UNSAFE) X_UNLOCK (reqlock);
753}
754
755static void ecb_cold
756etp_set_idle_timeout (unsigned int seconds)
757{
758 if (WORDACCESS_UNSAFE) X_LOCK (reqlock);
759 idle_timeout = seconds;
760 if (WORDACCESS_UNSAFE) X_UNLOCK (reqlock);
761}
762
763static void ecb_cold
764etp_set_min_parallel (unsigned int nthreads)
765{
766 if (wanted < nthreads)
767 wanted = nthreads;
768}
769
770static void ecb_cold
771etp_set_max_parallel (unsigned int nthreads)
772{
773 if (wanted > nthreads)
774 wanted = nthreads;
775
776 while (started > wanted)
777 etp_end_thread ();
778}
779 387
780/*****************************************************************************/ 388/*****************************************************************************/
781 389
782static void 390static void
783grp_try_feed (eio_req *grp) 391grp_try_feed (eio_req *grp)
784{ 392{
785 while (grp->size < grp->int2 && !EIO_CANCELLED (grp)) 393 while (grp->size < grp->int2 && !EIO_CANCELLED (grp))
786 { 394 {
787 grp->flags &= ~EIO_FLAG_GROUPADD; 395 grp->flags &= ~ETP_FLAG_GROUPADD;
788 396
789 EIO_FEED (grp); 397 EIO_FEED (grp);
790 398
791 /* stop if no progress has been made */ 399 /* stop if no progress has been made */
792 if (!(grp->flags & EIO_FLAG_GROUPADD)) 400 if (!(grp->flags & ETP_FLAG_GROUPADD))
793 { 401 {
794 grp->feed = 0; 402 grp->feed = 0;
795 break; 403 break;
796 } 404 }
797 } 405 }
804 412
805 /* call feeder, if applicable */ 413 /* call feeder, if applicable */
806 grp_try_feed (grp); 414 grp_try_feed (grp);
807 415
808 /* finish, if done */ 416 /* finish, if done */
809 if (!grp->size && grp->int1) 417 if (!grp->size && grp->flags & ETP_FLAG_DELAYED)
810 return eio_finish (grp); 418 return eio_finish (grp);
811 else 419 else
812 return 0; 420 return 0;
813} 421}
814 422
850} 458}
851 459
852void 460void
853eio_grp_cancel (eio_req *grp) 461eio_grp_cancel (eio_req *grp)
854{ 462{
855 for (grp = grp->grp_first; grp; grp = grp->grp_next) 463 etp_grp_cancel (EIO_POOL, grp);
856 eio_cancel (grp);
857} 464}
858 465
859void 466void
860eio_cancel (eio_req *req) 467eio_cancel (eio_req *req)
861{ 468{
862 etp_cancel (req); 469 etp_cancel (EIO_POOL, req);
863} 470}
864 471
865void 472void
866eio_submit (eio_req *req) 473eio_submit (eio_req *req)
867{ 474{
868 etp_submit (req); 475 etp_submit (EIO_POOL, req);
869} 476}
870 477
871unsigned int 478unsigned int
872eio_nreqs (void) 479eio_nreqs (void)
873{ 480{
874 return etp_nreqs (); 481 return etp_nreqs (EIO_POOL);
875} 482}
876 483
877unsigned int 484unsigned int
878eio_nready (void) 485eio_nready (void)
879{ 486{
880 return etp_nready (); 487 return etp_nready (EIO_POOL);
881} 488}
882 489
883unsigned int 490unsigned int
884eio_npending (void) 491eio_npending (void)
885{ 492{
886 return etp_npending (); 493 return etp_npending (EIO_POOL);
887} 494}
888 495
889unsigned int ecb_cold 496unsigned int ecb_cold
890eio_nthreads (void) 497eio_nthreads (void)
891{ 498{
892 return etp_nthreads (); 499 return etp_nthreads (EIO_POOL);
893} 500}
894 501
895void ecb_cold 502void ecb_cold
896eio_set_max_poll_time (double nseconds) 503eio_set_max_poll_time (double nseconds)
897{ 504{
898 etp_set_max_poll_time (nseconds); 505 etp_set_max_poll_time (EIO_POOL, nseconds);
899} 506}
900 507
901void ecb_cold 508void ecb_cold
902eio_set_max_poll_reqs (unsigned int maxreqs) 509eio_set_max_poll_reqs (unsigned int maxreqs)
903{ 510{
904 etp_set_max_poll_reqs (maxreqs); 511 etp_set_max_poll_reqs (EIO_POOL, maxreqs);
905} 512}
906 513
907void ecb_cold 514void ecb_cold
908eio_set_max_idle (unsigned int nthreads) 515eio_set_max_idle (unsigned int nthreads)
909{ 516{
910 etp_set_max_idle (nthreads); 517 etp_set_max_idle (EIO_POOL, nthreads);
911} 518}
912 519
913void ecb_cold 520void ecb_cold
914eio_set_idle_timeout (unsigned int seconds) 521eio_set_idle_timeout (unsigned int seconds)
915{ 522{
916 etp_set_idle_timeout (seconds); 523 etp_set_idle_timeout (EIO_POOL, seconds);
917} 524}
918 525
919void ecb_cold 526void ecb_cold
920eio_set_min_parallel (unsigned int nthreads) 527eio_set_min_parallel (unsigned int nthreads)
921{ 528{
922 etp_set_min_parallel (nthreads); 529 etp_set_min_parallel (EIO_POOL, nthreads);
923} 530}
924 531
925void ecb_cold 532void ecb_cold
926eio_set_max_parallel (unsigned int nthreads) 533eio_set_max_parallel (unsigned int nthreads)
927{ 534{
928 etp_set_max_parallel (nthreads); 535 etp_set_max_parallel (EIO_POOL, nthreads);
929} 536}
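
A usage sketch of these tuning knobs taken together (the values are arbitrary examples):

#include "eio.h"

static void
tune_pool (void)
{
  eio_set_min_parallel (8);      /* keep at least 8 worker threads around  */
  eio_set_max_parallel (16);     /* but never more than 16                 */
  eio_set_max_idle (2);          /* at most 2 may idle indefinitely        */
  eio_set_idle_timeout (10);     /* extra idle workers exit after 10 s     */
  eio_set_max_poll_time (0.01);  /* spend at most ~10 ms per eio_poll ()   */
  eio_set_max_poll_reqs (100);   /* finish at most 100 requests per call   */
}
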
930 537
931int eio_poll (void) 538int eio_poll (void)
932{ 539{
933 return etp_poll (); 540 return etp_poll (EIO_POOL);
934} 541}
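
Per the (removed) etp_poll above, eio_poll () returns 0 once the result queue is drained and -1 with errno set to EAGAIN when one of the limits stopped it early. A sketch of driving it from a wakeup handler (the rescheduling helper is hypothetical):

#include <errno.h>
#include "eio.h"

static void reschedule_poll (void);   /* hypothetical: arrange to call on_ready again */

static void
on_ready (void)                       /* invoked after want_poll signalled the loop */
{
  if (eio_poll () == -1 && errno == EAGAIN)
    reschedule_poll ();               /* limits hit, more results still pending */
}
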
935 542
936/*****************************************************************************/ 543/*****************************************************************************/
937/* work around various missing functions */ 544/* work around various missing functions */
938 545
1348 req->result = req->offs == (off_t)-1 ? -1 : 0; 955 req->result = req->offs == (off_t)-1 ? -1 : 0;
1349} 956}
1350 957
1351/* result will always end up in tmpbuf, there is always space for adding a 0-byte */ 958/* result will always end up in tmpbuf, there is always space for adding a 0-byte */
1352static int 959static int
1353eio__realpath (struct tmpbuf *tmpbuf, eio_wd wd, const char *path) 960eio__realpath (struct etp_tmpbuf *tmpbuf, eio_wd wd, const char *path)
1354{ 961{
962 char *res;
1355 const char *rel = path; 963 const char *rel = path;
1356 char *res;
1357 char *tmp1, *tmp2; 964 char *tmp1, *tmp2;
1358#if SYMLOOP_MAX > 32 965#if SYMLOOP_MAX > 32
1359 int symlinks = SYMLOOP_MAX; 966 int symlinks = SYMLOOP_MAX;
1360#else 967#else
1361 int symlinks = 32; 968 int symlinks = 32;
1367 974
1368 errno = ENOENT; 975 errno = ENOENT;
1369 if (!*rel) 976 if (!*rel)
1370 return -1; 977 return -1;
1371 978
1372 res = tmpbuf_get (tmpbuf, PATH_MAX * 3); 979 res = etp_tmpbuf_get (tmpbuf, PATH_MAX * 3);
980#ifdef _WIN32
981 if (_access (rel, 4) != 0)
982 return -1;
983
984 symlinks = GetFullPathName (rel, PATH_MAX * 3, res, 0);
985
986 errno = ENAMETOOLONG;
987 if (symlinks >= PATH_MAX * 3)
988 return -1;
989
990 errno = EIO;
991 if (symlinks <= 0)
992 return -1;
993
994 return symlinks;
995
996#else
1373 tmp1 = res + PATH_MAX; 997 tmp1 = res + PATH_MAX;
1374 tmp2 = tmp1 + PATH_MAX; 998 tmp2 = tmp1 + PATH_MAX;
1375 999
1376#if 0 /* disabled, the musl way to do things is just too racy */ 1000#if 0 /* disabled, the musl way to do things is just too racy */
1377#if __linux && defined(O_NONBLOCK) && defined(O_NOATIME) 1001#if __linux && defined(O_NONBLOCK) && defined(O_NOATIME)
1381 1005
1382 if (fd >= 0) 1006 if (fd >= 0)
1383 { 1007 {
1384 sprintf (tmp1, "/proc/self/fd/%d", fd); 1008 sprintf (tmp1, "/proc/self/fd/%d", fd);
1385 req->result = readlink (tmp1, res, PATH_MAX); 1009 req->result = readlink (tmp1, res, PATH_MAX);
1010 /* here we should probably stat the open file and the disk file, to make sure they still match */
1386 close (fd); 1011 close (fd);
1387
1388 /* here we should probably stat the open file and the disk file, to make sure they still match */
1389 1012
1390 if (req->result > 0) 1013 if (req->result > 0)
1391 goto done; 1014 goto done;
1392 } 1015 }
1393 else if (errno == ELOOP || errno == ENAMETOOLONG || errno == ENOENT || errno == ENOTDIR || errno == EIO) 1016 else if (errno == ELOOP || errno == ENAMETOOLONG || errno == ENOENT || errno == ENOTDIR || errno == EIO)
1394 return; 1017 return -1;
1395 } 1018 }
1396#endif 1019#endif
1397#endif 1020#endif
1398 1021
1399 if (*rel != '/') 1022 if (*rel != '/')
1501 /* special case for the lone root path */ 1124 /* special case for the lone root path */
1502 if (res == tmpbuf->ptr) 1125 if (res == tmpbuf->ptr)
1503 *res++ = '/'; 1126 *res++ = '/';
1504 1127
1505 return res - (char *)tmpbuf->ptr; 1128 return res - (char *)tmpbuf->ptr;
1129#endif
1506} 1130}
1507 1131
1508static signed char 1132static signed char
1509eio_dent_cmp (const eio_dirent *a, const eio_dirent *b) 1133eio_dent_cmp (const eio_dirent *a, const eio_dirent *b)
1510{ 1134{
1883 #ifdef DT_FIFO 1507 #ifdef DT_FIFO
1884 case DT_FIFO: ent->type = EIO_DT_FIFO; break; 1508 case DT_FIFO: ent->type = EIO_DT_FIFO; break;
1885 #endif 1509 #endif
1886 #ifdef DT_CHR 1510 #ifdef DT_CHR
1887 case DT_CHR: ent->type = EIO_DT_CHR; break; 1511 case DT_CHR: ent->type = EIO_DT_CHR; break;
1888 #endif 1512 #endif
1889 #ifdef DT_MPC 1513 #ifdef DT_MPC
1890 case DT_MPC: ent->type = EIO_DT_MPC; break; 1514 case DT_MPC: ent->type = EIO_DT_MPC; break;
1891 #endif 1515 #endif
1892 #ifdef DT_DIR 1516 #ifdef DT_DIR
1893 case DT_DIR: ent->type = EIO_DT_DIR; break; 1517 case DT_DIR: ent->type = EIO_DT_DIR; break;
1894 #endif 1518 #endif
1895 #ifdef DT_NAM 1519 #ifdef DT_NAM
1896 case DT_NAM: ent->type = EIO_DT_NAM; break; 1520 case DT_NAM: ent->type = EIO_DT_NAM; break;
1897 #endif 1521 #endif
1898 #ifdef DT_BLK 1522 #ifdef DT_BLK
1899 case DT_BLK: ent->type = EIO_DT_BLK; break; 1523 case DT_BLK: ent->type = EIO_DT_BLK; break;
1900 #endif 1524 #endif
1901 #ifdef DT_MPB 1525 #ifdef DT_MPB
1902 case DT_MPB: ent->type = EIO_DT_MPB; break; 1526 case DT_MPB: ent->type = EIO_DT_MPB; break;
1903 #endif 1527 #endif
1904 #ifdef DT_REG 1528 #ifdef DT_REG
1905 case DT_REG: ent->type = EIO_DT_REG; break; 1529 case DT_REG: ent->type = EIO_DT_REG; break;
1906 #endif 1530 #endif
1907 #ifdef DT_NWK 1531 #ifdef DT_NWK
1908 case DT_NWK: ent->type = EIO_DT_NWK; break; 1532 case DT_NWK: ent->type = EIO_DT_NWK; break;
1909 #endif 1533 #endif
1910 #ifdef DT_CMP 1534 #ifdef DT_CMP
1911 case DT_CMP: ent->type = EIO_DT_CMP; break; 1535 case DT_CMP: ent->type = EIO_DT_CMP; break;
1912 #endif 1536 #endif
1913 #ifdef DT_LNK 1537 #ifdef DT_LNK
1914 case DT_LNK: ent->type = EIO_DT_LNK; break; 1538 case DT_LNK: ent->type = EIO_DT_LNK; break;
1915 #endif 1539 #endif
1916 #ifdef DT_SOCK 1540 #ifdef DT_SOCK
1917 case DT_SOCK: ent->type = EIO_DT_SOCK; break; 1541 case DT_SOCK: ent->type = EIO_DT_SOCK; break;
1969#if !HAVE_AT 1593#if !HAVE_AT
1970 1594
 1971/* a bit like realpath, but usually faster because it doesn't have to return */ 1595/* a bit like realpath, but usually faster because it doesn't have to return */
1972/* an absolute or canonical path */ 1596/* an absolute or canonical path */
1973static const char * 1597static const char *
1974wd_expand (struct tmpbuf *tmpbuf, eio_wd wd, const char *path) 1598wd_expand (struct etp_tmpbuf *tmpbuf, eio_wd wd, const char *path)
1975{ 1599{
1976 if (!wd || *path == '/') 1600 if (!wd || *path == '/')
1977 return path; 1601 return path;
1978 1602
1979 if (path [0] == '.' && !path [1]) 1603 if (path [0] == '.' && !path [1])
1981 1605
1982 { 1606 {
1983 int l1 = wd->len; 1607 int l1 = wd->len;
1984 int l2 = strlen (path); 1608 int l2 = strlen (path);
1985 1609
1986 char *res = tmpbuf_get (tmpbuf, l1 + l2 + 2); 1610 char *res = etp_tmpbuf_get (tmpbuf, l1 + l2 + 2);
1987 1611
1988 memcpy (res, wd->str, l1); 1612 memcpy (res, wd->str, l1);
1989 res [l1] = '/'; 1613 res [l1] = '/';
1990 memcpy (res + l1 + 1, path, l2 + 1); 1614 memcpy (res + l1 + 1, path, l2 + 1);
1991 1615
1994} 1618}
1995 1619
1996#endif 1620#endif
1997 1621
1998static eio_wd 1622static eio_wd
1999eio__wd_open_sync (struct tmpbuf *tmpbuf, eio_wd wd, const char *path) 1623eio__wd_open_sync (struct etp_tmpbuf *tmpbuf, eio_wd wd, const char *path)
2000{ 1624{
2001 int fd; 1625 int fd;
2002 eio_wd res; 1626 eio_wd res;
2003 int len = eio__realpath (tmpbuf, wd, path); 1627 int len = eio__realpath (tmpbuf, wd, path);
2004 1628
2026} 1650}
2027 1651
2028eio_wd 1652eio_wd
2029eio_wd_open_sync (eio_wd wd, const char *path) 1653eio_wd_open_sync (eio_wd wd, const char *path)
2030{ 1654{
2031 struct tmpbuf tmpbuf = { 0 }; 1655 struct etp_tmpbuf tmpbuf = { };
2032 wd = eio__wd_open_sync (&tmpbuf, wd, path); 1656 wd = eio__wd_open_sync (&tmpbuf, wd, path);
2033 free (tmpbuf.ptr); 1657 free (tmpbuf.ptr);
2034 1658
2035 return wd; 1659 return wd;
2036} 1660}
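
A usage sketch for the synchronous working-directory helper (the path is an arbitrary example; as wd_expand above suggests, a 0 base wd makes the path relative to the process working directory):

#include "eio.h"

static eio_wd
open_spool_wd (void)
{
  /* resolve the directory once up front, then reuse the handle */
  return eio_wd_open_sync (0, "/var/spool/example");
}
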
2085/*****************************************************************************/ 1709/*****************************************************************************/
2086 1710
2087#define ALLOC(len) \ 1711#define ALLOC(len) \
2088 if (!req->ptr2) \ 1712 if (!req->ptr2) \
2089 { \ 1713 { \
2090 X_LOCK (wrklock); \ 1714 X_LOCK (EIO_POOL->wrklock); \
2091 req->flags |= EIO_FLAG_PTR2_FREE; \ 1715 req->flags |= EIO_FLAG_PTR2_FREE; \
2092 X_UNLOCK (wrklock); \ 1716 X_UNLOCK (EIO_POOL->wrklock); \
2093 req->ptr2 = malloc (len); \ 1717 req->ptr2 = malloc (len); \
2094 if (!req->ptr2) \ 1718 if (!req->ptr2) \
2095 { \ 1719 { \
2096 errno = ENOMEM; \ 1720 errno = ENOMEM; \
2097 req->result = -1; \ 1721 req->result = -1; \
2098 break; \ 1722 break; \
2099 } \ 1723 } \
2100 } 1724 }
2101 1725
2102static void ecb_noinline ecb_cold
2103etp_proc_init (void)
2104{
2105#if HAVE_PRCTL_SET_NAME
2106 /* provide a more sensible "thread name" */
2107 char name[16 + 1];
2108 const int namelen = sizeof (name) - 1;
2109 int len;
2110
2111 prctl (PR_GET_NAME, (unsigned long)name, 0, 0, 0);
2112 name [namelen] = 0;
2113 len = strlen (name);
2114 strcpy (name + (len <= namelen - 4 ? len : namelen - 4), "/eio");
2115 prctl (PR_SET_NAME, (unsigned long)name, 0, 0, 0);
2116#endif
2117}
2118
2119X_THREAD_PROC (etp_proc)
2120{
2121 ETP_REQ *req;
2122 struct timespec ts;
2123 etp_worker *self = (etp_worker *)thr_arg;
2124
2125 etp_proc_init ();
2126
2127 /* try to distribute timeouts somewhat evenly */
2128 ts.tv_nsec = ((unsigned long)self & 1023UL) * (1000000000UL / 1024UL);
2129
2130 for (;;)
2131 {
2132 ts.tv_sec = 0;
2133
2134 X_LOCK (reqlock);
2135
2136 for (;;)
2137 {
2138 req = reqq_shift (&req_queue);
2139
2140 if (req)
2141 break;
2142
2143 if (ts.tv_sec == 1) /* no request, but timeout detected, let's quit */
2144 {
2145 X_UNLOCK (reqlock);
2146 X_LOCK (wrklock);
2147 --started;
2148 X_UNLOCK (wrklock);
2149 goto quit;
2150 }
2151
2152 ++idle;
2153
2154 if (idle <= max_idle)
2155 /* we are allowed to idle, so do so without any timeout */
2156 X_COND_WAIT (reqwait, reqlock);
2157 else
2158 {
2159 /* initialise timeout once */
2160 if (!ts.tv_sec)
2161 ts.tv_sec = time (0) + idle_timeout;
2162
2163 if (X_COND_TIMEDWAIT (reqwait, reqlock, ts) == ETIMEDOUT)
 2164 ts.tv_sec = 1; /* assuming this is not a value computed above... */
2165 }
2166
2167 --idle;
2168 }
2169
2170 --nready;
2171
2172 X_UNLOCK (reqlock);
2173
2174 if (req->type < 0)
2175 goto quit;
2176
2177 ETP_EXECUTE (self, req);
2178
2179 X_LOCK (reslock);
2180
2181 ++npending;
2182
2183 if (!reqq_push (&res_queue, req) && want_poll_cb)
2184 want_poll_cb ();
2185
2186 etp_worker_clear (self);
2187
2188 X_UNLOCK (reslock);
2189 }
2190
2191quit:
2192 free (req);
2193
2194 X_LOCK (wrklock);
2195 etp_worker_free (self);
2196 X_UNLOCK (wrklock);
2197}
2198
2199/*****************************************************************************/ 1726/*****************************************************************************/
2200 1727
2201int ecb_cold 1728int ecb_cold
2202eio_init (void (*want_poll)(void), void (*done_poll)(void)) 1729eio_init (void (*want_poll)(void), void (*done_poll)(void))
2203{ 1730{
2204 return etp_init (want_poll, done_poll); 1731 eio_want_poll_cb = want_poll;
1732 eio_done_poll_cb = done_poll;
1733
1734 return etp_init (EIO_POOL, 0, 0, 0);
2205} 1735}
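
A minimal integration sketch of the callback contract (POSIX-only illustration; the wakeup_pipe plumbing and the event loop watching its read end are assumptions, not part of libeio): want_poll and done_poll may be invoked from worker threads, so they only signal the loop, and eio_poll () itself runs on the loop thread.

#include <unistd.h>
#include <fcntl.h>
#include "eio.h"

static int wakeup_pipe [2];               /* read end watched by the event loop */

static void
want_poll (void)                          /* results became pending */
{
  char c = 0;
  (void)write (wakeup_pipe [1], &c, 1);   /* wake the event loop */
}

static void
done_poll (void)                          /* result queue drained again */
{
  char c;
  while (read (wakeup_pipe [0], &c, 1) == 1)
    ;                                     /* drain wakeups; fd is non-blocking */
}

static int
start_eio (void)
{
  if (pipe (wakeup_pipe)
      || fcntl (wakeup_pipe [0], F_SETFL, O_NONBLOCK) == -1)
    return -1;

  return eio_init (want_poll, done_poll); /* 0 on success */
}
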
2206 1736
2207ecb_inline void 1737ecb_inline void
2208eio_api_destroy (eio_req *req) 1738eio_api_destroy (eio_req *req)
2209{ 1739{
2232 { \ 1762 { \
2233 eio_api_destroy (req); \ 1763 eio_api_destroy (req); \
2234 return 0; \ 1764 return 0; \
2235 } 1765 }
2236 1766
1767#define SINGLEDOT(ptr) (0[(char *)(ptr)] == '.' && !1[(char *)(ptr)])
1768
2237static void 1769static void
2238eio_execute (etp_worker *self, eio_req *req) 1770eio_execute (etp_worker *self, eio_req *req)
2239{ 1771{
2240#if HAVE_AT 1772#if HAVE_AT
2241 int dirfd; 1773 int dirfd;
2281 : read (req->int1, req->ptr2, req->size); break; 1813 : read (req->int1, req->ptr2, req->size); break;
2282 case EIO_WRITE: req->result = req->offs >= 0 1814 case EIO_WRITE: req->result = req->offs >= 0
2283 ? pwrite (req->int1, req->ptr2, req->size, req->offs) 1815 ? pwrite (req->int1, req->ptr2, req->size, req->offs)
2284 : write (req->int1, req->ptr2, req->size); break; 1816 : write (req->int1, req->ptr2, req->size); break;
2285 1817
1818 case EIO_FCNTL: req->result = fcntl (req->int1, (int) req->int2, req->ptr2); break;
1819 case EIO_IOCTL: req->result = ioctl (req->int1, (unsigned long)req->int2, req->ptr2); break;
1820
2286 case EIO_READAHEAD: req->result = readahead (req->int1, req->offs, req->size); break; 1821 case EIO_READAHEAD: req->result = readahead (req->int1, req->offs, req->size); break;
2287 case EIO_SENDFILE: req->result = eio__sendfile (req->int1, req->int2, req->offs, req->size); break; 1822 case EIO_SENDFILE: req->result = eio__sendfile (req->int1, req->int2, req->offs, req->size); break;
2288 1823
2289#if HAVE_AT 1824#if HAVE_AT
2290 1825
2296 case EIO_CHMOD: req->result = fchmodat (dirfd, req->ptr1, (mode_t)req->int2, 0); break; 1831 case EIO_CHMOD: req->result = fchmodat (dirfd, req->ptr1, (mode_t)req->int2, 0); break;
2297 case EIO_TRUNCATE: req->result = eio__truncateat (dirfd, req->ptr1, req->offs); break; 1832 case EIO_TRUNCATE: req->result = eio__truncateat (dirfd, req->ptr1, req->offs); break;
2298 case EIO_OPEN: req->result = openat (dirfd, req->ptr1, req->int1, (mode_t)req->int2); break; 1833 case EIO_OPEN: req->result = openat (dirfd, req->ptr1, req->int1, (mode_t)req->int2); break;
2299 1834
2300 case EIO_UNLINK: req->result = unlinkat (dirfd, req->ptr1, 0); break; 1835 case EIO_UNLINK: req->result = unlinkat (dirfd, req->ptr1, 0); break;
2301 case EIO_RMDIR: req->result = unlinkat (dirfd, req->ptr1, AT_REMOVEDIR); break; 1836 case EIO_RMDIR: /* complications arise because "." cannot be removed, so we might have to expand */
1837 req->result = req->wd && SINGLEDOT (req->ptr1)
1838 ? rmdir (req->wd->str)
1839 : unlinkat (dirfd, req->ptr1, AT_REMOVEDIR); break;
2302 case EIO_MKDIR: req->result = mkdirat (dirfd, req->ptr1, (mode_t)req->int2); break; 1840 case EIO_MKDIR: req->result = mkdirat (dirfd, req->ptr1, (mode_t)req->int2); break;
2303 case EIO_RENAME: req->result = renameat (dirfd, req->ptr1, WD2FD ((eio_wd)req->int3), req->ptr2); break; 1841 case EIO_RENAME: /* complications arise because "." cannot be renamed, so we might have to expand */
1842 req->result = req->wd && SINGLEDOT (req->ptr1)
1843 ? rename (req->wd->str, req->ptr2)
1844 : renameat (dirfd, req->ptr1, WD2FD ((eio_wd)req->int3), req->ptr2); break;
2304 case EIO_LINK: req->result = linkat (dirfd, req->ptr1, WD2FD ((eio_wd)req->int3), req->ptr2, 0); break; 1845 case EIO_LINK: req->result = linkat (dirfd, req->ptr1, WD2FD ((eio_wd)req->int3), req->ptr2, 0); break;
2305 case EIO_SYMLINK: req->result = symlinkat (req->ptr1, dirfd, req->ptr2); break; 1846 case EIO_SYMLINK: req->result = symlinkat (req->ptr1, dirfd, req->ptr2); break;
2306 case EIO_MKNOD: req->result = mknodat (dirfd, req->ptr1, (mode_t)req->int2, (dev_t)req->offs); break; 1847 case EIO_MKNOD: req->result = mknodat (dirfd, req->ptr1, (mode_t)req->int2, (dev_t)req->offs); break;
2307 case EIO_READLINK: ALLOC (PATH_MAX); 1848 case EIO_READLINK: ALLOC (PATH_MAX);
2308 req->result = readlinkat (dirfd, req->ptr1, req->ptr2, PATH_MAX); break; 1849 req->result = readlinkat (dirfd, req->ptr1, req->ptr2, PATH_MAX); break;
2426 req->result = select (0, 0, 0, 0, &tv); 1967 req->result = select (0, 0, 0, 0, &tv);
2427 } 1968 }
2428#endif 1969#endif
2429 break; 1970 break;
2430 1971
1972#if 0
2431 case EIO_GROUP: 1973 case EIO_GROUP:
2432 abort (); /* handled in eio_request */ 1974 abort (); /* handled in eio_request */
1975#endif
2433 1976
2434 case EIO_NOP: 1977 case EIO_NOP:
2435 req->result = 0; 1978 req->result = 0;
2436 break; 1979 break;
2437 1980
2540} 2083}
2541 2084
2542eio_req *eio_write (int fd, void *buf, size_t length, off_t offset, int pri, eio_cb cb, void *data) 2085eio_req *eio_write (int fd, void *buf, size_t length, off_t offset, int pri, eio_cb cb, void *data)
2543{ 2086{
2544 REQ (EIO_WRITE); req->int1 = fd; req->offs = offset; req->size = length; req->ptr2 = buf; SEND; 2087 REQ (EIO_WRITE); req->int1 = fd; req->offs = offset; req->size = length; req->ptr2 = buf; SEND;
2088}
2089
2090eio_req *eio_fcntl (int fd, int cmd, void *arg, int pri, eio_cb cb, void *data)
2091{
 2092 REQ (EIO_FCNTL); req->int1 = fd; req->int2 = cmd; req->ptr2 = arg; SEND;
2093}
2094
2095eio_req *eio_ioctl (int fd, unsigned long request, void *buf, int pri, eio_cb cb, void *data)
2096{
2097 REQ (EIO_IOCTL); req->int1 = fd; req->int2 = request; req->ptr2 = buf; SEND;
2545} 2098}
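
A usage sketch for the new wrappers (the fd and the callback are placeholders): the return value of the underlying fcntl ()/ioctl () call arrives in req->result, and the errorno member of eio_req is assumed to carry the errno value on failure.

#include <fcntl.h>
#include "eio.h"

static int
getfl_done (eio_req *req)            /* placeholder completion callback */
{
  if (req->result < 0)
    return 0;                        /* req->errorno holds the errno value */

  /* req->result now holds the descriptor's status flags */
  return 0;
}

static void
queue_getfl (int fd)
{
  eio_fcntl (fd, F_GETFL, 0, 0, getfl_done, 0);
}
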
2546 2099
2547eio_req *eio_fstat (int fd, int pri, eio_cb cb, void *data) 2100eio_req *eio_fstat (int fd, int pri, eio_cb cb, void *data)
2548{ 2101{
2549 REQ (EIO_FSTAT); req->int1 = fd; SEND; 2102 REQ (EIO_FSTAT); req->int1 = fd; SEND;
2737void 2290void
2738eio_grp_add (eio_req *grp, eio_req *req) 2291eio_grp_add (eio_req *grp, eio_req *req)
2739{ 2292{
2740 assert (("cannot add requests to IO::AIO::GRP after the group finished", grp->int1 != 2)); 2293 assert (("cannot add requests to IO::AIO::GRP after the group finished", grp->int1 != 2));
2741 2294
2742 grp->flags |= EIO_FLAG_GROUPADD; 2295 grp->flags |= ETP_FLAG_GROUPADD;
2743 2296
2744 ++grp->size; 2297 ++grp->size;
2745 req->grp = grp; 2298 req->grp = grp;
2746 2299
2747 req->grp_prev = 0; 2300 req->grp_prev = 0;
