/*
 * libeio implementation
 *
 * Copyright (c) 2007,2008,2009,2010,2011 Marc Alexander Lehmann <libeio@schmorp.de>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without modifica-
 * tion, are permitted provided that the following conditions are met:
 *
 *   1.  Redistributions of source code must retain the above copyright notice,
 *       this list of conditions and the following disclaimer.
 *
 *   2.  Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in the
 *       documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
 * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MER-
 * CHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO
 * EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPE-
 * CIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
 * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTH-
 * ERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
 * OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Alternatively, the contents of this file may be used under the terms of
 * the GNU General Public License ("GPL") version 2 or any later version,
 * in which case the provisions of the GPL are applicable instead of
 * the above. If you wish to allow the use of your version of this file
 * only under the terms of the GPL and not to allow others to use your
 * version of this file under the BSD license, indicate your decision
 * by deleting the provisions above and replace them with the notice
 * and other provisions required by the GPL. If you do not delete the
 * provisions above, a recipient may use your version of this file under
 * either the BSD or the GPL.
 */

#ifndef _WIN32
# include "config.h"
#endif

#include "eio.h"
#include "ecb.h"

#ifdef EIO_STACKSIZE
# define X_STACKSIZE EIO_STACKSIZE
#endif
#include "xthread.h"

#include <errno.h>
#include <stddef.h>
#include <stdlib.h>
#include <string.h>
#include <errno.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <limits.h>
#include <fcntl.h>
#include <assert.h>

/* intptr_t comes from unistd.h, says POSIX/UNIX/tradition */
/* intptr_t only comes from stdint.h, says idiot openbsd coder */
#if HAVE_STDINT_H
# include <stdint.h>
#endif

#ifndef ECANCELED
# define ECANCELED EDOM
#endif
#ifndef ELOOP
# define ELOOP EDOM
#endif

#if !defined(ENOTSOCK) && defined(WSAENOTSOCK)
# define ENOTSOCK WSAENOTSOCK
#endif

static void eio_destroy (eio_req *req);

#ifndef EIO_FINISH
# define EIO_FINISH(req)  ((req)->finish) && !EIO_CANCELLED (req) ? (req)->finish (req) : 0
#endif

#ifndef EIO_DESTROY
# define EIO_DESTROY(req) do { if ((req)->destroy) (req)->destroy (req); } while (0)
#endif

#ifndef EIO_FEED
# define EIO_FEED(req)    do { if ((req)->feed   ) (req)->feed   (req); } while (0)
#endif

#ifndef EIO_FD_TO_WIN32_HANDLE
# define EIO_FD_TO_WIN32_HANDLE(fd) _get_osfhandle (fd)
#endif
#ifndef EIO_WIN32_HANDLE_TO_FD
# define EIO_WIN32_HANDLE_TO_FD(handle) _open_osfhandle (handle, 0)
#endif

#define EIO_ERRNO(errval,retval) ((errno = errval), retval)

#define EIO_ENOSYS() EIO_ERRNO (ENOSYS, -1)

#ifdef _WIN32

#undef PAGESIZE
#define PAGESIZE 4096 /* GetSystemInfo? */

/* TODO: look at how perl does stat (non-sloppy), unlink (ro-files), utime, link */

#ifdef EIO_STRUCT_STATI64
/* look at perl's non-sloppy stat */
#define stat(path,buf)       _stati64 (path,buf)
#define fstat(fd,buf)        _fstati64 (fd,buf)
#endif
#define lstat(path,buf)      stat (path,buf)
#define fsync(fd)            (FlushFileBuffers ((HANDLE)EIO_FD_TO_WIN32_HANDLE (fd)) ? 0 : EIO_ERRNO (EBADF, -1))
#define mkdir(path,mode)     _mkdir (path)
#define link(old,neu)        (CreateHardLink (neu, old, 0) ? 0 : EIO_ERRNO (ENOENT, -1))

#define chmod(path,mode)     _chmod (path, mode)
#define dup(fd)              _dup (fd)
#define dup2(fd1,fd2)        _dup2 (fd1, fd2)

#define fchmod(fd,mode)      EIO_ENOSYS ()
#define chown(path,uid,gid)  EIO_ENOSYS ()
#define fchown(fd,uid,gid)   EIO_ENOSYS ()
#define truncate(path,offs)  EIO_ENOSYS () /* far-miss: SetEndOfFile */
#define ftruncate(fd,offs)   EIO_ENOSYS () /* near-miss: SetEndOfFile */
#define mknod(path,mode,dev) EIO_ENOSYS ()
#define sync()               EIO_ENOSYS ()
#define readlink(path,buf,s) EIO_ENOSYS ()
#define statvfs(path,buf)    EIO_ENOSYS ()
#define fstatvfs(fd,buf)     EIO_ENOSYS ()

/* rename() uses MoveFile, which fails to overwrite */
#define rename(old,neu)      eio__rename (old, neu)

static int
eio__rename (const char *old, const char *neu)
{
  if (MoveFileEx (old, neu, MOVEFILE_REPLACE_EXISTING))
    return 0;

  /* should steal _dosmaperr */
  switch (GetLastError ())
    {
      case ERROR_FILE_NOT_FOUND:
      case ERROR_PATH_NOT_FOUND:
      case ERROR_INVALID_DRIVE:
      case ERROR_NO_MORE_FILES:
      case ERROR_BAD_NETPATH:
      case ERROR_BAD_NET_NAME:
      case ERROR_BAD_PATHNAME:
      case ERROR_FILENAME_EXCED_RANGE:
        errno = ENOENT;
        break;

      default:
        errno = EACCES;
        break;
    }

  return -1;
}

/* we could even stat and see if it exists */
static int
symlink (const char *old, const char *neu)
{
#if WINVER >= 0x0600
  if (CreateSymbolicLink (neu, old, 1))
    return 0;

  if (CreateSymbolicLink (neu, old, 0))
    return 0;
#endif

  return EIO_ERRNO (ENOENT, -1);
}

/* POSIX API only */
#define CreateHardLink(neu,old,flags) 0
#define CreateSymbolicLink(neu,old,flags) 0

struct statvfs
{
  int dummy;
};

#define DT_DIR EIO_DT_DIR
#define DT_REG EIO_DT_REG
#define D_NAME(entp) entp.cFileName
#define D_TYPE(entp) (entp.dwFileAttributes & FILE_ATTRIBUTE_DIRECTORY ? DT_DIR : DT_REG)

#else

#include <sys/time.h>
#include <sys/select.h>
#include <sys/statvfs.h>
#include <unistd.h>
#include <signal.h>
#include <dirent.h>

#if _POSIX_MEMLOCK || _POSIX_MEMLOCK_RANGE || _POSIX_MAPPED_FILES
#include <sys/mman.h>
#endif

#define D_NAME(entp) entp->d_name

/* POSIX_SOURCE is useless on bsd's, and XOPEN_SOURCE is unreliable there, too */
#if __FreeBSD__ || defined __NetBSD__ || defined __OpenBSD__
#define _DIRENT_HAVE_D_TYPE /* sigh */
#define D_INO(de) (de)->d_fileno
#define D_NAMLEN(de) (de)->d_namlen
#elif __linux || defined d_ino || _XOPEN_SOURCE >= 600
#define D_INO(de) (de)->d_ino
#endif

#ifdef _D_EXACT_NAMLEN
#undef D_NAMLEN
#define D_NAMLEN(de) _D_EXACT_NAMLEN (de)
#endif

#ifdef _DIRENT_HAVE_D_TYPE
#define D_TYPE(de) (de)->d_type
#endif

#ifndef EIO_STRUCT_DIRENT
#define EIO_STRUCT_DIRENT struct dirent
#endif

#endif

#if HAVE_UTIMES
# include <utime.h>
#endif

#if HAVE_SYS_SYSCALL_H
# include <sys/syscall.h>
#endif

#if HAVE_SYS_PRCTL_H
# include <sys/prctl.h>
#endif

#if HAVE_SENDFILE
# if __linux
#  include <sys/sendfile.h>
# elif __FreeBSD__ || defined __APPLE__
#  include <sys/socket.h>
#  include <sys/uio.h>
# elif __hpux
#  include <sys/socket.h>
# elif __solaris
#  include <sys/sendfile.h>
# else
#  error sendfile support requested but not available
# endif
#endif

#ifndef D_TYPE
# define D_TYPE(de) 0
#endif
#ifndef D_INO
# define D_INO(de) 0
#endif
#ifndef D_NAMLEN
# define D_NAMLEN(entp) strlen (D_NAME (entp))
#endif

/* used for struct dirent, AIX doesn't provide it */
#ifndef NAME_MAX
# define NAME_MAX 4096
#endif

/* used for readlink etc. */
#ifndef PATH_MAX
# define PATH_MAX 4096
#endif

/* buffer size for various temporary buffers */
#define EIO_BUFSIZE 65536

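/* dBUF declares and allocates a function-local temporary buffer named
 * "eio_buf" of EIO_BUFSIZE bytes, returning -1 with ENOMEM on failure;
 * FUBd frees it again. both are used in pairs by the emulation helpers
 * below (e.g. the readahead and sendfile fallbacks). */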
#define dBUF                                    \
  char *eio_buf = malloc (EIO_BUFSIZE);         \
  errno = ENOMEM;                               \
  if (!eio_buf)                                 \
    return -1

#define FUBd \
  free (eio_buf)

#define EIO_TICKS ((1000000 + 1023) >> 10)

/*****************************************************************************/

struct tmpbuf
{
  void *ptr;
  int len;
};

static void *
tmpbuf_get (struct tmpbuf *buf, int len)
{
  if (buf->len < len)
    {
      free (buf->ptr);
      buf->ptr = malloc (buf->len = len);
    }

  return buf->ptr;
}

struct tmpbuf;

#if _POSIX_VERSION >= 200809L
#define HAVE_AT 1
#define WD2FD(wd) ((wd) ? (wd)->fd : AT_FDCWD)
#ifndef O_SEARCH
#define O_SEARCH O_RDONLY
#endif
#else
#define HAVE_AT 0
static const char *wd_expand (struct tmpbuf *tmpbuf, eio_wd wd, const char *path);
#endif

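/* a working directory handle (eio_wd) points to this struct: with POSIX 2008
 * *at() support it carries an O_SEARCH directory fd, and in all cases it
 * stores the canonical absolute path (and its length) so requests can fall
 * back to plain path-based syscalls. */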
struct eio_pwd
{
#if HAVE_AT
  int fd;
#endif
  int len;
  char str[1]; /* actually, a 0-terminated canonical path */
};

/*****************************************************************************/

#define ETP_PRI_MIN EIO_PRI_MIN
#define ETP_PRI_MAX EIO_PRI_MAX

struct etp_worker;

#define ETP_REQ eio_req
#define ETP_DESTROY(req) eio_destroy (req)
static int eio_finish (eio_req *req);
#define ETP_FINISH(req)  eio_finish (req)
static void eio_execute (struct etp_worker *self, eio_req *req);
#define ETP_EXECUTE(wrk,req) eio_execute (wrk,req)

/*****************************************************************************/

#define ETP_NUM_PRI (ETP_PRI_MAX - ETP_PRI_MIN + 1)

/* calculate time difference in ~1/EIO_TICKS of a second */
ecb_inline int
tvdiff (struct timeval *tv1, struct timeval *tv2)
{
  return  (tv2->tv_sec  - tv1->tv_sec ) * EIO_TICKS
       + ((tv2->tv_usec - tv1->tv_usec) >> 10);
}

static unsigned int started, idle, wanted = 4;

static void (*want_poll_cb) (void);
static void (*done_poll_cb) (void);

static unsigned int max_poll_time;  /* reslock */
static unsigned int max_poll_reqs;  /* reslock */

static unsigned int nreqs;    /* reqlock */
static unsigned int nready;   /* reqlock */
static unsigned int npending; /* reqlock */
static unsigned int max_idle = 4;      /* maximum number of threads that can idle indefinitely */
static unsigned int idle_timeout = 10; /* number of seconds after which an idle thread exits */

static xmutex_t wrklock;
static xmutex_t reslock;
static xmutex_t reqlock;
static xcond_t  reqwait;

#if !HAVE_PREADWRITE
/*
 * make our pread/pwrite emulation safe against themselves, but not against
 * normal read/write by using a mutex. slows down execution a lot,
 * but that's your problem, not mine.
 */
static xmutex_t preadwritelock;
#endif

typedef struct etp_worker
{
  struct tmpbuf tmpbuf;

  /* locked by wrklock */
  struct etp_worker *prev, *next;

  xthread_t tid;

#ifdef ETP_WORKER_COMMON
  ETP_WORKER_COMMON
#endif
} etp_worker;

static etp_worker wrk_first; /* NOT etp */

#define ETP_WORKER_LOCK(wrk)   X_LOCK   (wrklock)
#define ETP_WORKER_UNLOCK(wrk) X_UNLOCK (wrklock)

/* worker threads management */

static void
etp_worker_clear (etp_worker *wrk)
{
}

static void ecb_cold
etp_worker_free (etp_worker *wrk)
{
  free (wrk->tmpbuf.ptr);

  wrk->next->prev = wrk->prev;
  wrk->prev->next = wrk->next;

  free (wrk);
}

static unsigned int
etp_nreqs (void)
{
  int retval;
  if (WORDACCESS_UNSAFE) X_LOCK   (reqlock);
  retval = nreqs;
  if (WORDACCESS_UNSAFE) X_UNLOCK (reqlock);
  return retval;
}

static unsigned int
etp_nready (void)
{
  unsigned int retval;

  if (WORDACCESS_UNSAFE) X_LOCK   (reqlock);
  retval = nready;
  if (WORDACCESS_UNSAFE) X_UNLOCK (reqlock);

  return retval;
}

static unsigned int
etp_npending (void)
{
  unsigned int retval;

  if (WORDACCESS_UNSAFE) X_LOCK   (reqlock);
  retval = npending;
  if (WORDACCESS_UNSAFE) X_UNLOCK (reqlock);

  return retval;
}

static unsigned int
etp_nthreads (void)
{
  unsigned int retval;

  if (WORDACCESS_UNSAFE) X_LOCK   (reqlock);
  retval = started;
  if (WORDACCESS_UNSAFE) X_UNLOCK (reqlock);

  return retval;
}

/*
 * a somewhat faster data structure might be nice, but
 * with 8 priorities this actually needs <20 insns
 * per shift, the most expensive operation.
 */
typedef struct {
  ETP_REQ *qs[ETP_NUM_PRI], *qe[ETP_NUM_PRI]; /* qstart, qend */
  int size;
} etp_reqq;

static etp_reqq req_queue;
static etp_reqq res_queue;

static void ecb_noinline ecb_cold
reqq_init (etp_reqq *q)
{
  int pri;

  for (pri = 0; pri < ETP_NUM_PRI; ++pri)
    q->qs[pri] = q->qe[pri] = 0;

  q->size = 0;
}

static int ecb_noinline
reqq_push (etp_reqq *q, ETP_REQ *req)
{
  int pri = req->pri;
  req->next = 0;

  if (q->qe[pri])
    {
      q->qe[pri]->next = req;
      q->qe[pri] = req;
    }
  else
    q->qe[pri] = q->qs[pri] = req;

  return q->size++;
}

static ETP_REQ * ecb_noinline
reqq_shift (etp_reqq *q)
{
  int pri;

  if (!q->size)
    return 0;

  --q->size;

  for (pri = ETP_NUM_PRI; pri--; )
    {
      eio_req *req = q->qs[pri];

      if (req)
        {
          if (!(q->qs[pri] = (eio_req *)req->next))
            q->qe[pri] = 0;

          return req;
        }
    }

  abort ();
}

static int ecb_cold
etp_init (void (*want_poll)(void), void (*done_poll)(void))
{
  X_MUTEX_CREATE (wrklock);
  X_MUTEX_CREATE (reslock);
  X_MUTEX_CREATE (reqlock);
  X_COND_CREATE  (reqwait);

  reqq_init (&req_queue);
  reqq_init (&res_queue);

  wrk_first.next =
  wrk_first.prev = &wrk_first;

  started  = 0;
  idle     = 0;
  nreqs    = 0;
  nready   = 0;
  npending = 0;

  want_poll_cb = want_poll;
  done_poll_cb = done_poll;

  return 0;
}

X_THREAD_PROC (etp_proc);

static void ecb_cold
etp_start_thread (void)
{
  etp_worker *wrk = calloc (1, sizeof (etp_worker));

  /*TODO*/
  assert (("unable to allocate worker thread data", wrk));

  X_LOCK (wrklock);

  if (thread_create (&wrk->tid, etp_proc, (void *)wrk))
    {
      wrk->prev = &wrk_first;
      wrk->next = wrk_first.next;
      wrk_first.next->prev = wrk;
      wrk_first.next = wrk;
      ++started;
    }
  else
    free (wrk);

  X_UNLOCK (wrklock);
}

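/* start a new thread only if there are requests no existing thread could
 * pick up: i.e. when outstanding requests (nreqs) exceed the number of
 * threads plus already-finished results (npending), and we are still below
 * the configured "wanted" thread count. */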
static void
etp_maybe_start_thread (void)
{
  if (ecb_expect_true (etp_nthreads () >= wanted))
    return;

  /* todo: maybe use idle here, but might be less exact */
  if (ecb_expect_true (0 <= (int)etp_nthreads () + (int)etp_npending () - (int)etp_nreqs ()))
    return;

  etp_start_thread ();
}

static void ecb_cold
etp_end_thread (void)
{
  eio_req *req = calloc (1, sizeof (eio_req)); /* will be freed by worker */

  req->type = -1;
  req->pri  = ETP_PRI_MAX - ETP_PRI_MIN;

  X_LOCK (reqlock);
  reqq_push (&req_queue, req);
  X_COND_SIGNAL (reqwait);
  X_UNLOCK (reqlock);

  X_LOCK (wrklock);
  --started;
  X_UNLOCK (wrklock);
}

static int
etp_poll (void)
{
  unsigned int maxreqs;
  unsigned int maxtime;
  struct timeval tv_start, tv_now;

  X_LOCK (reslock);
  maxreqs = max_poll_reqs;
  maxtime = max_poll_time;
  X_UNLOCK (reslock);

  if (maxtime)
    gettimeofday (&tv_start, 0);

  for (;;)
    {
      ETP_REQ *req;

      etp_maybe_start_thread ();

      X_LOCK (reslock);
      req = reqq_shift (&res_queue);

      if (req)
        {
          --npending;

          if (!res_queue.size && done_poll_cb)
            done_poll_cb ();
        }

      X_UNLOCK (reslock);

      if (!req)
        return 0;

      X_LOCK (reqlock);
      --nreqs;
      X_UNLOCK (reqlock);

      if (ecb_expect_false (req->type == EIO_GROUP && req->size))
        {
          req->int1 = 1; /* mark request as delayed */
          continue;
        }
      else
        {
          int res = ETP_FINISH (req);
          if (ecb_expect_false (res))
            return res;
        }

      if (ecb_expect_false (maxreqs && !--maxreqs))
        break;

      if (maxtime)
        {
          gettimeofday (&tv_now, 0);

          if (tvdiff (&tv_start, &tv_now) >= maxtime)
            break;
        }
    }

  errno = EAGAIN;
  return -1;
}

static void
etp_cancel (ETP_REQ *req)
{
  req->cancelled = 1;

  eio_grp_cancel (req);
}

static void
etp_submit (ETP_REQ *req)
{
  req->pri -= ETP_PRI_MIN;

  if (ecb_expect_false (req->pri < ETP_PRI_MIN - ETP_PRI_MIN)) req->pri = ETP_PRI_MIN - ETP_PRI_MIN;
  if (ecb_expect_false (req->pri > ETP_PRI_MAX - ETP_PRI_MIN)) req->pri = ETP_PRI_MAX - ETP_PRI_MIN;

  if (ecb_expect_false (req->type == EIO_GROUP))
    {
      /* I hope this is worth it :/ */
      X_LOCK (reqlock);
      ++nreqs;
      X_UNLOCK (reqlock);

      X_LOCK (reslock);

      ++npending;

      if (!reqq_push (&res_queue, req) && want_poll_cb)
        want_poll_cb ();

      X_UNLOCK (reslock);
    }
  else
    {
      X_LOCK (reqlock);
      ++nreqs;
      ++nready;
      reqq_push (&req_queue, req);
      X_COND_SIGNAL (reqwait);
      X_UNLOCK (reqlock);

      etp_maybe_start_thread ();
    }
}

static void ecb_cold
etp_set_max_poll_time (double nseconds)
{
  if (WORDACCESS_UNSAFE) X_LOCK   (reslock);
  max_poll_time = nseconds * EIO_TICKS;
  if (WORDACCESS_UNSAFE) X_UNLOCK (reslock);
}

static void ecb_cold
etp_set_max_poll_reqs (unsigned int maxreqs)
{
  if (WORDACCESS_UNSAFE) X_LOCK   (reslock);
  max_poll_reqs = maxreqs;
  if (WORDACCESS_UNSAFE) X_UNLOCK (reslock);
}

static void ecb_cold
etp_set_max_idle (unsigned int nthreads)
{
  if (WORDACCESS_UNSAFE) X_LOCK   (reqlock);
  max_idle = nthreads;
  if (WORDACCESS_UNSAFE) X_UNLOCK (reqlock);
}

static void ecb_cold
etp_set_idle_timeout (unsigned int seconds)
{
  if (WORDACCESS_UNSAFE) X_LOCK   (reqlock);
  idle_timeout = seconds;
  if (WORDACCESS_UNSAFE) X_UNLOCK (reqlock);
}

static void ecb_cold
etp_set_min_parallel (unsigned int nthreads)
{
  if (wanted < nthreads)
    wanted = nthreads;
}

static void ecb_cold
etp_set_max_parallel (unsigned int nthreads)
{
  if (wanted > nthreads)
    wanted = nthreads;

  while (started > wanted)
    etp_end_thread ();
}

/*****************************************************************************/

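/* group requests: a group (EIO_GROUP) may have a "feed" callback that adds
 * subrequests on demand. grp->int2 is the feeder limit (how many subrequests
 * may be in flight), grp->size counts the current subrequests, and the feed
 * callback signals progress by setting EIO_FLAG_GROUPADD; when it stops
 * adding, the feeder is disabled. */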
static void
grp_try_feed (eio_req *grp)
{
  while (grp->size < grp->int2 && !EIO_CANCELLED (grp))
    {
      grp->flags &= ~EIO_FLAG_GROUPADD;

      EIO_FEED (grp);

      /* stop if no progress has been made */
      if (!(grp->flags & EIO_FLAG_GROUPADD))
        {
          grp->feed = 0;
          break;
        }
    }
}

static int
grp_dec (eio_req *grp)
{
  --grp->size;

  /* call feeder, if applicable */
  grp_try_feed (grp);

  /* finish, if done */
  if (!grp->size && grp->int1)
    return eio_finish (grp);
  else
    return 0;
}

static void
eio_destroy (eio_req *req)
{
  if ((req)->flags & EIO_FLAG_PTR1_FREE) free (req->ptr1);
  if ((req)->flags & EIO_FLAG_PTR2_FREE) free (req->ptr2);

  EIO_DESTROY (req);
}

static int
eio_finish (eio_req *req)
{
  int res = EIO_FINISH (req);

  if (req->grp)
    {
      int res2;
      eio_req *grp = req->grp;

      /* unlink request */
      if (req->grp_next) req->grp_next->grp_prev = req->grp_prev;
      if (req->grp_prev) req->grp_prev->grp_next = req->grp_next;

      if (grp->grp_first == req)
        grp->grp_first = req->grp_next;

      res2 = grp_dec (grp);

      if (!res)
        res = res2;
    }

  eio_destroy (req);

  return res;
}

void
eio_grp_cancel (eio_req *grp)
{
  for (grp = grp->grp_first; grp; grp = grp->grp_next)
    eio_cancel (grp);
}

void
eio_cancel (eio_req *req)
{
  etp_cancel (req);
}

void
eio_submit (eio_req *req)
{
  etp_submit (req);
}

unsigned int
eio_nreqs (void)
{
  return etp_nreqs ();
}

unsigned int
eio_nready (void)
{
  return etp_nready ();
}

unsigned int
eio_npending (void)
{
  return etp_npending ();
}

unsigned int ecb_cold
eio_nthreads (void)
{
  return etp_nthreads ();
}

void ecb_cold
eio_set_max_poll_time (double nseconds)
{
  etp_set_max_poll_time (nseconds);
}

void ecb_cold
eio_set_max_poll_reqs (unsigned int maxreqs)
{
  etp_set_max_poll_reqs (maxreqs);
}

void ecb_cold
eio_set_max_idle (unsigned int nthreads)
{
  etp_set_max_idle (nthreads);
}

void ecb_cold
eio_set_idle_timeout (unsigned int seconds)
{
  etp_set_idle_timeout (seconds);
}

void ecb_cold
eio_set_min_parallel (unsigned int nthreads)
{
  etp_set_min_parallel (nthreads);
}

void ecb_cold
eio_set_max_parallel (unsigned int nthreads)
{
  etp_set_max_parallel (nthreads);
}

int eio_poll (void)
{
  return etp_poll ();
}

/*****************************************************************************/
/* work around various missing functions */

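/* emulate pread/pwrite on platforms without them by seeking, doing the plain
 * read/write, and seeking back, all under preadwritelock so the emulated
 * calls at least do not race against each other. */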
#if !HAVE_PREADWRITE
# undef pread
# undef pwrite
# define pread  eio__pread
# define pwrite eio__pwrite

static eio_ssize_t
eio__pread (int fd, void *buf, size_t count, off_t offset)
{
  eio_ssize_t res;
  off_t ooffset;

  X_LOCK (preadwritelock);
  ooffset = lseek (fd, 0, SEEK_CUR);
  lseek (fd, offset, SEEK_SET);
  res = read (fd, buf, count);
  lseek (fd, ooffset, SEEK_SET);
  X_UNLOCK (preadwritelock);

  return res;
}

static eio_ssize_t
eio__pwrite (int fd, void *buf, size_t count, off_t offset)
{
  eio_ssize_t res;
  off_t ooffset;

  X_LOCK (preadwritelock);
  ooffset = lseek (fd, 0, SEEK_CUR);
  lseek (fd, offset, SEEK_SET);
  res = write (fd, buf, count);
  lseek (fd, ooffset, SEEK_SET);
  X_UNLOCK (preadwritelock);

  return res;
}
#endif

#ifndef HAVE_UTIMES

# undef utimes
# define utimes(path,times) eio__utimes (path, times)

static int
eio__utimes (const char *filename, const struct timeval times[2])
{
  if (times)
    {
      struct utimbuf buf;

      buf.actime  = times[0].tv_sec;
      buf.modtime = times[1].tv_sec;

      return utime (filename, &buf);
    }
  else
    return utime (filename, 0);
}

#endif

#ifndef HAVE_FUTIMES

# undef futimes
# define futimes(fd,times) eio__futimes (fd, times)

static int
eio__futimes (int fd, const struct timeval tv[2])
{
  errno = ENOSYS;
  return -1;
}

#endif

#if !HAVE_FDATASYNC
# undef fdatasync
# define fdatasync(fd) fsync (fd)
#endif

static int
eio__syncfs (int fd)
{
  int res;

#if HAVE_SYS_SYNCFS
  res = (int)syscall (__NR_syncfs, (int)(fd));
#else
  res = -1;
  errno = ENOSYS;
#endif

  if (res < 0 && errno == ENOSYS && fd >= 0)
    sync ();

  return res;
}

/* sync_file_range always needs emulation */
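/* if the EIO_SYNC_FILE_RANGE_* constants do not match the kernel's
 * SYNC_FILE_RANGE_* values, translate them bit by bit; the same pattern is
 * used for the mlockall and msync wrappers further down. */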
static int
eio__sync_file_range (int fd, off_t offset, size_t nbytes, unsigned int flags)
{
#if HAVE_SYNC_FILE_RANGE
  int res;

  if (EIO_SYNC_FILE_RANGE_WAIT_BEFORE   != SYNC_FILE_RANGE_WAIT_BEFORE
      || EIO_SYNC_FILE_RANGE_WRITE      != SYNC_FILE_RANGE_WRITE
      || EIO_SYNC_FILE_RANGE_WAIT_AFTER != SYNC_FILE_RANGE_WAIT_AFTER)
    {
      flags = 0
              | (flags & EIO_SYNC_FILE_RANGE_WAIT_BEFORE ? SYNC_FILE_RANGE_WAIT_BEFORE : 0)
              | (flags & EIO_SYNC_FILE_RANGE_WRITE       ? SYNC_FILE_RANGE_WRITE       : 0)
              | (flags & EIO_SYNC_FILE_RANGE_WAIT_AFTER  ? SYNC_FILE_RANGE_WAIT_AFTER  : 0);
    }

  res = sync_file_range (fd, offset, nbytes, flags);

  if (!res || errno != ENOSYS)
    return res;
#endif

  /* even though we could play tricks with the flags, it's better to always
   * call fdatasync, as that matches the expectation of its users best */
  return fdatasync (fd);
}

static int
eio__fallocate (int fd, int mode, off_t offset, size_t len)
{
#if HAVE_FALLOCATE
  return fallocate (fd, mode, offset, len);
#else
  errno = ENOSYS;
  return -1;
#endif
}

#if !HAVE_READAHEAD
# undef readahead
# define readahead(fd,offset,count) eio__readahead (fd, offset, count, self)

static eio_ssize_t
eio__readahead (int fd, off_t offset, size_t count, etp_worker *self)
{
  size_t todo = count;
  dBUF;

  while (todo > 0)
    {
      size_t len = todo < EIO_BUFSIZE ? todo : EIO_BUFSIZE;

      pread (fd, eio_buf, len, offset);
      offset += len;
      todo   -= len;
    }

  FUBd;

  errno = 0;
  return count;
}

#endif

/* sendfile always needs emulation */
static eio_ssize_t
eio__sendfile (int ofd, int ifd, off_t offset, size_t count)
{
  eio_ssize_t written = 0;
  eio_ssize_t res;

  if (!count)
    return 0;

  for (;;)
    {
#ifdef __APPLE__
# undef HAVE_SENDFILE /* broken, as everything on os x */
#endif
#if HAVE_SENDFILE
# if __linux
      off_t soffset = offset;
      res = sendfile (ofd, ifd, &soffset, count);

# elif __FreeBSD__
      /*
       * Of course, the freebsd sendfile is a dire hack with no thoughts
       * wasted on making it similar to other I/O functions.
       */
      off_t sbytes;
      res = sendfile (ifd, ofd, offset, count, 0, &sbytes, 0);

#if 0 /* according to the manpage, this is correct, but broken behaviour */
      /* freebsd's sendfile will return 0 on success */
      /* freebsd 8 documents it as only setting *sbytes on EINTR and EAGAIN, but */
      /* not on e.g. EIO or EPIPE - sounds broken */
      if ((res < 0 && (errno == EAGAIN || errno == EINTR) && sbytes) || res == 0)
        res = sbytes;
#endif

      /* according to source inspection, this is correct, and useful behaviour */
      if (sbytes)
        res = sbytes;

# elif defined (__APPLE__)
      off_t sbytes = count;
      res = sendfile (ifd, ofd, offset, &sbytes, 0, 0);

      /* according to the manpage, sbytes is always valid */
      if (sbytes)
        res = sbytes;

# elif __hpux
      res = sendfile (ofd, ifd, offset, count, 0, 0);

# elif __solaris
      struct sendfilevec vec;
      size_t sbytes;

      vec.sfv_fd   = ifd;
      vec.sfv_flag = 0;
      vec.sfv_off  = offset;
      vec.sfv_len  = count;

      res = sendfilev (ofd, &vec, 1, &sbytes);

      if (res < 0 && sbytes)
        res = sbytes;

# endif

#elif defined (_WIN32) && 0
      /* does not work, just for documentation of what would need to be done */
      /* actually, cannot be done like this, as TransmitFile changes the file offset, */
      /* libeio guarantees that the file offset does not change, and windows */
      /* has no way to get an independent handle to the same file description */
      HANDLE h = TO_SOCKET (ifd);
      SetFilePointer (h, offset, 0, FILE_BEGIN);
      res = TransmitFile (TO_SOCKET (ofd), h, count, 0, 0, 0, 0);

#else
      res = -1;
      errno = ENOSYS;
#endif

      /* we assume sendfile can copy at least 128mb in one go */
      if (res <= 128 * 1024 * 1024)
        {
          if (res > 0)
            written += res;

          if (written)
            return written;

          break;
        }
      else
        {
          /* if we requested more, then probably the kernel was lazy */
          written += res;
          offset  += res;
          count   -= res;

          if (!count)
            return written;
        }
    }

  if (res < 0
      && (errno == ENOSYS || errno == EINVAL || errno == ENOTSOCK
          /* BSDs */
#ifdef ENOTSUP /* sigh, if the steenking pile called openbsd would only try to at least compile posix code... */
          || errno == ENOTSUP
#endif
#ifdef EOPNOTSUPP /* windows */
          || errno == EOPNOTSUPP /* BSDs */
#endif
#if __solaris
          || errno == EAFNOSUPPORT || errno == EPROTOTYPE
#endif
         )
      )
    {
      /* emulate sendfile. this is a major pain in the ass */
      dBUF;

      res = 0;

      while (count)
        {
          eio_ssize_t cnt;

          cnt = pread (ifd, eio_buf, count > EIO_BUFSIZE ? EIO_BUFSIZE : count, offset);

          if (cnt <= 0)
            {
              if (cnt && !res) res = -1;
              break;
            }

          cnt = write (ofd, eio_buf, cnt);

          if (cnt <= 0)
            {
              if (cnt && !res) res = -1;
              break;
            }

          offset += cnt;
          res    += cnt;
          count  -= cnt;
        }

      FUBd;
    }

  return res;
}

#ifdef PAGESIZE
# define eio_pagesize() PAGESIZE
#else
static intptr_t
eio_pagesize (void)
{
  static intptr_t page;

  if (!page)
    page = sysconf (_SC_PAGESIZE);

  return page;
}
#endif

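/* mlock/msync & friends want page-aligned arguments: round the start address
 * down to a page boundary, grow the length by the same amount, then round the
 * length up to a whole number of pages. */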
static void
eio_page_align (void **addr, size_t *length)
{
  intptr_t mask = eio_pagesize () - 1;

  /* round down addr */
  intptr_t adj = mask & (intptr_t)*addr;

  *addr   = (void *)((intptr_t)*addr - adj);
  *length += adj;

  /* round up length */
  *length = (*length + mask) & ~mask;
}

#if !_POSIX_MEMLOCK
# define eio__mlockall(a) EIO_ENOSYS ()
#else

static int
eio__mlockall (int flags)
{
#if __GLIBC__ == 2 && __GLIBC_MINOR__ <= 7
  extern int mallopt (int, int);
  mallopt (-6, 238); /* http://bugs.debian.org/cgi-bin/bugreport.cgi?bug=473812 */
#endif

  if (EIO_MCL_CURRENT   != MCL_CURRENT
      || EIO_MCL_FUTURE != MCL_FUTURE)
    {
      flags = 0
              | (flags & EIO_MCL_CURRENT ? MCL_CURRENT : 0)
              | (flags & EIO_MCL_FUTURE  ? MCL_FUTURE  : 0);
    }

  return mlockall (flags);
}
#endif

#if !_POSIX_MEMLOCK_RANGE
# define eio__mlock(a,b) EIO_ENOSYS ()
#else

static int
eio__mlock (void *addr, size_t length)
{
  eio_page_align (&addr, &length);

  return mlock (addr, length);
}

#endif

#if !(_POSIX_MAPPED_FILES && _POSIX_SYNCHRONIZED_IO)
# define eio__msync(a,b,c) EIO_ENOSYS ()
#else

static int
eio__msync (void *mem, size_t len, int flags)
{
  eio_page_align (&mem, &len);

  if (EIO_MS_ASYNC         != MS_SYNC
      || EIO_MS_INVALIDATE != MS_INVALIDATE
      || EIO_MS_SYNC       != MS_SYNC)
    {
      flags = 0
              | (flags & EIO_MS_ASYNC      ? MS_ASYNC      : 0)
              | (flags & EIO_MS_INVALIDATE ? MS_INVALIDATE : 0)
              | (flags & EIO_MS_SYNC       ? MS_SYNC       : 0);
    }

  return msync (mem, len, flags);
}

#endif

static int
eio__mtouch (eio_req *req)
{
  void *mem  = req->ptr2;
  size_t len = req->size;
  int flags  = req->int1;

  eio_page_align (&mem, &len);

  {
    intptr_t addr = (intptr_t)mem;
    intptr_t end  = addr + len;
    intptr_t page = eio_pagesize ();

    if (addr < end)
      if (flags & EIO_MT_MODIFY) /* modify */
        do { *((volatile sig_atomic_t *)addr) |= 0; } while ((addr += page) < end && !EIO_CANCELLED (req));
      else
        do { *((volatile sig_atomic_t *)addr)      ; } while ((addr += page) < end && !EIO_CANCELLED (req));
  }

  return 0;
}

/*****************************************************************************/
/* requests implemented outside eio_execute, because they are so large */

/* result will always end up in tmpbuf, there is always space for adding a 0-byte */
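/* the path is resolved one component at a time: "." is skipped, ".." drops
 * the last resolved component, and every other component is appended and
 * probed with readlink - if it turns out to be a symlink, the remaining path
 * is re-spliced after the link target (bounded by a symlink counter to catch
 * loops). the return value is the length of the canonical path in tmpbuf. */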
static int
eio__realpath (struct tmpbuf *tmpbuf, eio_wd wd, const char *path)
{
  const char *rel = path;
  char *res;
  char *tmp1, *tmp2;
#if SYMLOOP_MAX > 32
  int symlinks = SYMLOOP_MAX;
#else
  int symlinks = 32;
#endif

  errno = EINVAL;
  if (!rel)
    return -1;

  errno = ENOENT;
  if (!*rel)
    return -1;

  res  = tmpbuf_get (tmpbuf, PATH_MAX * 3);
  tmp1 = res  + PATH_MAX;
  tmp2 = tmp1 + PATH_MAX;

#if 0 /* disabled, the musl way to do things is just too racy */
#if __linux && defined(O_NONBLOCK) && defined(O_NOATIME)
  /* on linux we may be able to ask the kernel */
  {
    int fd = open (rel, O_RDONLY | O_NONBLOCK | O_NOCTTY | O_NOATIME);

    if (fd >= 0)
      {
        sprintf (tmp1, "/proc/self/fd/%d", fd);
        req->result = readlink (tmp1, res, PATH_MAX);
        close (fd);

        /* here we should probably stat the open file and the disk file, to make sure they still match */

        if (req->result > 0)
          goto done;
      }
    else if (errno == ELOOP || errno == ENAMETOOLONG || errno == ENOENT || errno == ENOTDIR || errno == EIO)
      return;
  }
#endif
#endif

  if (*rel != '/')
    {
      int len;

      errno = ENOENT;
      if (wd == EIO_INVALID_WD)
        return -1;

      if (wd == EIO_CWD)
        {
          if (!getcwd (res, PATH_MAX))
            return -1;

          len = strlen (res);
        }
      else
        memcpy (res, wd->str, len = wd->len);

      if (res [1]) /* only use if not / */
        res += len;
    }

  while (*rel)
    {
      eio_ssize_t len, linklen;
      const char *beg = rel;

      while (*rel && *rel != '/')
        ++rel;

      len = rel - beg;

      if (!len) /* skip slashes */
        {
          ++rel;
          continue;
        }

      if (beg [0] == '.')
        {
          if (len == 1)
            continue; /* . - nop */

          if (beg [1] == '.' && len == 2)
            {
              /* .. - back up one component, if possible */

              while (res != tmpbuf->ptr)
                if (*--res == '/')
                  break;

              continue;
            }
        }

      errno = ENAMETOOLONG;
      if (res + 1 + len + 1 >= tmp1)
        return -1;

      /* copy one component */
      *res = '/';
      memcpy (res + 1, beg, len);

      /* zero-terminate, for readlink */
      res [len + 1] = 0;

      /* now check if it's a symlink */
      linklen = readlink (tmpbuf->ptr, tmp1, PATH_MAX);

      if (linklen < 0)
        {
          if (errno != EINVAL)
            return -1;

          /* it's a normal directory. hopefully */
          res += len + 1;
        }
      else
        {
          /* yay, it was a symlink - build new path in tmp2 */
          int rellen = strlen (rel);

          errno = ENAMETOOLONG;
          if (linklen + 1 + rellen >= PATH_MAX)
            return -1;

          errno = ELOOP;
          if (!--symlinks)
            return -1;

          if (*tmp1 == '/')
            res = tmpbuf->ptr; /* symlink resolves to an absolute path */

          /* we need to be careful, as rel might point into tmp2 already */
          memmove (tmp2 + linklen + 1, rel, rellen + 1);
          tmp2 [linklen] = '/';
          memcpy (tmp2, tmp1, linklen);

          rel = tmp2;
        }
    }

  /* special case for the lone root path */
  if (res == tmpbuf->ptr)
    *res++ = '/';

  return res - (char *)tmpbuf->ptr;
}

static signed char
eio_dent_cmp (const eio_dirent *a, const eio_dirent *b)
{
  return a->score - b->score ? a->score - b->score /* works because our signed char is always 0..100 */
       : a->inode < b->inode ? -1
       : a->inode > b->inode ?  1
       :                        0;
}

#define EIO_DENT_CMP(i,op,j) eio_dent_cmp (&i, &j) op 0

#define EIO_SORT_CUTOFF 30 /* quite high, but performs well on many filesystems */
#define EIO_SORT_FAST   60 /* when to only use insertion sort */

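/* sort directory entries by (score, inode) using an in-place MSD radix sort
 * on the individual bits of those fields. the bit list is built at runtime so
 * that endianness and the actual width of eio_ino_t are handled uniformly,
 * and bits that are zero in every inode are skipped entirely. partitions
 * smaller than EIO_SORT_CUTOFF are left for the insertion sort pass below. */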
static void
eio_dent_radix_sort (eio_dirent *dents, int size, signed char score_bits, eio_ino_t inode_bits)
{
  unsigned char bits [9 + sizeof (eio_ino_t) * 8];
  unsigned char *bit = bits;

  assert (CHAR_BIT == 8);
  assert (sizeof (eio_dirent) * 8 < 256);
  assert (offsetof (eio_dirent, inode)); /* we use bit #0 as sentinel */
  assert (offsetof (eio_dirent, score)); /* we use bit #0 as sentinel */

  if (size <= EIO_SORT_FAST)
    return;

  /* first prepare an array of bits to test in our radix sort */
  /* try to take endianness into account, as well as differences in eio_ino_t sizes */
  /* inode_bits must contain all inodes ORed together */
  /* which is used to skip bits that are 0 everywhere, which is very common */
  {
    eio_ino_t endianness;
    int i, j;

    /* we store the byte offset of byte n into byte n of "endianness" */
    for (i = 0; i < sizeof (eio_ino_t); ++i)
      ((unsigned char *)&endianness)[i] = i;

    *bit++ = 0;

    for (i = 0; i < sizeof (eio_ino_t); ++i)
      {
        /* shifting off the byte offsets out of "endianness" */
        int offs = (offsetof (eio_dirent, inode) + (endianness & 0xff)) * 8;
        endianness >>= 8;

        for (j = 0; j < 8; ++j)
          if (inode_bits & (((eio_ino_t)1) << (i * 8 + j)))
            *bit++ = offs + j;
      }

    for (j = 0; j < 8; ++j)
      if (score_bits & (1 << j))
        *bit++ = offsetof (eio_dirent, score) * 8 + j;
  }

  /* now actually do the sorting (a variant of MSD radix sort) */
  {
    eio_dirent    *base_stk [9 + sizeof (eio_ino_t) * 8], *base;
    eio_dirent    *end_stk  [9 + sizeof (eio_ino_t) * 8], *end;
    unsigned char *bit_stk  [9 + sizeof (eio_ino_t) * 8];
    int stk_idx = 0;

    base_stk [stk_idx] = dents;
    end_stk  [stk_idx] = dents + size;
    bit_stk  [stk_idx] = bit - 1;

    do
      {
        base = base_stk [stk_idx];
        end  = end_stk  [stk_idx];
        bit  = bit_stk  [stk_idx];

        for (;;)
          {
            unsigned char O = *bit >> 3;
            unsigned char M = 1 << (*bit & 7);

            eio_dirent *a = base;
            eio_dirent *b = end;

            if (b - a < EIO_SORT_CUTOFF)
              break;

            /* now bit-partition the array on the bit */
            /* this ugly asymmetric loop seems to perform much better than typical */
            /* partition algos found in the literature */
            do
              if (!(((unsigned char *)a)[O] & M))
                ++a;
              else if (!(((unsigned char *)--b)[O] & M))
                {
                  eio_dirent tmp = *a; *a = *b; *b = tmp;
                  ++a;
                }
            while (b > a);

            /* next bit, or stop, if no bits left in this path */
            if (!*--bit)
              break;

            base_stk [stk_idx] = a;
            end_stk  [stk_idx] = end;
            bit_stk  [stk_idx] = bit;
            ++stk_idx;

            end = a;
          }
      }
    while (stk_idx--);
  }
}

static void
eio_dent_insertion_sort (eio_dirent *dents, int size)
{
  /* first move the smallest element to the front, to act as a sentinel */
  {
    int i;
    eio_dirent *min = dents;

    /* the radix pre-pass ensures that the minimum element is in the first EIO_SORT_CUTOFF + 1 elements */
    for (i = size > EIO_SORT_FAST ? EIO_SORT_CUTOFF + 1 : size; --i; )
      if (EIO_DENT_CMP (dents [i], <, *min))
        min = &dents [i];

    /* swap elements 0 and j (minimum) */
    {
      eio_dirent tmp = *dents; *dents = *min; *min = tmp;
    }
  }

  /* then do standard insertion sort, assuming that all elements are >= dents [0] */
  {
    eio_dirent *i, *j;

    for (i = dents + 1; i < dents + size; ++i)
      {
        eio_dirent value = *i;

        for (j = i - 1; EIO_DENT_CMP (*j, >, value); --j)
          j [1] = j [0];

        j [1] = value;
      }
  }
}

static void
eio_dent_sort (eio_dirent *dents, int size, signed char score_bits, eio_ino_t inode_bits)
{
  if (size <= 1)
    return; /* our insertion sort relies on size > 0 */

  /* first we use a radix sort, but only for dirs >= EIO_SORT_FAST */
  /* and stop sorting when the partitions are <= EIO_SORT_CUTOFF */
  eio_dent_radix_sort (dents, size, score_bits, inode_bits);

  /* use an insertion sort at the end, or for small arrays, */
  /* as insertion sort is more efficient for small partitions */
  eio_dent_insertion_sort (dents, size);
}

/* read a full directory */
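/* reads all entries of a directory into two buffers: req->ptr2 receives the
 * packed, 0-terminated names, and - when EIO_READDIR_DENTS is requested -
 * req->ptr1 receives an eio_dirent per entry (name offset, length, type,
 * inode). depending on the flags the entries are then sorted for stat order
 * or with directories first. */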
static void
eio__scandir (eio_req *req, etp_worker *self)
{
  char *name, *names;
  int namesalloc = 4096 - sizeof (void *) * 4;
  int namesoffs = 0;
  int flags = req->int1;
  eio_dirent *dents = 0;
  int dentalloc = 128;
  int dentoffs = 0;
  eio_ino_t inode_bits = 0;
#ifdef _WIN32
  HANDLE dirp;
  WIN32_FIND_DATA entp;
#else
  DIR *dirp;
  EIO_STRUCT_DIRENT *entp;
#endif

  req->result = -1;

  if (!(flags & EIO_READDIR_DENTS))
    flags &= ~(EIO_READDIR_DIRS_FIRST | EIO_READDIR_STAT_ORDER);

#ifdef _WIN32
  {
    int len = strlen ((const char *)req->ptr1);
    char *path = malloc (MAX_PATH);
    const char *fmt;
    const char *reqpath = wd_expand (&self->tmpbuf, req->wd, req->ptr1);

    if (!len)
      fmt = "./*";
    else if (reqpath[len - 1] == '/' || reqpath[len - 1] == '\\')
      fmt = "%s*";
    else
      fmt = "%s/*";

    _snprintf (path, MAX_PATH, fmt, reqpath);
    dirp = FindFirstFile (path, &entp);
    free (path);

    if (dirp == INVALID_HANDLE_VALUE)
      {
        /* should steal _dosmaperr */
        switch (GetLastError ())
          {
            case ERROR_FILE_NOT_FOUND:
              req->result = 0;
              break;

            case ERROR_INVALID_NAME:
            case ERROR_PATH_NOT_FOUND:
            case ERROR_NO_MORE_FILES:
              errno = ENOENT;
              break;

            case ERROR_NOT_ENOUGH_MEMORY:
              errno = ENOMEM;
              break;

            default:
              errno = EINVAL;
              break;
          }

        return;
      }
  }
#else
#if HAVE_AT
  if (req->wd)
    {
      int fd = openat (WD2FD (req->wd), req->ptr1, O_CLOEXEC | O_SEARCH | O_DIRECTORY);

      if (fd < 0)
        return;

      dirp = fdopendir (fd);

      if (!dirp)
        close (fd);
    }
  else
    dirp = opendir (req->ptr1);
#else
  dirp = opendir (wd_expand (&self->tmpbuf, req->wd, req->ptr1));
#endif

  if (!dirp)
    return;
#endif

  if (req->flags & EIO_FLAG_PTR1_FREE)
    free (req->ptr1);

  req->flags |= EIO_FLAG_PTR1_FREE | EIO_FLAG_PTR2_FREE;
  req->ptr1 = dents = flags ? malloc (dentalloc * sizeof (eio_dirent)) : 0;
  req->ptr2 = names = malloc (namesalloc);

  if (!names || (flags && !dents))
    return;

  for (;;)
    {
      int done;

#ifdef _WIN32
      done = !dirp;
#else
      errno = 0;
      entp = readdir (dirp);
      done = !entp;
#endif

      if (done)
        {
#ifndef _WIN32
          int old_errno = errno;
          closedir (dirp);
          errno = old_errno;

          if (errno)
            break;
#endif

          /* sort etc. */
          req->int1   = flags;
          req->result = dentoffs;

          if (flags & EIO_READDIR_STAT_ORDER)
            eio_dent_sort (dents, dentoffs, flags & EIO_READDIR_DIRS_FIRST ? 7 : 0, inode_bits);
          else if (flags & EIO_READDIR_DIRS_FIRST)
            if (flags & EIO_READDIR_FOUND_UNKNOWN)
              eio_dent_sort (dents, dentoffs, 7, inode_bits); /* sort by score and inode */
            else
              {
                /* in this case, all is known, and we just put dirs first and sort them */
                eio_dirent *oth = dents + dentoffs;
                eio_dirent *dir = dents;

                /* now partition dirs to the front, and non-dirs to the back */
                /* by walking from both sides and swapping if necessary */
                while (oth > dir)
                  {
                    if (dir->type == EIO_DT_DIR)
                      ++dir;
                    else if ((--oth)->type == EIO_DT_DIR)
                      {
                        eio_dirent tmp = *dir; *dir = *oth; *oth = tmp;

                        ++dir;
                      }
                  }

                /* now sort the dirs only (dirs all have the same score) */
                eio_dent_sort (dents, dir - dents, 0, inode_bits);
              }

          break;
        }

      /* now add the entry to our list(s) */
      name = D_NAME (entp);

      /* skip . and .. entries */
      if (name [0] != '.' || (name [1] && (name [1] != '.' || name [2])))
        {
          int len = D_NAMLEN (entp) + 1;

          while (ecb_expect_false (namesoffs + len > namesalloc))
            {
              namesalloc *= 2;
              req->ptr2 = names = realloc (names, namesalloc);

              if (!names)
                break;
            }

          memcpy (names + namesoffs, name, len);

          if (dents)
            {
              struct eio_dirent *ent;

              if (ecb_expect_false (dentoffs == dentalloc))
                {
                  dentalloc *= 2;
                  req->ptr1 = dents = realloc (dents, dentalloc * sizeof (eio_dirent));

                  if (!dents)
                    break;
                }

              ent = dents + dentoffs;

              ent->nameofs = namesoffs; /* rather dirtily we store the offset in the pointer */
              ent->namelen = len - 1;
              ent->inode   = D_INO (entp);

              inode_bits |= ent->inode;

              switch (D_TYPE (entp))
                {
                  default:
                    ent->type = EIO_DT_UNKNOWN;
                    flags |= EIO_READDIR_FOUND_UNKNOWN;
                    break;

                  #ifdef DT_FIFO
                  case DT_FIFO: ent->type = EIO_DT_FIFO; break;
                  #endif
                  #ifdef DT_CHR
                  case DT_CHR:  ent->type = EIO_DT_CHR;  break;
                  #endif
                  #ifdef DT_MPC
                  case DT_MPC:  ent->type = EIO_DT_MPC;  break;
                  #endif
                  #ifdef DT_DIR
                  case DT_DIR:  ent->type = EIO_DT_DIR;  break;
                  #endif
                  #ifdef DT_NAM
                  case DT_NAM:  ent->type = EIO_DT_NAM;  break;
                  #endif
                  #ifdef DT_BLK
                  case DT_BLK:  ent->type = EIO_DT_BLK;  break;
                  #endif
                  #ifdef DT_MPB
                  case DT_MPB:  ent->type = EIO_DT_MPB;  break;
                  #endif
                  #ifdef DT_REG
                  case DT_REG:  ent->type = EIO_DT_REG;  break;
                  #endif
                  #ifdef DT_NWK
                  case DT_NWK:  ent->type = EIO_DT_NWK;  break;
                  #endif
                  #ifdef DT_CMP
                  case DT_CMP:  ent->type = EIO_DT_CMP;  break;
                  #endif
                  #ifdef DT_LNK
                  case DT_LNK:  ent->type = EIO_DT_LNK;  break;
                  #endif
                  #ifdef DT_SOCK
                  case DT_SOCK: ent->type = EIO_DT_SOCK; break;
                  #endif
                  #ifdef DT_DOOR
                  case DT_DOOR: ent->type = EIO_DT_DOOR; break;
                  #endif
                  #ifdef DT_WHT
                  case DT_WHT:  ent->type = EIO_DT_WHT;  break;
                  #endif
                }

              ent->score = 7;

              if (flags & EIO_READDIR_DIRS_FIRST)
                {
                  if (ent->type == EIO_DT_UNKNOWN)
                    {
                      if (*name == '.') /* leading dots are likely directories, and, in any case, rare */
                        ent->score = 1;
                      else if (!strchr (name, '.')) /* absence of dots indicates likely dirs */
                        ent->score = len <= 2 ? 4 - len : len <= 4 ? 4 : len <= 7 ? 5 : 6; /* shorter == more likely dir, but avoid too many classes */
                    }
                  else if (ent->type == EIO_DT_DIR)
                    ent->score = 0;
                }
            }

          namesoffs += len;
          ++dentoffs;
        }

      if (EIO_CANCELLED (req))
        {
          errno = ECANCELED;
          break;
        }

#ifdef _WIN32
      if (!FindNextFile (dirp, &entp))
        {
          FindClose (dirp);
          dirp = 0;
        }
#endif
    }
}

/*****************************************************************************/
/* working directory stuff */
/* various deficiencies in the posix 2008 api force us to */
/* keep the absolute path in string form at all times */
/* fuck yeah. */

#if !HAVE_AT

/* a bit like realpath, but usually faster because it doesn't have to return */
2008 |
/* an absolute or canonical path */ |
2009 |
static const char * |
2010 |
wd_expand (struct tmpbuf *tmpbuf, eio_wd wd, const char *path) |
2011 |
{ |
2012 |
if (!wd || *path == '/') |
2013 |
return path; |
2014 |
|
2015 |
if (path [0] == '.' && !path [1]) |
2016 |
return wd->str; |
2017 |
|
2018 |
{ |
2019 |
int l1 = wd->len; |
2020 |
int l2 = strlen (path); |
2021 |
|
2022 |
char *res = tmpbuf_get (tmpbuf, l1 + l2 + 2); |
2023 |
|
2024 |
memcpy (res, wd->str, l1); |
2025 |
res [l1] = '/'; |
2026 |
memcpy (res + l1 + 1, path, l2 + 1); |
2027 |
|
2028 |
return res; |
2029 |
} |
2030 |
} |

#endif

static eio_wd
eio__wd_open_sync (struct tmpbuf *tmpbuf, eio_wd wd, const char *path)
{
  int fd;
  eio_wd res;
  int len = eio__realpath (tmpbuf, wd, path);

  if (len < 0)
    return EIO_INVALID_WD;

#if HAVE_AT
  fd = openat (WD2FD (wd), path, O_CLOEXEC | O_SEARCH | O_DIRECTORY);

  if (fd < 0)
    return EIO_INVALID_WD;
#endif

  res = malloc (sizeof (*res) + len); /* one extra 0-byte */

#if HAVE_AT
  res->fd = fd;
#endif

  res->len = len;
  memcpy (res->str, tmpbuf->ptr, len);
  res->str [len] = 0;

  return res;
}

eio_wd
eio_wd_open_sync (eio_wd wd, const char *path)
{
  struct tmpbuf tmpbuf = { 0 };
  wd = eio__wd_open_sync (&tmpbuf, wd, path);
  free (tmpbuf.ptr);

  return wd;
}

void
eio_wd_close_sync (eio_wd wd)
{
  if (wd != EIO_INVALID_WD && wd != EIO_CWD)
    {
#if HAVE_AT
      close (wd->fd);
#endif
      free (wd);
    }
}
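
/* Usage sketch (illustrative, not part of the original source): resolve a
 * directory handle once, use it, and release it again.  Error handling
 * beyond the EIO_INVALID_WD check is omitted.
 *
 *   eio_wd wd = eio_wd_open_sync (EIO_CWD, "/var/log");
 *
 *   if (wd != EIO_INVALID_WD)
 *     {
 *       use_wd_for_relative_requests (wd);   hypothetical caller code
 *       eio_wd_close_sync (wd);
 *     }
 */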

#if HAVE_AT

/* they forgot these */

static int
eio__truncateat (int dirfd, const char *path, off_t length)
{
  int fd = openat (dirfd, path, O_WRONLY | O_CLOEXEC);
  int res;

  if (fd < 0)
    return fd;

  res = ftruncate (fd, length);
  close (fd);
  return res;
}

static int
eio__statvfsat (int dirfd, const char *path, struct statvfs *buf)
{
  int fd = openat (dirfd, path, O_SEARCH | O_CLOEXEC);
  int res;

  if (fd < 0)
    return fd;

  res = fstatvfs (fd, buf);
  close (fd);
  return res;
}

#endif

/*****************************************************************************/

/* allocate req->ptr2 on demand (marking it for freeing); on failure,
 * set ENOMEM and break out of the enclosing switch in eio_execute */
#define ALLOC(len)                              \
  if (!req->ptr2)                               \
    {                                           \
      X_LOCK (wrklock);                         \
      req->flags |= EIO_FLAG_PTR2_FREE;         \
      X_UNLOCK (wrklock);                       \
      req->ptr2 = malloc (len);                 \
      if (!req->ptr2)                           \
        {                                       \
          errno = ENOMEM;                       \
          req->result = -1;                     \
          break;                                \
        }                                       \
    }

static void ecb_noinline ecb_cold
etp_proc_init (void)
{
#if HAVE_PRCTL_SET_NAME
  /* provide a more sensible "thread name" */
  char name[16 + 1];
  const int namelen = sizeof (name) - 1;
  int len;

  prctl (PR_GET_NAME, (unsigned long)name, 0, 0, 0);
  name [namelen] = 0;
  len = strlen (name);
  strcpy (name + (len <= namelen - 4 ? len : namelen - 4), "/eio");
  prctl (PR_SET_NAME, (unsigned long)name, 0, 0, 0);
#endif
}

X_THREAD_PROC (etp_proc)
{
  ETP_REQ *req;
  struct timespec ts;
  etp_worker *self = (etp_worker *)thr_arg;

  etp_proc_init ();

  /* try to distribute timeouts somewhat evenly */
  ts.tv_nsec = ((unsigned long)self & 1023UL) * (1000000000UL / 1024UL);

  for (;;)
    {
      ts.tv_sec = 0;

      X_LOCK (reqlock);

      for (;;)
        {
          req = reqq_shift (&req_queue);

          if (req)
            break;

          if (ts.tv_sec == 1) /* no request, but timeout detected, let's quit */
            {
              X_UNLOCK (reqlock);
              X_LOCK (wrklock);
              --started;
              X_UNLOCK (wrklock);
              goto quit;
            }

          ++idle;

          if (idle <= max_idle)
            /* we are allowed to idle, so do so without any timeout */
            X_COND_WAIT (reqwait, reqlock);
          else
            {
              /* initialise timeout once */
              if (!ts.tv_sec)
                ts.tv_sec = time (0) + idle_timeout;

              if (X_COND_TIMEDWAIT (reqwait, reqlock, ts) == ETIMEDOUT)
                ts.tv_sec = 1; /* assuming this is not a value computed above... */
            }

          --idle;
        }

      --nready;

      X_UNLOCK (reqlock);

      if (req->type < 0)
        goto quit;

      ETP_EXECUTE (self, req);

      X_LOCK (reslock);

      ++npending;

      if (!reqq_push (&res_queue, req) && want_poll_cb)
        want_poll_cb ();

      etp_worker_clear (self);

      X_UNLOCK (reslock);
    }

quit:
  free (req);

  X_LOCK (wrklock);
  etp_worker_free (self);
  X_UNLOCK (wrklock);

  return 0;
}

/*****************************************************************************/

int ecb_cold
eio_init (void (*want_poll)(void), void (*done_poll)(void))
{
#if !HAVE_PREADWRITE
  X_MUTEX_CREATE (preadwritelock);
#endif

  return etp_init (want_poll, done_poll);
}
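
/* Integration sketch (illustrative, not part of the original source): a
 * minimal busy-polling driver.  A real program would wake its event loop
 * from want_poll and call eio_poll from there instead of spinning.
 * eio_init, eio_poll and eio_nreqs are declared in eio.h; want_poll and
 * done_poll are user callbacks.
 *
 *   static void want_poll (void) { }
 *   static void done_poll (void) { }
 *
 *   int
 *   main (void)
 *   {
 *     if (eio_init (want_poll, done_poll))
 *       abort ();
 *
 *     eio_nop (0, 0, 0);       submit requests here
 *
 *     while (eio_nreqs ())
 *       eio_poll ();
 *
 *     return 0;
 *   }
 */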

ecb_inline void
eio_api_destroy (eio_req *req)
{
  free (req);
}

#define REQ(rtype)                                            \
  eio_req *req;                                               \
                                                              \
  req = (eio_req *)calloc (1, sizeof *req);                   \
  if (!req)                                                   \
    return 0;                                                 \
                                                              \
  req->type = rtype;                                          \
  req->pri = pri;                                             \
  req->finish = cb;                                           \
  req->data = data;                                           \
  req->destroy = eio_api_destroy;

#define SEND eio_submit (req); return req

#define PATH                                                  \
  req->flags |= EIO_FLAG_PTR1_FREE;                           \
  req->ptr1 = strdup (path);                                  \
  if (!req->ptr1)                                             \
    {                                                         \
      eio_api_destroy (req);                                  \
      return 0;                                               \
    }
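
/* Expansion sketch (illustrative): once the REQ/PATH/SEND macros above are
 * expanded, a one-path wrapper such as eio_unlink below behaves roughly like:
 *
 *   eio_req *req = (eio_req *)calloc (1, sizeof *req);
 *   if (!req)
 *     return 0;
 *
 *   req->type    = EIO_UNLINK;
 *   req->pri     = pri;
 *   req->finish  = cb;
 *   req->data    = data;
 *   req->destroy = eio_api_destroy;
 *
 *   req->flags |= EIO_FLAG_PTR1_FREE;
 *   req->ptr1   = strdup (path);
 *   if (!req->ptr1)
 *     {
 *       eio_api_destroy (req);
 *       return 0;
 *     }
 *
 *   eio_submit (req);
 *   return req;
 */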

static void
eio_execute (etp_worker *self, eio_req *req)
{
#if HAVE_AT
  int dirfd;
#else
  const char *path;
#endif

  if (ecb_expect_false (EIO_CANCELLED (req)))
    {
      req->result  = -1;
      req->errorno = ECANCELED;
      return;
    }

  if (ecb_expect_false (req->wd == EIO_INVALID_WD))
    {
      req->result  = -1;
      req->errorno = ENOENT;
      return;
    }

  if (req->type >= EIO_OPEN)
    {
#if HAVE_AT
      dirfd = WD2FD (req->wd);
#else
      path = wd_expand (&self->tmpbuf, req->wd, req->ptr1);
#endif
    }

  switch (req->type)
    {
      case EIO_WD_OPEN:   req->wd = eio__wd_open_sync (&self->tmpbuf, req->wd, req->ptr1);
                          req->result = req->wd == EIO_INVALID_WD ? -1 : 0;
                          break;
      case EIO_WD_CLOSE:  req->result = 0;
                          eio_wd_close_sync (req->wd); break;

      case EIO_READ:      ALLOC (req->size);
                          req->result = req->offs >= 0
                                      ? pread (req->int1, req->ptr2, req->size, req->offs)
                                      : read (req->int1, req->ptr2, req->size); break;
      case EIO_WRITE:     req->result = req->offs >= 0
                                      ? pwrite (req->int1, req->ptr2, req->size, req->offs)
                                      : write (req->int1, req->ptr2, req->size); break;

      case EIO_READAHEAD: req->result = readahead (req->int1, req->offs, req->size); break;
      case EIO_SENDFILE:  req->result = eio__sendfile (req->int1, req->int2, req->offs, req->size); break;

#if HAVE_AT

      case EIO_STAT:      ALLOC (sizeof (EIO_STRUCT_STAT));
                          req->result = fstatat (dirfd, req->ptr1, (EIO_STRUCT_STAT *)req->ptr2, 0); break;
      case EIO_LSTAT:     ALLOC (sizeof (EIO_STRUCT_STAT));
                          req->result = fstatat (dirfd, req->ptr1, (EIO_STRUCT_STAT *)req->ptr2, AT_SYMLINK_NOFOLLOW); break;
      case EIO_CHOWN:     req->result = fchownat (dirfd, req->ptr1, req->int2, req->int3, 0); break;
      case EIO_CHMOD:     req->result = fchmodat (dirfd, req->ptr1, (mode_t)req->int2, 0); break;
      case EIO_TRUNCATE:  req->result = eio__truncateat (dirfd, req->ptr1, req->offs); break;
      case EIO_OPEN:      req->result = openat (dirfd, req->ptr1, req->int1, (mode_t)req->int2); break;

      case EIO_UNLINK:    req->result = unlinkat (dirfd, req->ptr1, 0); break;
      case EIO_RMDIR:     req->result = unlinkat (dirfd, req->ptr1, AT_REMOVEDIR); break;
      case EIO_MKDIR:     req->result = mkdirat (dirfd, req->ptr1, (mode_t)req->int2); break;
      case EIO_RENAME:    req->result = renameat (dirfd, req->ptr1, WD2FD ((eio_wd)req->int3), req->ptr2); break;
      case EIO_LINK:      req->result = linkat (dirfd, req->ptr1, WD2FD ((eio_wd)req->int3), req->ptr2, 0); break;
      case EIO_SYMLINK:   req->result = symlinkat (req->ptr1, dirfd, req->ptr2); break;
      case EIO_MKNOD:     req->result = mknodat (dirfd, req->ptr1, (mode_t)req->int2, (dev_t)req->offs); break;
      case EIO_READLINK:  ALLOC (PATH_MAX);
                          req->result = readlinkat (dirfd, req->ptr1, req->ptr2, PATH_MAX); break;
      case EIO_STATVFS:   ALLOC (sizeof (EIO_STRUCT_STATVFS));
                          req->result = eio__statvfsat (dirfd, req->ptr1, (EIO_STRUCT_STATVFS *)req->ptr2); break;

      case EIO_UTIME:
      case EIO_FUTIME:
        {
          struct timespec ts[2];
          struct timespec *times;

          if (req->nv1 != -1. || req->nv2 != -1.)
            {
              ts[0].tv_sec  = req->nv1;
              ts[0].tv_nsec = (req->nv1 - ts[0].tv_sec) * 1e9;
              ts[1].tv_sec  = req->nv2;
              ts[1].tv_nsec = (req->nv2 - ts[1].tv_sec) * 1e9;

              times = ts;
            }
          else
            times = 0;

          req->result = req->type == EIO_FUTIME
                        ? futimens (req->int1, times)
                        : utimensat (dirfd, req->ptr1, times, 0);
        }
        break;

#else

      case EIO_STAT:      ALLOC (sizeof (EIO_STRUCT_STAT));
                          req->result = stat (path, (EIO_STRUCT_STAT *)req->ptr2); break;
      case EIO_LSTAT:     ALLOC (sizeof (EIO_STRUCT_STAT));
                          req->result = lstat (path, (EIO_STRUCT_STAT *)req->ptr2); break;
      case EIO_CHOWN:     req->result = chown (path, req->int2, req->int3); break;
      case EIO_CHMOD:     req->result = chmod (path, (mode_t)req->int2); break;
      case EIO_TRUNCATE:  req->result = truncate (path, req->offs); break;
      case EIO_OPEN:      req->result = open (path, req->int1, (mode_t)req->int2); break;

      case EIO_UNLINK:    req->result = unlink (path); break;
      case EIO_RMDIR:     req->result = rmdir (path); break;
      case EIO_MKDIR:     req->result = mkdir (path, (mode_t)req->int2); break;
      case EIO_RENAME:    req->result = rename (path, req->ptr2); break;
      case EIO_LINK:      req->result = link (path, req->ptr2); break;
      case EIO_SYMLINK:   req->result = symlink (path, req->ptr2); break;
      case EIO_MKNOD:     req->result = mknod (path, (mode_t)req->int2, (dev_t)req->offs); break;
      case EIO_READLINK:  ALLOC (PATH_MAX);
                          req->result = readlink (path, req->ptr2, PATH_MAX); break;
      case EIO_STATVFS:   ALLOC (sizeof (EIO_STRUCT_STATVFS));
                          req->result = statvfs (path, (EIO_STRUCT_STATVFS *)req->ptr2); break;

      case EIO_UTIME:
      case EIO_FUTIME:
        {
          struct timeval tv[2];
          struct timeval *times;

          if (req->nv1 != -1. || req->nv2 != -1.)
            {
              tv[0].tv_sec  = req->nv1;
              tv[0].tv_usec = (req->nv1 - tv[0].tv_sec) * 1e6;
              tv[1].tv_sec  = req->nv2;
              tv[1].tv_usec = (req->nv2 - tv[1].tv_sec) * 1e6;

              times = tv;
            }
          else
            times = 0;

          req->result = req->type == EIO_FUTIME
                        ? futimes (req->int1, times)
                        : utimes (req->ptr1, times);
        }
        break;

#endif

      case EIO_REALPATH:  if (0 <= (req->result = eio__realpath (&self->tmpbuf, req->wd, req->ptr1)))
                            {
                              ALLOC (req->result);
                              memcpy (req->ptr2, self->tmpbuf.ptr, req->result);
                            }
                          break;

      case EIO_FSTAT:     ALLOC (sizeof (EIO_STRUCT_STAT));
                          req->result = fstat (req->int1, (EIO_STRUCT_STAT *)req->ptr2); break;

      case EIO_FSTATVFS:  ALLOC (sizeof (EIO_STRUCT_STATVFS));
                          req->result = fstatvfs (req->int1, (EIO_STRUCT_STATVFS *)req->ptr2); break;

      case EIO_FCHOWN:    req->result = fchown (req->int1, req->int2, req->int3); break;
      case EIO_FCHMOD:    req->result = fchmod (req->int1, (mode_t)req->int2); break;
      case EIO_FTRUNCATE: req->result = ftruncate (req->int1, req->offs); break;

      case EIO_CLOSE:     req->result = close (req->int1); break;
      case EIO_DUP2:      req->result = dup2 (req->int1, req->int2); break;
      case EIO_SYNC:      req->result = 0; sync (); break;
      case EIO_FSYNC:     req->result = fsync (req->int1); break;
      case EIO_FDATASYNC: req->result = fdatasync (req->int1); break;
      case EIO_SYNCFS:    req->result = eio__syncfs (req->int1); break;
      case EIO_SYNC_FILE_RANGE: req->result = eio__sync_file_range (req->int1, req->offs, req->size, req->int2); break;
      case EIO_MSYNC:     req->result = eio__msync (req->ptr2, req->size, req->int1); break;
      case EIO_MTOUCH:    req->result = eio__mtouch (req); break;
      case EIO_MLOCK:     req->result = eio__mlock (req->ptr2, req->size); break;
      case EIO_MLOCKALL:  req->result = eio__mlockall (req->int1); break;
      case EIO_FALLOCATE: req->result = eio__fallocate (req->int1, req->int2, req->offs, req->size); break;

      case EIO_READDIR:   eio__scandir (req, self); break;

      case EIO_BUSY:
#ifdef _WIN32
        Sleep (req->nv1 * 1e3);
#else
        {
          struct timeval tv;

          tv.tv_sec  = req->nv1;
          tv.tv_usec = (req->nv1 - tv.tv_sec) * 1e6;

          req->result = select (0, 0, 0, 0, &tv);
        }
#endif
        break;

      case EIO_GROUP:
        abort (); /* handled in eio_request */

      case EIO_NOP:
        req->result = 0;
        break;

      case EIO_CUSTOM:
        req->feed (req);
        break;

      default:
        errno = ENOSYS;
        req->result = -1;
        break;
    }

  req->errorno = errno;
}

#ifndef EIO_NO_WRAPPERS

eio_req *eio_wd_open (const char *path, int pri, eio_cb cb, void *data)
{
  REQ (EIO_WD_OPEN); PATH; SEND;
}

eio_req *eio_wd_close (eio_wd wd, int pri, eio_cb cb, void *data)
{
  REQ (EIO_WD_CLOSE); req->wd = wd; SEND;
}

eio_req *eio_nop (int pri, eio_cb cb, void *data)
{
  REQ (EIO_NOP); SEND;
}

eio_req *eio_busy (double delay, int pri, eio_cb cb, void *data)
{
  REQ (EIO_BUSY); req->nv1 = delay; SEND;
}

eio_req *eio_sync (int pri, eio_cb cb, void *data)
{
  REQ (EIO_SYNC); SEND;
}

eio_req *eio_fsync (int fd, int pri, eio_cb cb, void *data)
{
  REQ (EIO_FSYNC); req->int1 = fd; SEND;
}

eio_req *eio_msync (void *addr, size_t length, int flags, int pri, eio_cb cb, void *data)
{
  REQ (EIO_MSYNC); req->ptr2 = addr; req->size = length; req->int1 = flags; SEND;
}

eio_req *eio_fdatasync (int fd, int pri, eio_cb cb, void *data)
{
  REQ (EIO_FDATASYNC); req->int1 = fd; SEND;
}

eio_req *eio_syncfs (int fd, int pri, eio_cb cb, void *data)
{
  REQ (EIO_SYNCFS); req->int1 = fd; SEND;
}

eio_req *eio_sync_file_range (int fd, off_t offset, size_t nbytes, unsigned int flags, int pri, eio_cb cb, void *data)
{
  REQ (EIO_SYNC_FILE_RANGE); req->int1 = fd; req->offs = offset; req->size = nbytes; req->int2 = flags; SEND;
}

eio_req *eio_mtouch (void *addr, size_t length, int flags, int pri, eio_cb cb, void *data)
{
  REQ (EIO_MTOUCH); req->ptr2 = addr; req->size = length; req->int1 = flags; SEND;
}

eio_req *eio_mlock (void *addr, size_t length, int pri, eio_cb cb, void *data)
{
  REQ (EIO_MLOCK); req->ptr2 = addr; req->size = length; SEND;
}

eio_req *eio_mlockall (int flags, int pri, eio_cb cb, void *data)
{
  REQ (EIO_MLOCKALL); req->int1 = flags; SEND;
}

eio_req *eio_fallocate (int fd, int mode, off_t offset, size_t len, int pri, eio_cb cb, void *data)
{
  REQ (EIO_FALLOCATE); req->int1 = fd; req->int2 = mode; req->offs = offset; req->size = len; SEND;
}

eio_req *eio_close (int fd, int pri, eio_cb cb, void *data)
{
  REQ (EIO_CLOSE); req->int1 = fd; SEND;
}

eio_req *eio_readahead (int fd, off_t offset, size_t length, int pri, eio_cb cb, void *data)
{
  REQ (EIO_READAHEAD); req->int1 = fd; req->offs = offset; req->size = length; SEND;
}

eio_req *eio_read (int fd, void *buf, size_t length, off_t offset, int pri, eio_cb cb, void *data)
{
  REQ (EIO_READ); req->int1 = fd; req->offs = offset; req->size = length; req->ptr2 = buf; SEND;
}

eio_req *eio_write (int fd, void *buf, size_t length, off_t offset, int pri, eio_cb cb, void *data)
{
  REQ (EIO_WRITE); req->int1 = fd; req->offs = offset; req->size = length; req->ptr2 = buf; SEND;
}

eio_req *eio_fstat (int fd, int pri, eio_cb cb, void *data)
{
  REQ (EIO_FSTAT); req->int1 = fd; SEND;
}

eio_req *eio_fstatvfs (int fd, int pri, eio_cb cb, void *data)
{
  REQ (EIO_FSTATVFS); req->int1 = fd; SEND;
}

eio_req *eio_futime (int fd, double atime, double mtime, int pri, eio_cb cb, void *data)
{
  REQ (EIO_FUTIME); req->int1 = fd; req->nv1 = atime; req->nv2 = mtime; SEND;
}

eio_req *eio_ftruncate (int fd, off_t offset, int pri, eio_cb cb, void *data)
{
  REQ (EIO_FTRUNCATE); req->int1 = fd; req->offs = offset; SEND;
}

eio_req *eio_fchmod (int fd, mode_t mode, int pri, eio_cb cb, void *data)
{
  REQ (EIO_FCHMOD); req->int1 = fd; req->int2 = (long)mode; SEND;
}

eio_req *eio_fchown (int fd, eio_uid_t uid, eio_gid_t gid, int pri, eio_cb cb, void *data)
{
  REQ (EIO_FCHOWN); req->int1 = fd; req->int2 = (long)uid; req->int3 = (long)gid; SEND;
}

eio_req *eio_dup2 (int fd, int fd2, int pri, eio_cb cb, void *data)
{
  REQ (EIO_DUP2); req->int1 = fd; req->int2 = fd2; SEND;
}

eio_req *eio_sendfile (int out_fd, int in_fd, off_t in_offset, size_t length, int pri, eio_cb cb, void *data)
{
  REQ (EIO_SENDFILE); req->int1 = out_fd; req->int2 = in_fd; req->offs = in_offset; req->size = length; SEND;
}

eio_req *eio_open (const char *path, int flags, mode_t mode, int pri, eio_cb cb, void *data)
{
  REQ (EIO_OPEN); PATH; req->int1 = flags; req->int2 = (long)mode; SEND;
}

eio_req *eio_utime (const char *path, double atime, double mtime, int pri, eio_cb cb, void *data)
{
  REQ (EIO_UTIME); PATH; req->nv1 = atime; req->nv2 = mtime; SEND;
}

eio_req *eio_truncate (const char *path, off_t offset, int pri, eio_cb cb, void *data)
{
  REQ (EIO_TRUNCATE); PATH; req->offs = offset; SEND;
}

eio_req *eio_chown (const char *path, eio_uid_t uid, eio_gid_t gid, int pri, eio_cb cb, void *data)
{
  REQ (EIO_CHOWN); PATH; req->int2 = (long)uid; req->int3 = (long)gid; SEND;
}

eio_req *eio_chmod (const char *path, mode_t mode, int pri, eio_cb cb, void *data)
{
  REQ (EIO_CHMOD); PATH; req->int2 = (long)mode; SEND;
}

eio_req *eio_mkdir (const char *path, mode_t mode, int pri, eio_cb cb, void *data)
{
  REQ (EIO_MKDIR); PATH; req->int2 = (long)mode; SEND;
}

static eio_req *
eio__1path (int type, const char *path, int pri, eio_cb cb, void *data)
{
  REQ (type); PATH; SEND;
}

eio_req *eio_readlink (const char *path, int pri, eio_cb cb, void *data)
{
  return eio__1path (EIO_READLINK, path, pri, cb, data);
}

eio_req *eio_realpath (const char *path, int pri, eio_cb cb, void *data)
{
  return eio__1path (EIO_REALPATH, path, pri, cb, data);
}

eio_req *eio_stat (const char *path, int pri, eio_cb cb, void *data)
{
  return eio__1path (EIO_STAT, path, pri, cb, data);
}

eio_req *eio_lstat (const char *path, int pri, eio_cb cb, void *data)
{
  return eio__1path (EIO_LSTAT, path, pri, cb, data);
}

eio_req *eio_statvfs (const char *path, int pri, eio_cb cb, void *data)
{
  return eio__1path (EIO_STATVFS, path, pri, cb, data);
}

eio_req *eio_unlink (const char *path, int pri, eio_cb cb, void *data)
{
  return eio__1path (EIO_UNLINK, path, pri, cb, data);
}

eio_req *eio_rmdir (const char *path, int pri, eio_cb cb, void *data)
{
  return eio__1path (EIO_RMDIR, path, pri, cb, data);
}

eio_req *eio_readdir (const char *path, int flags, int pri, eio_cb cb, void *data)
{
  REQ (EIO_READDIR); PATH; req->int1 = flags; SEND;
}

eio_req *eio_mknod (const char *path, mode_t mode, dev_t dev, int pri, eio_cb cb, void *data)
{
  REQ (EIO_MKNOD); PATH; req->int2 = (long)mode; req->offs = (off_t)dev; SEND;
}

static eio_req *
eio__2path (int type, const char *path, const char *new_path, int pri, eio_cb cb, void *data)
{
  REQ (type); PATH;

  req->flags |= EIO_FLAG_PTR2_FREE;
  req->ptr2 = strdup (new_path);
  if (!req->ptr2)
    {
      eio_api_destroy (req);
      return 0;
    }

  SEND;
}

eio_req *eio_link (const char *path, const char *new_path, int pri, eio_cb cb, void *data)
{
  return eio__2path (EIO_LINK, path, new_path, pri, cb, data);
}

eio_req *eio_symlink (const char *path, const char *new_path, int pri, eio_cb cb, void *data)
{
  return eio__2path (EIO_SYMLINK, path, new_path, pri, cb, data);
}

eio_req *eio_rename (const char *path, const char *new_path, int pri, eio_cb cb, void *data)
{
  return eio__2path (EIO_RENAME, path, new_path, pri, cb, data);
}

eio_req *eio_custom (void (*execute)(eio_req *), int pri, eio_cb cb, void *data)
{
  REQ (EIO_CUSTOM); req->feed = execute; SEND;
}
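
/* Usage sketch (illustrative, not part of the original source): EIO_CUSTOM
 * runs an arbitrary blocking function on a pool thread.  my_work and my_done
 * are hypothetical user callbacks.
 *
 *   static void
 *   my_work (eio_req *req)
 *   {
 *     req->result = 0;   do the blocking work here, set result/errorno
 *   }
 *
 *   static int
 *   my_done (eio_req *req)
 *   {
 *     return 0;          invoked from eio_poll once my_work has finished
 *   }
 *
 *   eio_custom (my_work, 0, my_done, 0);
 */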

#endif

eio_req *eio_grp (eio_cb cb, void *data)
{
  const int pri = EIO_PRI_MAX;

  REQ (EIO_GROUP); SEND;
}

#undef REQ
#undef PATH
#undef SEND

/*****************************************************************************/
/* grp functions */

void
eio_grp_feed (eio_req *grp, void (*feed)(eio_req *req), int limit)
{
  grp->int2 = limit;
  grp->feed = feed;

  grp_try_feed (grp);
}

void
eio_grp_limit (eio_req *grp, int limit)
{
  grp->int2 = limit;

  grp_try_feed (grp);
}

void
eio_grp_add (eio_req *grp, eio_req *req)
{
  assert (("cannot add requests to IO::AIO::GRP after the group finished", grp->int1 != 2));

  grp->flags |= EIO_FLAG_GROUPADD;

  ++grp->size;
  req->grp = grp;

  req->grp_prev = 0;
  req->grp_next = grp->grp_first;

  if (grp->grp_first)
    grp->grp_first->grp_prev = req;

  grp->grp_first = req;
}
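
/* Usage sketch (illustrative, not part of the original source): a group
 * request collects sub-requests and finishes once all of them have finished.
 * my_grp_done is a hypothetical user callback.
 *
 *   eio_req *grp = eio_grp (my_grp_done, 0);
 *
 *   eio_grp_add (grp, eio_unlink ("/tmp/a", 0, 0, 0));
 *   eio_grp_add (grp, eio_unlink ("/tmp/b", 0, 0, 0));
 */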

/*****************************************************************************/
/* misc garbage */

eio_ssize_t
eio_sendfile_sync (int ofd, int ifd, off_t offset, size_t count)
{
  return eio__sendfile (ofd, ifd, offset, count);
}