/cvs/libeio/eio.c

Comparing libeio/eio.c (file contents):
Revision 1.27 by root, Wed Oct 22 18:15:36 2008 UTC vs.
Revision 1.83 by root, Thu Jul 7 22:36:18 2011 UTC

1/* 1/*
2 * libeio implementation 2 * libeio implementation
3 * 3 *
4 * Copyright (c) 2007,2008 Marc Alexander Lehmann <libeio@schmorp.de> 4 * Copyright (c) 2007,2008,2009,2010,2011 Marc Alexander Lehmann <libeio@schmorp.de>
5 * All rights reserved. 5 * All rights reserved.
6 * 6 *
7 * Redistribution and use in source and binary forms, with or without modifica- 7 * Redistribution and use in source and binary forms, with or without modifica-
8 * tion, are permitted provided that the following conditions are met: 8 * tion, are permitted provided that the following conditions are met:
9 * 9 *
35 * and other provisions required by the GPL. If you do not delete the 35 * and other provisions required by the GPL. If you do not delete the
36 * provisions above, a recipient may use your version of this file under 36 * provisions above, a recipient may use your version of this file under
37 * either the BSD or the GPL. 37 * either the BSD or the GPL.
38 */ 38 */
39 39
40#ifndef _WIN32
41# include "config.h"
42#endif
43
40#include "eio.h" 44#include "eio.h"
45#include "ecb.h"
46
47#ifdef EIO_STACKSIZE
48# define XTHREAD_STACKSIZE EIO_STACKSIZE
49#endif
41#include "xthread.h" 50#include "xthread.h"
42 51
43#include <errno.h> 52#include <errno.h>
44#include <stddef.h> 53#include <stddef.h>
45#include <stdlib.h> 54#include <stdlib.h>
46#include <string.h> 55#include <string.h>
47#include <errno.h> 56#include <errno.h>
48#include <sys/types.h> 57#include <sys/types.h>
49#include <sys/stat.h> 58#include <sys/stat.h>
59#include <sys/statvfs.h>
50#include <limits.h> 60#include <limits.h>
51#include <fcntl.h> 61#include <fcntl.h>
52#include <assert.h> 62#include <assert.h>
53 63
64/* intptr_t comes from unistd.h, says POSIX/UNIX/tradition */
65/* intptr_t only comes from stdint.h, says idiot openbsd coder */
66#if HAVE_STDINT_H
67# include <stdint.h>
68#endif
69
70#ifndef ECANCELED
71# define ECANCELED EDOM
72#endif
73
74static void eio_destroy (eio_req *req);
75
54#ifndef EIO_FINISH 76#ifndef EIO_FINISH
55# define EIO_FINISH(req) ((req)->finish) && !EIO_CANCELLED (req) ? (req)->finish (req) : 0 77# define EIO_FINISH(req) ((req)->finish) && !EIO_CANCELLED (req) ? (req)->finish (req) : 0
56#endif 78#endif
57 79
58#ifndef EIO_DESTROY 80#ifndef EIO_DESTROY
64#endif 86#endif
65 87
66#ifdef _WIN32 88#ifdef _WIN32
67 89
68 /*doh*/ 90 /*doh*/
69
70#else 91#else
71 92
72# include "config.h"
73# include <sys/time.h> 93# include <sys/time.h>
74# include <sys/select.h> 94# include <sys/select.h>
75# include <unistd.h> 95# include <unistd.h>
76# include <utime.h> 96# include <utime.h>
77# include <signal.h> 97# include <signal.h>
78# include <dirent.h> 98# include <dirent.h>
79 99
100#if _POSIX_MEMLOCK || _POSIX_MEMLOCK_RANGE || _POSIX_MAPPED_FILES
101# include <sys/mman.h>
102#endif
103
104/* POSIX_SOURCE is useless on bsd's, and XOPEN_SOURCE is unreliable there, too */
105# if __FreeBSD__ || defined __NetBSD__ || defined __OpenBSD__
106# define _DIRENT_HAVE_D_TYPE /* sigh */
107# define D_INO(de) (de)->d_fileno
108# define D_NAMLEN(de) (de)->d_namlen
109# elif __linux || defined d_ino || _XOPEN_SOURCE >= 600
110# define D_INO(de) (de)->d_ino
111# endif
112
113#ifdef _D_EXACT_NAMLEN
114# undef D_NAMLEN
115# define D_NAMLEN(de) _D_EXACT_NAMLEN (de)
116#endif
117
118# ifdef _DIRENT_HAVE_D_TYPE
119# define D_TYPE(de) (de)->d_type
120# endif
121
80# ifndef EIO_STRUCT_DIRENT 122# ifndef EIO_STRUCT_DIRENT
81# define EIO_STRUCT_DIRENT struct dirent 123# define EIO_STRUCT_DIRENT struct dirent
82# endif 124# endif
83 125
84#endif 126#endif
85 127
86#if HAVE_SENDFILE 128#if HAVE_SENDFILE
87# if __linux 129# if __linux
88# include <sys/sendfile.h> 130# include <sys/sendfile.h>
89# elif __freebsd 131# elif __FreeBSD__ || defined __APPLE__
90# include <sys/socket.h> 132# include <sys/socket.h>
91# include <sys/uio.h> 133# include <sys/uio.h>
92# elif __hpux 134# elif __hpux
93# include <sys/socket.h> 135# include <sys/socket.h>
94# elif __solaris /* not yet */ 136# elif __solaris
95# include <sys/sendfile.h> 137# include <sys/sendfile.h>
96# else 138# else
97# error sendfile support requested but not available 139# error sendfile support requested but not available
98# endif 140# endif
99#endif 141#endif
100 142
101/* number of seconds after which an idle threads exit */ 143#ifndef D_TYPE
102#define IDLE_TIMEOUT 10 144# define D_TYPE(de) 0
145#endif
146#ifndef D_INO
147# define D_INO(de) 0
148#endif
149#ifndef D_NAMLEN
150# define D_NAMLEN(de) strlen ((de)->d_name)
151#endif
103 152
104/* used for struct dirent, AIX doesn't provide it */ 153/* used for struct dirent, AIX doesn't provide it */
105#ifndef NAME_MAX 154#ifndef NAME_MAX
106# define NAME_MAX 4096 155# define NAME_MAX 4096
156#endif
157
158/* used for readlink etc. */
159#ifndef PATH_MAX
160# define PATH_MAX 4096
107#endif 161#endif
108 162
109/* buffer size for various temporary buffers */ 163/* buffer size for various temporary buffers */
110#define EIO_BUFSIZE 65536 164#define EIO_BUFSIZE 65536
111 165
117 errno = ENOMEM; \ 171 errno = ENOMEM; \
118 if (!eio_buf) \ 172 if (!eio_buf) \
119 return -1; 173 return -1;
120 174
121#define EIO_TICKS ((1000000 + 1023) >> 10) 175#define EIO_TICKS ((1000000 + 1023) >> 10)
122
123/*****************************************************************************/
124
125#if __GNUC__ >= 3
126# define expect(expr,value) __builtin_expect ((expr),(value))
127#else
128# define expect(expr,value) (expr)
129#endif
130
131#define expect_false(expr) expect ((expr) != 0, 0)
132#define expect_true(expr) expect ((expr) != 0, 1)
133
134/*****************************************************************************/
135 176
136#define ETP_PRI_MIN EIO_PRI_MIN 177#define ETP_PRI_MIN EIO_PRI_MIN
137#define ETP_PRI_MAX EIO_PRI_MAX 178#define ETP_PRI_MAX EIO_PRI_MAX
138 179
139struct etp_worker; 180struct etp_worker;
155 if (wrk->dirp) \ 196 if (wrk->dirp) \
156 { \ 197 { \
157 closedir (wrk->dirp); \ 198 closedir (wrk->dirp); \
158 wrk->dirp = 0; \ 199 wrk->dirp = 0; \
159 } 200 }
201
160#define ETP_WORKER_COMMON \ 202#define ETP_WORKER_COMMON \
161 void *dbuf; \ 203 void *dbuf; \
162 DIR *dirp; 204 DIR *dirp;
163 205
164/*****************************************************************************/ 206/*****************************************************************************/
165 207
166#define ETP_NUM_PRI (ETP_PRI_MAX - ETP_PRI_MIN + 1) 208#define ETP_NUM_PRI (ETP_PRI_MAX - ETP_PRI_MIN + 1)
167 209
168/* calculcate time difference in ~1/EIO_TICKS of a second */ 210/* calculate time difference in ~1/EIO_TICKS of a second */
211ecb_inline int
169static int tvdiff (struct timeval *tv1, struct timeval *tv2) 212tvdiff (struct timeval *tv1, struct timeval *tv2)
170{ 213{
171 return (tv2->tv_sec - tv1->tv_sec ) * EIO_TICKS 214 return (tv2->tv_sec - tv1->tv_sec ) * EIO_TICKS
172 + ((tv2->tv_usec - tv1->tv_usec) >> 10); 215 + ((tv2->tv_usec - tv1->tv_usec) >> 10);
173} 216}
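/* illustrative note, not from either revision: EIO_TICKS is
   (1000000 + 1023) >> 10 = 977, so one tick is roughly 1024 microseconds.
   example: tv1 = 10.000000 s and tv2 = 11.500000 s give
   (11 - 10) * 977 + (500000 >> 10) = 977 + 488 = 1465 ticks, about 1.5 s. */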
174 217
181static unsigned int max_poll_reqs; /* reslock */ 224static unsigned int max_poll_reqs; /* reslock */
182 225
183static volatile unsigned int nreqs; /* reqlock */ 226static volatile unsigned int nreqs; /* reqlock */
184static volatile unsigned int nready; /* reqlock */ 227static volatile unsigned int nready; /* reqlock */
185static volatile unsigned int npending; /* reqlock */ 228static volatile unsigned int npending; /* reqlock */
186static volatile unsigned int max_idle = 4; 229static volatile unsigned int max_idle = 4; /* maximum number of threads that can idle indefinitely */
 230static volatile unsigned int idle_timeout = 10; /* number of seconds after which idle threads exit */
187 231
188static mutex_t wrklock = X_MUTEX_INIT; 232static xmutex_t wrklock;
189static mutex_t reslock = X_MUTEX_INIT; 233static xmutex_t reslock;
190static mutex_t reqlock = X_MUTEX_INIT; 234static xmutex_t reqlock;
191static cond_t reqwait = X_COND_INIT; 235static xcond_t reqwait;
192 236
193#if !HAVE_PREADWRITE 237#if !HAVE_PREADWRITE
194/* 238/*
195 * make our pread/pwrite emulation safe against themselves, but not against 239 * make our pread/pwrite emulation safe against themselves, but not against
196 * normal read/write by using a mutex. slows down execution a lot, 240 * normal read/write by using a mutex. slows down execution a lot,
197 * but that's your problem, not mine. 241 * but that's your problem, not mine.
198 */ 242 */
199static mutex_t preadwritelock = X_MUTEX_INIT; 243static xmutex_t preadwritelock = X_MUTEX_INIT;
200#endif 244#endif
201 245
202typedef struct etp_worker 246typedef struct etp_worker
203{ 247{
204 /* locked by wrklock */ 248 /* locked by wrklock */
205 struct etp_worker *prev, *next; 249 struct etp_worker *prev, *next;
206 250
207 thread_t tid; 251 xthread_t tid;
208 252
209 /* locked by reslock, reqlock or wrklock */ 253 /* locked by reslock, reqlock or wrklock */
210 ETP_REQ *req; /* currently processed request */ 254 ETP_REQ *req; /* currently processed request */
211 255
212 ETP_WORKER_COMMON 256 ETP_WORKER_COMMON
217#define ETP_WORKER_LOCK(wrk) X_LOCK (wrklock) 261#define ETP_WORKER_LOCK(wrk) X_LOCK (wrklock)
218#define ETP_WORKER_UNLOCK(wrk) X_UNLOCK (wrklock) 262#define ETP_WORKER_UNLOCK(wrk) X_UNLOCK (wrklock)
219 263
220/* worker threads management */ 264/* worker threads management */
221 265
266static void ecb_cold
222static void etp_worker_clear (etp_worker *wrk) 267etp_worker_clear (etp_worker *wrk)
223{ 268{
224 ETP_WORKER_CLEAR (wrk); 269 ETP_WORKER_CLEAR (wrk);
225} 270}
226 271
272static void ecb_cold
227static void etp_worker_free (etp_worker *wrk) 273etp_worker_free (etp_worker *wrk)
228{ 274{
229 wrk->next->prev = wrk->prev; 275 wrk->next->prev = wrk->prev;
230 wrk->prev->next = wrk->next; 276 wrk->prev->next = wrk->next;
231 277
232 free (wrk); 278 free (wrk);
233} 279}
234 280
235static unsigned int etp_nreqs (void) 281static unsigned int
282etp_nreqs (void)
236{ 283{
237 int retval; 284 int retval;
238 if (WORDACCESS_UNSAFE) X_LOCK (reqlock); 285 if (WORDACCESS_UNSAFE) X_LOCK (reqlock);
239 retval = nreqs; 286 retval = nreqs;
240 if (WORDACCESS_UNSAFE) X_UNLOCK (reqlock); 287 if (WORDACCESS_UNSAFE) X_UNLOCK (reqlock);
241 return retval; 288 return retval;
242} 289}
243 290
244static unsigned int etp_nready (void) 291static unsigned int
292etp_nready (void)
245{ 293{
246 unsigned int retval; 294 unsigned int retval;
247 295
248 if (WORDACCESS_UNSAFE) X_LOCK (reqlock); 296 if (WORDACCESS_UNSAFE) X_LOCK (reqlock);
249 retval = nready; 297 retval = nready;
250 if (WORDACCESS_UNSAFE) X_UNLOCK (reqlock); 298 if (WORDACCESS_UNSAFE) X_UNLOCK (reqlock);
251 299
252 return retval; 300 return retval;
253} 301}
254 302
255static unsigned int etp_npending (void) 303static unsigned int
304etp_npending (void)
256{ 305{
257 unsigned int retval; 306 unsigned int retval;
258 307
259 if (WORDACCESS_UNSAFE) X_LOCK (reqlock); 308 if (WORDACCESS_UNSAFE) X_LOCK (reqlock);
260 retval = npending; 309 retval = npending;
261 if (WORDACCESS_UNSAFE) X_UNLOCK (reqlock); 310 if (WORDACCESS_UNSAFE) X_UNLOCK (reqlock);
262 311
263 return retval; 312 return retval;
264} 313}
265 314
266static unsigned int etp_nthreads (void) 315static unsigned int
316etp_nthreads (void)
267{ 317{
268 unsigned int retval; 318 unsigned int retval;
269 319
270 if (WORDACCESS_UNSAFE) X_LOCK (reqlock); 320 if (WORDACCESS_UNSAFE) X_LOCK (reqlock);
271 retval = started; 321 retval = started;
285} etp_reqq; 335} etp_reqq;
286 336
287static etp_reqq req_queue; 337static etp_reqq req_queue;
288static etp_reqq res_queue; 338static etp_reqq res_queue;
289 339
340static int ecb_noinline
290static int reqq_push (etp_reqq *q, ETP_REQ *req) 341reqq_push (etp_reqq *q, ETP_REQ *req)
291{ 342{
292 int pri = req->pri; 343 int pri = req->pri;
293 req->next = 0; 344 req->next = 0;
294 345
295 if (q->qe[pri]) 346 if (q->qe[pri])
301 q->qe[pri] = q->qs[pri] = req; 352 q->qe[pri] = q->qs[pri] = req;
302 353
303 return q->size++; 354 return q->size++;
304} 355}
305 356
357static ETP_REQ * ecb_noinline
306static ETP_REQ *reqq_shift (etp_reqq *q) 358reqq_shift (etp_reqq *q)
307{ 359{
308 int pri; 360 int pri;
309 361
310 if (!q->size) 362 if (!q->size)
311 return 0; 363 return 0;
326 } 378 }
327 379
328 abort (); 380 abort ();
329} 381}
330 382
383static void ecb_cold
384etp_thread_init (void)
385{
386 X_MUTEX_CREATE (wrklock);
387 X_MUTEX_CREATE (reslock);
388 X_MUTEX_CREATE (reqlock);
389 X_COND_CREATE (reqwait);
390}
391
392static void ecb_cold
331static void etp_atfork_prepare (void) 393etp_atfork_prepare (void)
332{ 394{
333 X_LOCK (wrklock); 395 X_LOCK (wrklock);
334 X_LOCK (reqlock); 396 X_LOCK (reqlock);
335 X_LOCK (reslock); 397 X_LOCK (reslock);
336#if !HAVE_PREADWRITE 398#if !HAVE_PREADWRITE
337 X_LOCK (preadwritelock); 399 X_LOCK (preadwritelock);
338#endif 400#endif
339} 401}
340 402
403static void ecb_cold
341static void etp_atfork_parent (void) 404etp_atfork_parent (void)
342{ 405{
343#if !HAVE_PREADWRITE 406#if !HAVE_PREADWRITE
344 X_UNLOCK (preadwritelock); 407 X_UNLOCK (preadwritelock);
345#endif 408#endif
346 X_UNLOCK (reslock); 409 X_UNLOCK (reslock);
347 X_UNLOCK (reqlock); 410 X_UNLOCK (reqlock);
348 X_UNLOCK (wrklock); 411 X_UNLOCK (wrklock);
349} 412}
350 413
414static void ecb_cold
351static void etp_atfork_child (void) 415etp_atfork_child (void)
352{ 416{
353 ETP_REQ *prv; 417 ETP_REQ *prv;
354 418
355 while ((prv = reqq_shift (&req_queue))) 419 while ((prv = reqq_shift (&req_queue)))
356 ETP_DESTROY (prv); 420 ETP_DESTROY (prv);
373 idle = 0; 437 idle = 0;
374 nreqs = 0; 438 nreqs = 0;
375 nready = 0; 439 nready = 0;
376 npending = 0; 440 npending = 0;
377 441
378 etp_atfork_parent (); 442 etp_thread_init ();
379} 443}
380 444
381static void 445static void ecb_cold
382etp_once_init (void) 446etp_once_init (void)
383{ 447{
448 etp_thread_init ();
384 X_THREAD_ATFORK (etp_atfork_prepare, etp_atfork_parent, etp_atfork_child); 449 X_THREAD_ATFORK (etp_atfork_prepare, etp_atfork_parent, etp_atfork_child);
385} 450}
386 451
387static int 452static int ecb_cold
388etp_init (void (*want_poll)(void), void (*done_poll)(void)) 453etp_init (void (*want_poll)(void), void (*done_poll)(void))
389{ 454{
390 static pthread_once_t doinit = PTHREAD_ONCE_INIT; 455 static pthread_once_t doinit = PTHREAD_ONCE_INIT;
391 456
392 pthread_once (&doinit, etp_once_init); 457 pthread_once (&doinit, etp_once_init);
397 return 0; 462 return 0;
398} 463}
399 464
400X_THREAD_PROC (etp_proc); 465X_THREAD_PROC (etp_proc);
401 466
467static void ecb_cold
402static void etp_start_thread (void) 468etp_start_thread (void)
403{ 469{
404 etp_worker *wrk = calloc (1, sizeof (etp_worker)); 470 etp_worker *wrk = calloc (1, sizeof (etp_worker));
405 471
406 /*TODO*/ 472 /*TODO*/
407 assert (("unable to allocate worker thread data", wrk)); 473 assert (("unable to allocate worker thread data", wrk));
420 free (wrk); 486 free (wrk);
421 487
422 X_UNLOCK (wrklock); 488 X_UNLOCK (wrklock);
423} 489}
424 490
491static void
425static void etp_maybe_start_thread (void) 492etp_maybe_start_thread (void)
426{ 493{
427 if (expect_true (etp_nthreads () >= wanted)) 494 if (ecb_expect_true (etp_nthreads () >= wanted))
428 return; 495 return;
429 496
430 /* todo: maybe use idle here, but might be less exact */ 497 /* todo: maybe use idle here, but might be less exact */
431 if (expect_true (0 <= (int)etp_nthreads () + (int)etp_npending () - (int)etp_nreqs ())) 498 if (ecb_expect_true (0 <= (int)etp_nthreads () + (int)etp_npending () - (int)etp_nreqs ()))
432 return; 499 return;
433 500
434 etp_start_thread (); 501 etp_start_thread ();
435} 502}
436 503
504static void ecb_cold
437static void etp_end_thread (void) 505etp_end_thread (void)
438{ 506{
439 eio_req *req = calloc (1, sizeof (eio_req)); 507 eio_req *req = calloc (1, sizeof (eio_req));
440 508
441 req->type = -1; 509 req->type = -1;
442 req->pri = ETP_PRI_MAX - ETP_PRI_MIN; 510 req->pri = ETP_PRI_MAX - ETP_PRI_MIN;
449 X_LOCK (wrklock); 517 X_LOCK (wrklock);
450 --started; 518 --started;
451 X_UNLOCK (wrklock); 519 X_UNLOCK (wrklock);
452} 520}
453 521
454static int etp_poll (void) 522static int
523etp_poll (void)
455{ 524{
456 unsigned int maxreqs; 525 unsigned int maxreqs;
457 unsigned int maxtime; 526 unsigned int maxtime;
458 struct timeval tv_start, tv_now; 527 struct timeval tv_start, tv_now;
459 528
489 558
490 X_LOCK (reqlock); 559 X_LOCK (reqlock);
491 --nreqs; 560 --nreqs;
492 X_UNLOCK (reqlock); 561 X_UNLOCK (reqlock);
493 562
494 if (expect_false (req->type == EIO_GROUP && req->size)) 563 if (ecb_expect_false (req->type == EIO_GROUP && req->size))
495 { 564 {
496 req->int1 = 1; /* mark request as delayed */ 565 req->int1 = 1; /* mark request as delayed */
497 continue; 566 continue;
498 } 567 }
499 else 568 else
500 { 569 {
501 int res = ETP_FINISH (req); 570 int res = ETP_FINISH (req);
502 if (expect_false (res)) 571 if (ecb_expect_false (res))
503 return res; 572 return res;
504 } 573 }
505 574
506 if (expect_false (maxreqs && !--maxreqs)) 575 if (ecb_expect_false (maxreqs && !--maxreqs))
507 break; 576 break;
508 577
509 if (maxtime) 578 if (maxtime)
510 { 579 {
511 gettimeofday (&tv_now, 0); 580 gettimeofday (&tv_now, 0);
517 586
518 errno = EAGAIN; 587 errno = EAGAIN;
519 return -1; 588 return -1;
520} 589}
521 590
591static void
522static void etp_cancel (ETP_REQ *req) 592etp_cancel (ETP_REQ *req)
523{ 593{
524 X_LOCK (wrklock); 594 req->cancelled = 1;
525 req->flags |= EIO_FLAG_CANCELLED;
526 X_UNLOCK (wrklock);
527 595
528 eio_grp_cancel (req); 596 eio_grp_cancel (req);
529} 597}
530 598
599static void
531static void etp_submit (ETP_REQ *req) 600etp_submit (ETP_REQ *req)
532{ 601{
533 req->pri -= ETP_PRI_MIN; 602 req->pri -= ETP_PRI_MIN;
534 603
535 if (expect_false (req->pri < ETP_PRI_MIN - ETP_PRI_MIN)) req->pri = ETP_PRI_MIN - ETP_PRI_MIN; 604 if (ecb_expect_false (req->pri < ETP_PRI_MIN - ETP_PRI_MIN)) req->pri = ETP_PRI_MIN - ETP_PRI_MIN;
536 if (expect_false (req->pri > ETP_PRI_MAX - ETP_PRI_MIN)) req->pri = ETP_PRI_MAX - ETP_PRI_MIN; 605 if (ecb_expect_false (req->pri > ETP_PRI_MAX - ETP_PRI_MIN)) req->pri = ETP_PRI_MAX - ETP_PRI_MIN;
537 606
538 if (expect_false (req->type == EIO_GROUP)) 607 if (ecb_expect_false (req->type == EIO_GROUP))
539 { 608 {
540 /* I hope this is worth it :/ */ 609 /* I hope this is worth it :/ */
541 X_LOCK (reqlock); 610 X_LOCK (reqlock);
542 ++nreqs; 611 ++nreqs;
543 X_UNLOCK (reqlock); 612 X_UNLOCK (reqlock);
562 631
563 etp_maybe_start_thread (); 632 etp_maybe_start_thread ();
564 } 633 }
565} 634}
566 635
636static void ecb_cold
567static void etp_set_max_poll_time (double nseconds) 637etp_set_max_poll_time (double nseconds)
568{ 638{
569 if (WORDACCESS_UNSAFE) X_LOCK (reslock); 639 if (WORDACCESS_UNSAFE) X_LOCK (reslock);
570 max_poll_time = nseconds; 640 max_poll_time = nseconds * EIO_TICKS;
571 if (WORDACCESS_UNSAFE) X_UNLOCK (reslock); 641 if (WORDACCESS_UNSAFE) X_UNLOCK (reslock);
572} 642}
573 643
644static void ecb_cold
574static void etp_set_max_poll_reqs (unsigned int maxreqs) 645etp_set_max_poll_reqs (unsigned int maxreqs)
575{ 646{
576 if (WORDACCESS_UNSAFE) X_LOCK (reslock); 647 if (WORDACCESS_UNSAFE) X_LOCK (reslock);
577 max_poll_reqs = maxreqs; 648 max_poll_reqs = maxreqs;
578 if (WORDACCESS_UNSAFE) X_UNLOCK (reslock); 649 if (WORDACCESS_UNSAFE) X_UNLOCK (reslock);
579} 650}
580 651
652static void ecb_cold
581static void etp_set_max_idle (unsigned int nthreads) 653etp_set_max_idle (unsigned int nthreads)
582{ 654{
583 if (WORDACCESS_UNSAFE) X_LOCK (reqlock); 655 if (WORDACCESS_UNSAFE) X_LOCK (reqlock);
584 max_idle = nthreads <= 0 ? 1 : nthreads; 656 max_idle = nthreads;
585 if (WORDACCESS_UNSAFE) X_UNLOCK (reqlock); 657 if (WORDACCESS_UNSAFE) X_UNLOCK (reqlock);
586} 658}
587 659
660static void ecb_cold
661etp_set_idle_timeout (unsigned int seconds)
662{
663 if (WORDACCESS_UNSAFE) X_LOCK (reqlock);
664 idle_timeout = seconds;
665 if (WORDACCESS_UNSAFE) X_UNLOCK (reqlock);
666}
667
668static void ecb_cold
588static void etp_set_min_parallel (unsigned int nthreads) 669etp_set_min_parallel (unsigned int nthreads)
589{ 670{
590 if (wanted < nthreads) 671 if (wanted < nthreads)
591 wanted = nthreads; 672 wanted = nthreads;
592} 673}
593 674
675static void ecb_cold
594static void etp_set_max_parallel (unsigned int nthreads) 676etp_set_max_parallel (unsigned int nthreads)
595{ 677{
596 if (wanted > nthreads) 678 if (wanted > nthreads)
597 wanted = nthreads; 679 wanted = nthreads;
598 680
599 while (started > wanted) 681 while (started > wanted)
600 etp_end_thread (); 682 etp_end_thread ();
601} 683}
602 684
603/*****************************************************************************/ 685/*****************************************************************************/
604 686
687static void
605static void grp_try_feed (eio_req *grp) 688grp_try_feed (eio_req *grp)
606{ 689{
607 while (grp->size < grp->int2 && !EIO_CANCELLED (grp)) 690 while (grp->size < grp->int2 && !EIO_CANCELLED (grp))
608 { 691 {
609 grp->flags &= ~EIO_FLAG_GROUPADD; 692 grp->flags &= ~EIO_FLAG_GROUPADD;
610 693
617 break; 700 break;
618 } 701 }
619 } 702 }
620} 703}
621 704
705static int
622static int grp_dec (eio_req *grp) 706grp_dec (eio_req *grp)
623{ 707{
624 --grp->size; 708 --grp->size;
625 709
626 /* call feeder, if applicable */ 710 /* call feeder, if applicable */
627 grp_try_feed (grp); 711 grp_try_feed (grp);
631 return eio_finish (grp); 715 return eio_finish (grp);
632 else 716 else
633 return 0; 717 return 0;
634} 718}
635 719
720static void
636void eio_destroy (eio_req *req) 721eio_destroy (eio_req *req)
637{ 722{
638 if ((req)->flags & EIO_FLAG_PTR1_FREE) free (req->ptr1); 723 if ((req)->flags & EIO_FLAG_PTR1_FREE) free (req->ptr1);
639 if ((req)->flags & EIO_FLAG_PTR2_FREE) free (req->ptr2); 724 if ((req)->flags & EIO_FLAG_PTR2_FREE) free (req->ptr2);
640 725
641 EIO_DESTROY (req); 726 EIO_DESTROY (req);
642} 727}
643 728
729static int
644static int eio_finish (eio_req *req) 730eio_finish (eio_req *req)
645{ 731{
646 int res = EIO_FINISH (req); 732 int res = EIO_FINISH (req);
647 733
648 if (req->grp) 734 if (req->grp)
649 { 735 {
657 if (grp->grp_first == req) 743 if (grp->grp_first == req)
658 grp->grp_first = req->grp_next; 744 grp->grp_first = req->grp_next;
659 745
660 res2 = grp_dec (grp); 746 res2 = grp_dec (grp);
661 747
662 if (!res && res2) 748 if (!res)
663 res = res2; 749 res = res2;
664 } 750 }
665 751
666 eio_destroy (req); 752 eio_destroy (req);
667 753
668 return res; 754 return res;
669} 755}
670 756
757void
671void eio_grp_cancel (eio_req *grp) 758eio_grp_cancel (eio_req *grp)
672{ 759{
673 for (grp = grp->grp_first; grp; grp = grp->grp_next) 760 for (grp = grp->grp_first; grp; grp = grp->grp_next)
674 eio_cancel (grp); 761 eio_cancel (grp);
675} 762}
676 763
764void
677void eio_cancel (eio_req *req) 765eio_cancel (eio_req *req)
678{ 766{
679 etp_cancel (req); 767 etp_cancel (req);
680} 768}
681 769
770void
682void eio_submit (eio_req *req) 771eio_submit (eio_req *req)
683{ 772{
684 etp_submit (req); 773 etp_submit (req);
685} 774}
686 775
687unsigned int eio_nreqs (void) 776unsigned int
777eio_nreqs (void)
688{ 778{
689 return etp_nreqs (); 779 return etp_nreqs ();
690} 780}
691 781
692unsigned int eio_nready (void) 782unsigned int
783eio_nready (void)
693{ 784{
694 return etp_nready (); 785 return etp_nready ();
695} 786}
696 787
697unsigned int eio_npending (void) 788unsigned int
789eio_npending (void)
698{ 790{
699 return etp_npending (); 791 return etp_npending ();
700} 792}
701 793
702unsigned int eio_nthreads (void) 794unsigned int ecb_cold
795eio_nthreads (void)
703{ 796{
704 return etp_nthreads (); 797 return etp_nthreads ();
705} 798}
706 799
800void ecb_cold
707void eio_set_max_poll_time (double nseconds) 801eio_set_max_poll_time (double nseconds)
708{ 802{
709 etp_set_max_poll_time (nseconds); 803 etp_set_max_poll_time (nseconds);
710} 804}
711 805
806void ecb_cold
712void eio_set_max_poll_reqs (unsigned int maxreqs) 807eio_set_max_poll_reqs (unsigned int maxreqs)
713{ 808{
714 etp_set_max_poll_reqs (maxreqs); 809 etp_set_max_poll_reqs (maxreqs);
715} 810}
716 811
812void ecb_cold
717void eio_set_max_idle (unsigned int nthreads) 813eio_set_max_idle (unsigned int nthreads)
718{ 814{
719 etp_set_max_idle (nthreads); 815 etp_set_max_idle (nthreads);
720} 816}
721 817
818void ecb_cold
819eio_set_idle_timeout (unsigned int seconds)
820{
821 etp_set_idle_timeout (seconds);
822}
823
824void ecb_cold
722void eio_set_min_parallel (unsigned int nthreads) 825eio_set_min_parallel (unsigned int nthreads)
723{ 826{
724 etp_set_min_parallel (nthreads); 827 etp_set_min_parallel (nthreads);
725} 828}
726 829
830void ecb_cold
727void eio_set_max_parallel (unsigned int nthreads) 831eio_set_max_parallel (unsigned int nthreads)
728{ 832{
729 etp_set_max_parallel (nthreads); 833 etp_set_max_parallel (nthreads);
730} 834}
731 835
732int eio_poll (void) 836int eio_poll (void)
767 871
768 X_LOCK (preadwritelock); 872 X_LOCK (preadwritelock);
769 ooffset = lseek (fd, 0, SEEK_CUR); 873 ooffset = lseek (fd, 0, SEEK_CUR);
770 lseek (fd, offset, SEEK_SET); 874 lseek (fd, offset, SEEK_SET);
771 res = write (fd, buf, count); 875 res = write (fd, buf, count);
772 lseek (fd, offset, SEEK_SET); 876 lseek (fd, ooffset, SEEK_SET);
773 X_UNLOCK (preadwritelock); 877 X_UNLOCK (preadwritelock);
774 878
775 return res; 879 return res;
776} 880}
777#endif 881#endif
778 882
779#ifndef HAVE_FUTIMES 883#ifndef HAVE_UTIMES
780 884
781# undef utimes 885# undef utimes
782# undef futimes
783# define utimes(path,times) eio__utimes (path, times) 886# define utimes(path,times) eio__utimes (path, times)
784# define futimes(fd,times) eio__futimes (fd, times)
785 887
786static int 888static int
787eio__utimes (const char *filename, const struct timeval times[2]) 889eio__utimes (const char *filename, const struct timeval times[2])
788{ 890{
789 if (times) 891 if (times)
797 } 899 }
798 else 900 else
799 return utime (filename, 0); 901 return utime (filename, 0);
800} 902}
801 903
904#endif
905
906#ifndef HAVE_FUTIMES
907
908# undef futimes
909# define futimes(fd,times) eio__futimes (fd, times)
910
911static int
802static int eio__futimes (int fd, const struct timeval tv[2]) 912eio__futimes (int fd, const struct timeval tv[2])
803{ 913{
804 errno = ENOSYS; 914 errno = ENOSYS;
805 return -1; 915 return -1;
806} 916}
807 917
811# undef fdatasync 921# undef fdatasync
812# define fdatasync(fd) fsync (fd) 922# define fdatasync(fd) fsync (fd)
813#endif 923#endif
814 924
815/* sync_file_range always needs emulation */ 925/* sync_file_range always needs emulation */
816int 926static int
817eio__sync_file_range (int fd, off_t offset, size_t nbytes, unsigned int flags) 927eio__sync_file_range (int fd, off_t offset, size_t nbytes, unsigned int flags)
818{ 928{
819#if HAVE_SYNC_FILE_RANGE 929#if HAVE_SYNC_FILE_RANGE
820 int res; 930 int res;
821 931
829 | (flags & EIO_SYNC_FILE_RANGE_WAIT_AFTER ? SYNC_FILE_RANGE_WAIT_AFTER : 0); 939 | (flags & EIO_SYNC_FILE_RANGE_WAIT_AFTER ? SYNC_FILE_RANGE_WAIT_AFTER : 0);
830 } 940 }
831 941
832 res = sync_file_range (fd, offset, nbytes, flags); 942 res = sync_file_range (fd, offset, nbytes, flags);
833 943
834 if (res != ENOSYS) 944 if (!res || errno != ENOSYS)
835 return res; 945 return res;
836#endif 946#endif
837 947
838 /* even though we could play tricks with the flags, it's better to always 948 /* even though we could play tricks with the flags, it's better to always
839 * call fdatasync, as thta matches the expectation of it's users best */ 949 * call fdatasync, as that matches the expectation of its users best */
840 return fdatasync (fd); 950 return fdatasync (fd);
951}
952
953static int
954eio__fallocate (int fd, int mode, off_t offset, size_t nbytes)
955{
956#if HAVE_FALLOCATE
 957 return fallocate (fd, mode, offset, nbytes);
958#else
959 errno = ENOSYS;
960 return -1;
961#endif
841} 962}
842 963
843#if !HAVE_READAHEAD 964#if !HAVE_READAHEAD
844# undef readahead 965# undef readahead
845# define readahead(fd,offset,count) eio__readahead (fd, offset, count, self) 966# define readahead(fd,offset,count) eio__readahead (fd, offset, count, self)
867 988
868/* sendfile always needs emulation */ 989/* sendfile always needs emulation */
869static ssize_t 990static ssize_t
870eio__sendfile (int ofd, int ifd, off_t offset, size_t count, etp_worker *self) 991eio__sendfile (int ofd, int ifd, off_t offset, size_t count, etp_worker *self)
871{ 992{
993 ssize_t written = 0;
872 ssize_t res; 994 ssize_t res;
873 995
874 if (!count) 996 if (!count)
875 return 0; 997 return 0;
876 998
999 for (;;)
1000 {
1001#ifdef __APPLE__
1002# undef HAVE_SENDFILE /* broken, as everything on os x */
1003#endif
877#if HAVE_SENDFILE 1004#if HAVE_SENDFILE
878# if __linux 1005# if __linux
1006 off_t soffset = offset;
879 res = sendfile (ofd, ifd, &offset, count); 1007 res = sendfile (ofd, ifd, &soffset, count);
880 1008
881# elif __freebsd 1009# elif __FreeBSD__
882 /* 1010 /*
883 * Of course, the freebsd sendfile is a dire hack with no thoughts 1011 * Of course, the freebsd sendfile is a dire hack with no thoughts
884 * wasted on making it similar to other I/O functions. 1012 * wasted on making it similar to other I/O functions.
885 */ 1013 */
886 {
887 off_t sbytes; 1014 off_t sbytes;
888 res = sendfile (ifd, ofd, offset, count, 0, &sbytes, 0); 1015 res = sendfile (ifd, ofd, offset, count, 0, &sbytes, 0);
889 1016
890 if (res < 0 && sbytes) 1017 #if 0 /* according to the manpage, this is correct, but broken behaviour */
891 /* maybe only on EAGAIN: as usual, the manpage leaves you guessing */ 1018 /* freebsd' sendfile will return 0 on success */
1019 /* freebsd 8 documents it as only setting *sbytes on EINTR and EAGAIN, but */
1020 /* not on e.g. EIO or EPIPE - sounds broken */
1021 if ((res < 0 && (errno == EAGAIN || errno == EINTR) && sbytes) || res == 0)
892 res = sbytes; 1022 res = sbytes;
893 } 1023 #endif
1024
1025 /* according to source inspection, this is correct, and useful behaviour */
1026 if (sbytes)
1027 res = sbytes;
1028
1029# elif defined (__APPLE__)
1030 off_t sbytes = count;
1031 res = sendfile (ifd, ofd, offset, &sbytes, 0, 0);
1032
1033 /* according to the manpage, sbytes is always valid */
1034 if (sbytes)
1035 res = sbytes;
894 1036
895# elif __hpux 1037# elif __hpux
896 res = sendfile (ofd, ifd, offset, count, 0, 0); 1038 res = sendfile (ofd, ifd, offset, count, 0, 0);
897 1039
898# elif __solaris 1040# elif __solaris
899 {
900 struct sendfilevec vec; 1041 struct sendfilevec vec;
901 size_t sbytes; 1042 size_t sbytes;
902 1043
903 vec.sfv_fd = ifd; 1044 vec.sfv_fd = ifd;
904 vec.sfv_flag = 0; 1045 vec.sfv_flag = 0;
905 vec.sfv_off = offset; 1046 vec.sfv_off = offset;
906 vec.sfv_len = count; 1047 vec.sfv_len = count;
907 1048
908 res = sendfilev (ofd, &vec, 1, &sbytes); 1049 res = sendfilev (ofd, &vec, 1, &sbytes);
909 1050
910 if (res < 0 && sbytes) 1051 if (res < 0 && sbytes)
911 res = sbytes; 1052 res = sbytes;
912 }
913 1053
914# endif 1054# endif
1055
1056#elif defined (_WIN32)
1057 /* does not work, just for documentation of what would need to be done */
1058 /* actually, cannot be done like this, as TransmitFile changes the file offset, */
1059 /* libeio guarantees that the file offset does not change, and windows */
1060 /* has no way to get an independent handle to the same file description */
1061 HANDLE h = TO_SOCKET (ifd);
1062 SetFilePointer (h, offset, 0, FILE_BEGIN);
1063 res = TransmitFile (TO_SOCKET (ofd), h, count, 0, 0, 0, 0);
1064
915#else 1065#else
916 res = -1; 1066 res = -1;
917 errno = ENOSYS; 1067 errno = ENOSYS;
918#endif 1068#endif
919 1069
1070 /* we assume sendfile can copy at least 128mb in one go */
1071 if (res <= 128 * 1024 * 1024)
1072 {
1073 if (res > 0)
1074 written += res;
1075
1076 if (written)
1077 return written;
1078
1079 break;
1080 }
1081 else
1082 {
1083 /* if we requested more, then probably the kernel was lazy */
1084 written += res;
1085 offset += res;
1086 count -= res;
1087
1088 if (!count)
1089 return written;
1090 }
1091 }
1092
920 if (res < 0 1093 if (res < 0
921 && (errno == ENOSYS || errno == EINVAL || errno == ENOTSOCK 1094 && (errno == ENOSYS || errno == EINVAL || errno == ENOTSOCK
1095 /* BSDs */
1096#ifdef ENOTSUP /* sigh, if the steenking pile called openbsd would only try to at least compile posix code... */
1097 || errno == ENOTSUP
1098#endif
1099 || errno == EOPNOTSUPP /* BSDs */
922#if __solaris 1100#if __solaris
923 || errno == EAFNOSUPPORT || errno == EPROTOTYPE 1101 || errno == EAFNOSUPPORT || errno == EPROTOTYPE
924#endif 1102#endif
925 ) 1103 )
926 ) 1104 )
957 } 1135 }
958 1136
959 return res; 1137 return res;
960} 1138}
961 1139
1140#ifdef PAGESIZE
1141# define eio_pagesize() PAGESIZE
1142#else
1143static intptr_t
1144eio_pagesize (void)
1145{
1146 static intptr_t page;
1147
1148 if (!page)
1149 page = sysconf (_SC_PAGESIZE);
1150
1151 return page;
1152}
1153#endif
1154
1155static void
1156eio_page_align (void **addr, size_t *length)
1157{
1158 intptr_t mask = eio_pagesize () - 1;
1159
1160 /* round down addr */
1161 intptr_t adj = mask & (intptr_t)*addr;
1162
1163 *addr = (void *)((intptr_t)*addr - adj);
1164 *length += adj;
1165
1166 /* round up length */
1167 *length = (*length + mask) & ~mask;
1168}
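/* worked example with assumed 4096-byte pages (illustrative, not part of the diff):
   *addr = 0x12345, *length = 0x64 -> adj = 0x345, *addr becomes 0x12000,
   *length grows to 0x3a9 and is then rounded up to 0x1000 (one full page). */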
1169
1170#if !_POSIX_MEMLOCK
1171# define eio__mlockall(a) ((errno = ENOSYS), -1)
1172#else
1173
1174static int
1175eio__mlockall (int flags)
1176{
1177 #if __GLIBC__ == 2 && __GLIBC_MINOR__ <= 7
1178 extern int mallopt (int, int);
1179 mallopt (-6, 238); /* http://bugs.debian.org/cgi-bin/bugreport.cgi?bug=473812 */
1180 #endif
1181
1182 if (EIO_MCL_CURRENT != MCL_CURRENT
1183 || EIO_MCL_FUTURE != MCL_FUTURE)
1184 {
1185 flags = 0
1186 | (flags & EIO_MCL_CURRENT ? MCL_CURRENT : 0)
1187 | (flags & EIO_MCL_FUTURE ? MCL_FUTURE : 0);
1188 }
1189
1190 return mlockall (flags);
1191}
1192#endif
1193
1194#if !_POSIX_MEMLOCK_RANGE
1195# define eio__mlock(a,b) ((errno = ENOSYS), -1)
1196#else
1197
1198static int
1199eio__mlock (void *addr, size_t length)
1200{
1201 eio_page_align (&addr, &length);
1202
1203 return mlock (addr, length);
1204}
1205
1206#endif
1207
1208#if !(_POSIX_MAPPED_FILES && _POSIX_SYNCHRONIZED_IO)
1209# define eio__msync(a,b,c) ((errno = ENOSYS), -1)
1210#else
1211
1212static int
1213eio__msync (void *mem, size_t len, int flags)
1214{
1215 eio_page_align (&mem, &len);
1216
1217 if (EIO_MS_ASYNC != MS_SYNC
1218 || EIO_MS_INVALIDATE != MS_INVALIDATE
1219 || EIO_MS_SYNC != MS_SYNC)
1220 {
1221 flags = 0
1222 | (flags & EIO_MS_ASYNC ? MS_ASYNC : 0)
1223 | (flags & EIO_MS_INVALIDATE ? MS_INVALIDATE : 0)
1224 | (flags & EIO_MS_SYNC ? MS_SYNC : 0);
1225 }
1226
1227 return msync (mem, len, flags);
1228}
1229
1230#endif
1231
1232static int
1233eio__mtouch (eio_req *req)
1234{
1235 void *mem = req->ptr2;
1236 size_t len = req->size;
1237 int flags = req->int1;
1238
1239 eio_page_align (&mem, &len);
1240
1241 {
1242 intptr_t addr = (intptr_t)mem;
1243 intptr_t end = addr + len;
1244 intptr_t page = eio_pagesize ();
1245
1246 if (addr < end)
1247 if (flags & EIO_MT_MODIFY) /* modify */
 1248 do { *((volatile sig_atomic_t *)addr) |= 0; } while ((addr += page) < end && !EIO_CANCELLED (req));
1249 else
 1250 do { *((volatile sig_atomic_t *)addr) ; } while ((addr += page) < end && !EIO_CANCELLED (req));
1251 }
1252
1253 return 0;
1254}
1255
1256/*****************************************************************************/
1257/* requests implemented outside eio_execute, because they are so large */
1258
1259static void
1260eio__realpath (eio_req *req, etp_worker *self)
1261{
1262 char *rel = req->ptr1;
1263 char *res;
1264 char *tmp1, *tmp2;
1265#if SYMLOOP_MAX > 32
1266 int symlinks = SYMLOOP_MAX;
1267#else
1268 int symlinks = 32;
1269#endif
1270
1271 req->result = -1;
1272
1273 errno = EINVAL;
1274 if (!rel)
1275 return;
1276
1277 errno = ENOENT;
1278 if (!*rel)
1279 return;
1280
1281 if (!req->ptr2)
1282 {
1283 X_LOCK (wrklock);
1284 req->flags |= EIO_FLAG_PTR2_FREE;
1285 X_UNLOCK (wrklock);
1286 req->ptr2 = malloc (PATH_MAX * 3);
1287
1288 errno = ENOMEM;
1289 if (!req->ptr2)
1290 return;
1291 }
1292
1293 res = req->ptr2;
1294 tmp1 = res + PATH_MAX;
1295 tmp2 = tmp1 + PATH_MAX;
1296
1297#if 0 /* disabled, the musl way to do things is just too racy */
1298#if __linux && defined(O_NONBLOCK) && defined(O_NOATIME)
1299 /* on linux we may be able to ask the kernel */
1300 {
1301 int fd = open (rel, O_RDONLY | O_NONBLOCK | O_NOCTTY | O_NOATIME);
1302
1303 if (fd >= 0)
1304 {
1305 sprintf (tmp1, "/proc/self/fd/%d", fd);
1306 req->result = readlink (tmp1, res, PATH_MAX);
1307 close (fd);
1308
1309 /* here we should probably stat the open file and the disk file, to make sure they still match */
1310
1311 if (req->result > 0)
1312 goto done;
1313 }
1314 else if (errno == ELOOP || errno == ENAMETOOLONG || errno == ENOENT || errno == ENOTDIR || errno == EIO)
1315 return;
1316 }
1317#endif
1318#endif
1319
1320 if (*rel != '/')
1321 {
1322 if (!getcwd (res, PATH_MAX))
1323 return;
1324
1325 if (res [1]) /* only use if not / */
1326 res += strlen (res);
1327 }
1328
1329 while (*rel)
1330 {
1331 ssize_t len, linklen;
1332 char *beg = rel;
1333
1334 while (*rel && *rel != '/')
1335 ++rel;
1336
1337 len = rel - beg;
1338
1339 if (!len) /* skip slashes */
1340 {
1341 ++rel;
1342 continue;
1343 }
1344
1345 if (beg [0] == '.')
1346 {
1347 if (len == 1)
1348 continue; /* . - nop */
1349
1350 if (beg [1] == '.' && len == 2)
1351 {
1352 /* .. - back up one component, if possible */
1353
1354 while (res != req->ptr2)
1355 if (*--res == '/')
1356 break;
1357
1358 continue;
1359 }
1360 }
1361
1362 errno = ENAMETOOLONG;
1363 if (res + 1 + len + 1 >= tmp1)
1364 return;
1365
1366 /* copy one component */
1367 *res = '/';
1368 memcpy (res + 1, beg, len);
1369
1370 /* zero-terminate, for readlink */
1371 res [len + 1] = 0;
1372
1373 /* now check if it's a symlink */
1374 linklen = readlink (req->ptr2, tmp1, PATH_MAX);
1375
1376 if (linklen < 0)
1377 {
1378 if (errno != EINVAL)
1379 return;
1380
1381 /* it's a normal directory. hopefully */
1382 res += len + 1;
1383 }
1384 else
1385 {
1386 /* yay, it was a symlink - build new path in tmp2 */
1387 int rellen = strlen (rel);
1388
1389 errno = ENAMETOOLONG;
1390 if (linklen + 1 + rellen >= PATH_MAX)
1391 return;
1392
1393 errno = ELOOP;
1394 if (!--symlinks)
1395 return;
1396
1397 if (*tmp1 == '/')
1398 res = req->ptr2; /* symlink resolves to an absolute path */
1399
1400 /* we need to be careful, as rel might point into tmp2 already */
1401 memmove (tmp2 + linklen + 1, rel, rellen + 1);
1402 tmp2 [linklen] = '/';
1403 memcpy (tmp2, tmp1, linklen);
1404
1405 rel = tmp2;
1406 }
1407 }
1408
1409 /* special case for the lone root path */
1410 if (res == req->ptr2)
1411 *res++ = '/';
1412
1413 req->result = res - (char *)req->ptr2;
1414
1415done:
1416 req->ptr2 = realloc (req->ptr2, req->result); /* trade time for space savings */
1417}
1418
1419static signed char
1420eio_dent_cmp (const eio_dirent *a, const eio_dirent *b)
1421{
1422 return a->score - b->score ? a->score - b->score /* works because our signed char is always 0..100 */
1423 : a->inode < b->inode ? -1
1424 : a->inode > b->inode ? 1
1425 : 0;
1426}
1427
1428#define EIO_DENT_CMP(i,op,j) eio_dent_cmp (&i, &j) op 0
1429
1430#define EIO_SORT_CUTOFF 30 /* quite high, but performs well on many filesystems */
1431#define EIO_SORT_FAST 60 /* when to only use insertion sort */
1432
1433static void
1434eio_dent_radix_sort (eio_dirent *dents, int size, signed char score_bits, ino_t inode_bits)
1435{
1436 unsigned char bits [9 + sizeof (ino_t) * 8];
1437 unsigned char *bit = bits;
1438
1439 assert (CHAR_BIT == 8);
1440 assert (sizeof (eio_dirent) * 8 < 256);
1441 assert (offsetof (eio_dirent, inode)); /* we use bit #0 as sentinel */
1442 assert (offsetof (eio_dirent, score)); /* we use bit #0 as sentinel */
1443
1444 if (size <= EIO_SORT_FAST)
1445 return;
1446
1447 /* first prepare an array of bits to test in our radix sort */
1448 /* try to take endianness into account, as well as differences in ino_t sizes */
1449 /* inode_bits must contain all inodes ORed together */
1450 /* which is used to skip bits that are 0 everywhere, which is very common */
1451 {
1452 ino_t endianness;
1453 int i, j;
1454
1455 /* we store the byte offset of byte n into byte n of "endianness" */
1456 for (i = 0; i < sizeof (ino_t); ++i)
1457 ((unsigned char *)&endianness)[i] = i;
1458
1459 *bit++ = 0;
1460
1461 for (i = 0; i < sizeof (ino_t); ++i)
1462 {
1463 /* shifting off the byte offsets out of "endianness" */
1464 int offs = (offsetof (eio_dirent, inode) + (endianness & 0xff)) * 8;
1465 endianness >>= 8;
1466
1467 for (j = 0; j < 8; ++j)
1468 if (inode_bits & (((ino_t)1) << (i * 8 + j)))
1469 *bit++ = offs + j;
1470 }
1471
1472 for (j = 0; j < 8; ++j)
1473 if (score_bits & (1 << j))
1474 *bit++ = offsetof (eio_dirent, score) * 8 + j;
1475 }
1476
1477 /* now actually do the sorting (a variant of MSD radix sort) */
1478 {
1479 eio_dirent *base_stk [9 + sizeof (ino_t) * 8], *base;
1480 eio_dirent *end_stk [9 + sizeof (ino_t) * 8], *end;
1481 unsigned char *bit_stk [9 + sizeof (ino_t) * 8];
1482 int stk_idx = 0;
1483
1484 base_stk [stk_idx] = dents;
1485 end_stk [stk_idx] = dents + size;
1486 bit_stk [stk_idx] = bit - 1;
1487
1488 do
1489 {
1490 base = base_stk [stk_idx];
1491 end = end_stk [stk_idx];
1492 bit = bit_stk [stk_idx];
1493
1494 for (;;)
1495 {
1496 unsigned char O = *bit >> 3;
1497 unsigned char M = 1 << (*bit & 7);
1498
1499 eio_dirent *a = base;
1500 eio_dirent *b = end;
1501
1502 if (b - a < EIO_SORT_CUTOFF)
1503 break;
1504
1505 /* now bit-partition the array on the bit */
1506 /* this ugly asymmetric loop seems to perform much better than typical */
1507 /* partition algos found in the literature */
1508 do
1509 if (!(((unsigned char *)a)[O] & M))
1510 ++a;
1511 else if (!(((unsigned char *)--b)[O] & M))
1512 {
1513 eio_dirent tmp = *a; *a = *b; *b = tmp;
1514 ++a;
1515 }
1516 while (b > a);
1517
1518 /* next bit, or stop, if no bits left in this path */
1519 if (!*--bit)
1520 break;
1521
1522 base_stk [stk_idx] = a;
1523 end_stk [stk_idx] = end;
1524 bit_stk [stk_idx] = bit;
1525 ++stk_idx;
1526
1527 end = a;
1528 }
1529 }
1530 while (stk_idx--);
1531 }
1532}
1533
1534static void
1535eio_dent_insertion_sort (eio_dirent *dents, int size)
1536{
1537 /* first move the smallest element to the front, to act as a sentinel */
1538 {
1539 int i;
1540 eio_dirent *min = dents;
1541
1542 /* the radix pre-pass ensures that the minimum element is in the first EIO_SORT_CUTOFF + 1 elements */
1543 for (i = size > EIO_SORT_FAST ? EIO_SORT_CUTOFF + 1 : size; --i; )
1544 if (EIO_DENT_CMP (dents [i], <, *min))
1545 min = &dents [i];
1546
1547 /* swap elements 0 and j (minimum) */
1548 {
1549 eio_dirent tmp = *dents; *dents = *min; *min = tmp;
1550 }
1551 }
1552
1553 /* then do standard insertion sort, assuming that all elements are >= dents [0] */
1554 {
1555 eio_dirent *i, *j;
1556
1557 for (i = dents + 1; i < dents + size; ++i)
1558 {
1559 eio_dirent value = *i;
1560
1561 for (j = i - 1; EIO_DENT_CMP (*j, >, value); --j)
1562 j [1] = j [0];
1563
1564 j [1] = value;
1565 }
1566 }
1567}
1568
1569static void
1570eio_dent_sort (eio_dirent *dents, int size, signed char score_bits, ino_t inode_bits)
1571{
1572 if (size <= 1)
1573 return; /* our insertion sort relies on size > 0 */
1574
1575 /* first we use a radix sort, but only for dirs >= EIO_SORT_FAST */
1576 /* and stop sorting when the partitions are <= EIO_SORT_CUTOFF */
1577 eio_dent_radix_sort (dents, size, score_bits, inode_bits);
1578
1579 /* use an insertion sort at the end, or for small arrays, */
1580 /* as insertion sort is more efficient for small partitions */
1581 eio_dent_insertion_sort (dents, size);
1582}
1583
962/* read a full directory */ 1584/* read a full directory */
963static void 1585static void
964eio__scandir (eio_req *req, etp_worker *self) 1586eio__scandir (eio_req *req, etp_worker *self)
965{ 1587{
966 DIR *dirp; 1588 DIR *dirp;
967 EIO_STRUCT_DIRENT *entp; 1589 EIO_STRUCT_DIRENT *entp;
968 char *name, *names; 1590 char *name, *names;
969 int memlen = 4096; 1591 int namesalloc = 4096;
970 int memofs = 0; 1592 int namesoffs = 0;
1593 int flags = req->int1;
1594 eio_dirent *dents = 0;
1595 int dentalloc = 128;
971 int res = 0; 1596 int dentoffs = 0;
1597 ino_t inode_bits = 0;
1598
1599 req->result = -1;
1600
1601 if (!(flags & EIO_READDIR_DENTS))
1602 flags &= ~(EIO_READDIR_DIRS_FIRST | EIO_READDIR_STAT_ORDER);
972 1603
973 X_LOCK (wrklock); 1604 X_LOCK (wrklock);
974 /* the corresponding closedir is in ETP_WORKER_CLEAR */ 1605 /* the corresponding closedir is in ETP_WORKER_CLEAR */
975 self->dirp = dirp = opendir (req->ptr1); 1606 self->dirp = dirp = opendir (req->ptr1);
1607
976 req->flags |= EIO_FLAG_PTR2_FREE; 1608 if (req->flags & EIO_FLAG_PTR1_FREE)
1609 free (req->ptr1);
1610
1611 req->flags |= EIO_FLAG_PTR1_FREE | EIO_FLAG_PTR2_FREE;
1612 req->ptr1 = dents = flags ? malloc (dentalloc * sizeof (eio_dirent)) : 0;
977 req->ptr2 = names = malloc (memlen); 1613 req->ptr2 = names = malloc (namesalloc);
978 X_UNLOCK (wrklock); 1614 X_UNLOCK (wrklock);
979 1615
980 if (dirp && names) 1616 if (dirp && names && (!flags || dents))
981 for (;;) 1617 for (;;)
982 { 1618 {
983 errno = 0; 1619 errno = 0;
984 entp = readdir (dirp); 1620 entp = readdir (dirp);
985 1621
986 if (!entp) 1622 if (!entp)
1623 {
1624 if (errno)
1625 break;
1626
1627 /* sort etc. */
1628 req->int1 = flags;
1629 req->result = dentoffs;
1630
1631 if (flags & EIO_READDIR_STAT_ORDER)
1632 eio_dent_sort (dents, dentoffs, flags & EIO_READDIR_DIRS_FIRST ? 7 : 0, inode_bits);
1633 else if (flags & EIO_READDIR_DIRS_FIRST)
1634 if (flags & EIO_READDIR_FOUND_UNKNOWN)
1635 eio_dent_sort (dents, dentoffs, 7, inode_bits); /* sort by score and inode */
1636 else
1637 {
1638 /* in this case, all is known, and we just put dirs first and sort them */
1639 eio_dirent *oth = dents + dentoffs;
1640 eio_dirent *dir = dents;
1641
1642 /* now partition dirs to the front, and non-dirs to the back */
1643 /* by walking from both sides and swapping if necessary */
1644 while (oth > dir)
1645 {
1646 if (dir->type == EIO_DT_DIR)
1647 ++dir;
1648 else if ((--oth)->type == EIO_DT_DIR)
1649 {
1650 eio_dirent tmp = *dir; *dir = *oth; *oth = tmp;
1651
1652 ++dir;
1653 }
1654 }
1655
1656 /* now sort the dirs only (dirs all have the same score) */
1657 eio_dent_sort (dents, dir - dents, 0, inode_bits);
1658 }
1659
987 break; 1660 break;
1661 }
988 1662
1663 /* now add the entry to our list(s) */
989 name = entp->d_name; 1664 name = entp->d_name;
990 1665
1666 /* skip . and .. entries */
991 if (name [0] != '.' || (name [1] && (name [1] != '.' || name [2]))) 1667 if (name [0] != '.' || (name [1] && (name [1] != '.' || name [2])))
992 { 1668 {
993 int len = strlen (name) + 1; 1669 int len = D_NAMLEN (entp) + 1;
994 1670
995 res++; 1671 while (ecb_expect_false (namesoffs + len > namesalloc))
996
997 while (memofs + len > memlen)
998 { 1672 {
999 memlen *= 2; 1673 namesalloc *= 2;
1000 X_LOCK (wrklock); 1674 X_LOCK (wrklock);
1001 req->ptr2 = names = realloc (names, memlen); 1675 req->ptr2 = names = realloc (names, namesalloc);
1002 X_UNLOCK (wrklock); 1676 X_UNLOCK (wrklock);
1003 1677
1004 if (!names) 1678 if (!names)
1005 break; 1679 break;
1006 } 1680 }
1007 1681
1008 memcpy (names + memofs, name, len); 1682 memcpy (names + namesoffs, name, len);
1683
1684 if (dents)
1685 {
1686 struct eio_dirent *ent;
1687
1688 if (ecb_expect_false (dentoffs == dentalloc))
1689 {
1690 dentalloc *= 2;
1691 X_LOCK (wrklock);
1692 req->ptr1 = dents = realloc (dents, dentalloc * sizeof (eio_dirent));
1693 X_UNLOCK (wrklock);
1694
1695 if (!dents)
1696 break;
1697 }
1698
1699 ent = dents + dentoffs;
1700
1701 ent->nameofs = namesoffs; /* rather dirtily we store the offset in the pointer */
1702 ent->namelen = len - 1;
1703 ent->inode = D_INO (entp);
1704
1705 inode_bits |= ent->inode;
1706
1707 switch (D_TYPE (entp))
1708 {
1709 default:
1710 ent->type = EIO_DT_UNKNOWN;
1711 flags |= EIO_READDIR_FOUND_UNKNOWN;
1712 break;
1713
1714 #ifdef DT_FIFO
1715 case DT_FIFO: ent->type = EIO_DT_FIFO; break;
1716 #endif
1717 #ifdef DT_CHR
1718 case DT_CHR: ent->type = EIO_DT_CHR; break;
1719 #endif
1720 #ifdef DT_MPC
1721 case DT_MPC: ent->type = EIO_DT_MPC; break;
1722 #endif
1723 #ifdef DT_DIR
1724 case DT_DIR: ent->type = EIO_DT_DIR; break;
1725 #endif
1726 #ifdef DT_NAM
1727 case DT_NAM: ent->type = EIO_DT_NAM; break;
1728 #endif
1729 #ifdef DT_BLK
1730 case DT_BLK: ent->type = EIO_DT_BLK; break;
1731 #endif
1732 #ifdef DT_MPB
1733 case DT_MPB: ent->type = EIO_DT_MPB; break;
1734 #endif
1735 #ifdef DT_REG
1736 case DT_REG: ent->type = EIO_DT_REG; break;
1737 #endif
1738 #ifdef DT_NWK
1739 case DT_NWK: ent->type = EIO_DT_NWK; break;
1740 #endif
1741 #ifdef DT_CMP
1742 case DT_CMP: ent->type = EIO_DT_CMP; break;
1743 #endif
1744 #ifdef DT_LNK
1745 case DT_LNK: ent->type = EIO_DT_LNK; break;
1746 #endif
1747 #ifdef DT_SOCK
1748 case DT_SOCK: ent->type = EIO_DT_SOCK; break;
1749 #endif
1750 #ifdef DT_DOOR
1751 case DT_DOOR: ent->type = EIO_DT_DOOR; break;
1752 #endif
1753 #ifdef DT_WHT
1754 case DT_WHT: ent->type = EIO_DT_WHT; break;
1755 #endif
1756 }
1757
1758 ent->score = 7;
1759
1760 if (flags & EIO_READDIR_DIRS_FIRST)
1761 {
1762 if (ent->type == EIO_DT_UNKNOWN)
1763 {
1764 if (*name == '.') /* leading dots are likely directories, and, in any case, rare */
1765 ent->score = 1;
1766 else if (!strchr (name, '.')) /* absense of dots indicate likely dirs */
1767 ent->score = len <= 2 ? 4 - len : len <= 4 ? 4 : len <= 7 ? 5 : 6; /* shorter == more likely dir, but avoid too many classes */
1768 }
1769 else if (ent->type == EIO_DT_DIR)
1770 ent->score = 0;
1771 }
1772 }
1773
1009 memofs += len; 1774 namesoffs += len;
1775 ++dentoffs;
1776 }
1777
1778 if (EIO_CANCELLED (req))
1779 {
1780 errno = ECANCELED;
1781 break;
1010 } 1782 }
1011 } 1783 }
1012
1013 if (errno)
1014 res = -1;
1015
1016 req->result = res;
1017}
1018
1019#if !(_POSIX_MAPPED_FILES && _POSIX_SYNCHRONIZED_IO)
1020# undef msync
1021# define msync(a,b,c) ENOSYS
1022#endif
1023
1024int
1025eio__mtouch (void *mem, size_t len, int flags)
1026{
1027 intptr_t addr = (intptr_t)mem;
1028 intptr_t end = addr + len;
1029#ifdef PAGESIZE
1030 const intptr_t page = PAGESIZE;
1031#else
1032 static intptr_t page;
1033
1034 if (!page)
1035 page = sysconf (_SC_PAGESIZE);
1036#endif
1037
1038 addr &= ~(page - 1); /* assume page size is always a power of two */
1039
1040 if (addr < end)
1041 if (flags) /* modify */
1042 do { *((volatile sig_atomic_t *)addr) |= 0; } while ((addr += page) < len);
1043 else
1044 do { *((volatile sig_atomic_t *)addr) ; } while ((addr += page) < len);
1045
1046 return 0;
1047} 1784}
1048 1785
1049/*****************************************************************************/ 1786/*****************************************************************************/
1050 1787
1051#define ALLOC(len) \ 1788#define ALLOC(len) \
1083 if (req) 1820 if (req)
1084 break; 1821 break;
1085 1822
1086 ++idle; 1823 ++idle;
1087 1824
1088 ts.tv_sec = time (0) + IDLE_TIMEOUT; 1825 ts.tv_sec = time (0) + idle_timeout;
1089 if (X_COND_TIMEDWAIT (reqwait, reqlock, ts) == ETIMEDOUT) 1826 if (X_COND_TIMEDWAIT (reqwait, reqlock, ts) == ETIMEDOUT)
1090 { 1827 {
1091 if (idle > max_idle) 1828 if (idle > max_idle)
1092 { 1829 {
1093 --idle; 1830 --idle;
1110 X_UNLOCK (reqlock); 1847 X_UNLOCK (reqlock);
1111 1848
1112 if (req->type < 0) 1849 if (req->type < 0)
1113 goto quit; 1850 goto quit;
1114 1851
1115 if (!EIO_CANCELLED (req))
1116 ETP_EXECUTE (self, req); 1852 ETP_EXECUTE (self, req);
1117 1853
1118 X_LOCK (reslock); 1854 X_LOCK (reslock);
1119 1855
1120 ++npending; 1856 ++npending;
1121 1857
1136 return 0; 1872 return 0;
1137} 1873}
1138 1874
1139/*****************************************************************************/ 1875/*****************************************************************************/
1140 1876
1877int ecb_cold
1141int eio_init (void (*want_poll)(void), void (*done_poll)(void)) 1878eio_init (void (*want_poll)(void), void (*done_poll)(void))
1142{ 1879{
1143 return etp_init (want_poll, done_poll); 1880 return etp_init (want_poll, done_poll);
1144} 1881}
1145 1882
1883ecb_inline void
1146static void eio_api_destroy (eio_req *req) 1884eio_api_destroy (eio_req *req)
1147{ 1885{
1148 free (req); 1886 free (req);
1149} 1887}
1150 1888
1151#define REQ(rtype) \ 1889#define REQ(rtype) \
1170 { \ 1908 { \
1171 eio_api_destroy (req); \ 1909 eio_api_destroy (req); \
1172 return 0; \ 1910 return 0; \
1173 } 1911 }
1174 1912
1913static void
1175static void eio_execute (etp_worker *self, eio_req *req) 1914eio_execute (etp_worker *self, eio_req *req)
1176{ 1915{
1177 errno = 0; 1916 if (ecb_expect_false (EIO_CANCELLED (req)))
1917 {
1918 req->result = -1;
1919 req->errorno = ECANCELED;
1920 return;
1921 }
1178 1922
1179 switch (req->type) 1923 switch (req->type)
1180 { 1924 {
1181 case EIO_READ: ALLOC (req->size); 1925 case EIO_READ: ALLOC (req->size);
1182 req->result = req->offs >= 0 1926 req->result = req->offs >= 0
1194 case EIO_LSTAT: ALLOC (sizeof (EIO_STRUCT_STAT)); 1938 case EIO_LSTAT: ALLOC (sizeof (EIO_STRUCT_STAT));
1195 req->result = lstat (req->ptr1, (EIO_STRUCT_STAT *)req->ptr2); break; 1939 req->result = lstat (req->ptr1, (EIO_STRUCT_STAT *)req->ptr2); break;
1196 case EIO_FSTAT: ALLOC (sizeof (EIO_STRUCT_STAT)); 1940 case EIO_FSTAT: ALLOC (sizeof (EIO_STRUCT_STAT));
1197 req->result = fstat (req->int1, (EIO_STRUCT_STAT *)req->ptr2); break; 1941 req->result = fstat (req->int1, (EIO_STRUCT_STAT *)req->ptr2); break;
1198 1942
1943 case EIO_STATVFS: ALLOC (sizeof (EIO_STRUCT_STATVFS));
1944 req->result = statvfs (req->ptr1, (EIO_STRUCT_STATVFS *)req->ptr2); break;
1945 case EIO_FSTATVFS: ALLOC (sizeof (EIO_STRUCT_STATVFS));
1946 req->result = fstatvfs (req->int1, (EIO_STRUCT_STATVFS *)req->ptr2); break;
1947
1199 case EIO_CHOWN: req->result = chown (req->ptr1, req->int2, req->int3); break; 1948 case EIO_CHOWN: req->result = chown (req->ptr1, req->int2, req->int3); break;
1200 case EIO_FCHOWN: req->result = fchown (req->int1, req->int2, req->int3); break; 1949 case EIO_FCHOWN: req->result = fchown (req->int1, req->int2, req->int3); break;
1201 case EIO_CHMOD: req->result = chmod (req->ptr1, (mode_t)req->int2); break; 1950 case EIO_CHMOD: req->result = chmod (req->ptr1, (mode_t)req->int2); break;
1202 case EIO_FCHMOD: req->result = fchmod (req->int1, (mode_t)req->int2); break; 1951 case EIO_FCHMOD: req->result = fchmod (req->int1, (mode_t)req->int2); break;
1203 case EIO_TRUNCATE: req->result = truncate (req->ptr1, req->offs); break; 1952 case EIO_TRUNCATE: req->result = truncate (req->ptr1, req->offs); break;
1210 case EIO_RMDIR: req->result = rmdir (req->ptr1); break; 1959 case EIO_RMDIR: req->result = rmdir (req->ptr1); break;
1211 case EIO_MKDIR: req->result = mkdir (req->ptr1, (mode_t)req->int2); break; 1960 case EIO_MKDIR: req->result = mkdir (req->ptr1, (mode_t)req->int2); break;
1212 case EIO_RENAME: req->result = rename (req->ptr1, req->ptr2); break; 1961 case EIO_RENAME: req->result = rename (req->ptr1, req->ptr2); break;
1213 case EIO_LINK: req->result = link (req->ptr1, req->ptr2); break; 1962 case EIO_LINK: req->result = link (req->ptr1, req->ptr2); break;
1214 case EIO_SYMLINK: req->result = symlink (req->ptr1, req->ptr2); break; 1963 case EIO_SYMLINK: req->result = symlink (req->ptr1, req->ptr2); break;
1215 case EIO_MKNOD: req->result = mknod (req->ptr1, (mode_t)req->int2, (dev_t)req->int3); break; 1964 case EIO_MKNOD: req->result = mknod (req->ptr1, (mode_t)req->int2, (dev_t)req->offs); break;
1216 1965
1966 case EIO_REALPATH: eio__realpath (req, self); break;
1967
1217 case EIO_READLINK: ALLOC (NAME_MAX); 1968 case EIO_READLINK: ALLOC (PATH_MAX);
1218 req->result = readlink (req->ptr1, req->ptr2, NAME_MAX); break; 1969 req->result = readlink (req->ptr1, req->ptr2, PATH_MAX); break;
1219 1970
1220 case EIO_SYNC: req->result = 0; sync (); break; 1971 case EIO_SYNC: req->result = 0; sync (); break;
1221 case EIO_FSYNC: req->result = fsync (req->int1); break; 1972 case EIO_FSYNC: req->result = fsync (req->int1); break;
1222 case EIO_FDATASYNC: req->result = fdatasync (req->int1); break; 1973 case EIO_FDATASYNC: req->result = fdatasync (req->int1); break;
1223 case EIO_MSYNC: req->result = msync (req->ptr2, req->size, req->int1); break; 1974 case EIO_MSYNC: req->result = eio__msync (req->ptr2, req->size, req->int1); break;
1975 case EIO_MTOUCH: req->result = eio__mtouch (req); break;
1224 case EIO_MTOUCH: req->result = eio__mtouch (req->ptr2, req->size, req->int1); break; 1976 case EIO_MLOCK: req->result = eio__mlock (req->ptr2, req->size); break;
1977 case EIO_MLOCKALL: req->result = eio__mlockall (req->int1); break;
1225 case EIO_SYNC_FILE_RANGE: req->result = eio__sync_file_range (req->int1, req->offs, req->size, req->int2); break; 1978 case EIO_SYNC_FILE_RANGE: req->result = eio__sync_file_range (req->int1, req->offs, req->size, req->int2); break;
1979 case EIO_FALLOCATE: req->result = eio__fallocate (req->int1, req->int2, req->offs, req->size); break;
1226 1980
1227 case EIO_READDIR: eio__scandir (req, self); break; 1981 case EIO_READDIR: eio__scandir (req, self); break;
1228 1982
1229 case EIO_BUSY: 1983 case EIO_BUSY:
1230#ifdef _WIN32 1984#ifdef _WIN32
1231 Sleep (req->nv1 * 1000.); 1985 Sleep (req->nv1 * 1e3);
1232#else 1986#else
1233 { 1987 {
1234 struct timeval tv; 1988 struct timeval tv;
1235 1989
1236 tv.tv_sec = req->nv1; 1990 tv.tv_sec = req->nv1;
1237 tv.tv_usec = (req->nv1 - tv.tv_sec) * 1000000.; 1991 tv.tv_usec = (req->nv1 - tv.tv_sec) * 1e6;
1238 1992
1239 req->result = select (0, 0, 0, 0, &tv); 1993 req->result = select (0, 0, 0, 0, &tv);
1240 } 1994 }
1241#endif 1995#endif
1242 break; 1996 break;
1257 times = tv; 2011 times = tv;
1258 } 2012 }
1259 else 2013 else
1260 times = 0; 2014 times = 0;
1261 2015
1262
1263 req->result = req->type == EIO_FUTIME 2016 req->result = req->type == EIO_FUTIME
1264 ? futimes (req->int1, times) 2017 ? futimes (req->int1, times)
1265 : utimes (req->ptr1, times); 2018 : utimes (req->ptr1, times);
1266 } 2019 }
1267 break; 2020 break;
1272 case EIO_NOP: 2025 case EIO_NOP:
1273 req->result = 0; 2026 req->result = 0;
1274 break; 2027 break;
1275 2028
1276 case EIO_CUSTOM: 2029 case EIO_CUSTOM:
1277 ((void (*)(eio_req *))req->feed) (req); 2030 req->feed (req);
1278 break; 2031 break;
1279 2032
1280 default: 2033 default:
2034 errno = ENOSYS;
1281 req->result = -1; 2035 req->result = -1;
1282 break; 2036 break;
1283 } 2037 }
1284 2038
1285 req->errorno = errno; 2039 req->errorno = errno;
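Note on the default case above: the worker now sets errno to ENOSYS before failing the request, so a finish callback can tell an unsupported request type apart from whatever errno happened to be left over, and req->errorno (captured right after the switch) carries that value back. A minimal sketch of such a callback; the function name is illustrative:

#include <stdio.h>
#include <string.h>
#include "eio.h"

static int
my_finish (eio_req *req)
{
  if (req->result < 0)
    /* req->errorno holds the errno value captured by the worker thread */
    fprintf (stderr, "eio request failed: %s\n", strerror (req->errorno));

  return 0;
}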
1315eio_req *eio_mtouch (void *addr, size_t length, int flags, int pri, eio_cb cb, void *data) 2069eio_req *eio_mtouch (void *addr, size_t length, int flags, int pri, eio_cb cb, void *data)
1316{ 2070{
1317 REQ (EIO_MTOUCH); req->ptr2 = addr; req->size = length; req->int1 = flags; SEND; 2071 REQ (EIO_MTOUCH); req->ptr2 = addr; req->size = length; req->int1 = flags; SEND;
1318} 2072}
1319 2073
2074eio_req *eio_mlock (void *addr, size_t length, int pri, eio_cb cb, void *data)
2075{
2076 REQ (EIO_MLOCK); req->ptr2 = addr; req->size = length; SEND;
2077}
2078
2079eio_req *eio_mlockall (int flags, int pri, eio_cb cb, void *data)
2080{
2081 REQ (EIO_MLOCKALL); req->int1 = flags; SEND;
2082}
2083
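The new eio_mlock and eio_mlockall wrappers simply stash addr/length in ptr2/size and the flags in int1 for the worker. A minimal usage sketch, assuming the usual eio_init/eio_poll event-loop glue is already in place and that EIO_MCL_FUTURE is the flag constant exported by eio.h (an assumption; the raw mlockall() flag value would be passed through the same way):

static int
lock_done (eio_req *req)
{
  return 0;   /* req->result is 0 on success, -1 with req->errorno set */
}

static void
lock_region (void *buf, size_t len)
{
  eio_mlock (buf, len, 0, lock_done, 0);            /* lock one region */
  eio_mlockall (EIO_MCL_FUTURE, 0, lock_done, 0);   /* assumed constant */
}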
1320eio_req *eio_sync_file_range (int fd, off_t offset, size_t nbytes, unsigned int flags, int pri, eio_cb cb, void *data) 2084eio_req *eio_sync_file_range (int fd, off_t offset, size_t nbytes, unsigned int flags, int pri, eio_cb cb, void *data)
1321{ 2085{
1322 REQ (EIO_SYNC_FILE_RANGE); req->int1 = fd; req->offs = offset; req->size = nbytes; req->int2 = flags; SEND; 2086 REQ (EIO_SYNC_FILE_RANGE); req->int1 = fd; req->offs = offset; req->size = nbytes; req->int2 = flags; SEND;
1323} 2087}
1324 2088
2089eio_req *eio_fallocate (int fd, int mode, off_t offset, size_t len, int pri, eio_cb cb, void *data)
2090{
2091 REQ (EIO_FALLOCATE); req->int1 = fd; req->int2 = mode; req->offs = offset; req->size = len; SEND;
2092}
2093
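eio_fallocate passes fd, mode, offset and length straight through to eio__fallocate in the worker. A hedged sketch of preallocating space for an already-open file; mode 0 is the plain allocate-and-extend behaviour of fallocate(2), other mode flags are platform specific:

static int
alloc_done (eio_req *req)
{
  return 0;
}

static void
reserve_space (int fd)
{
  /* reserve 1 MiB starting at offset 0; mode 0 allocates and extends
     the file size, exactly as fallocate(2) would */
  eio_fallocate (fd, 0, 0, 1024 * 1024, 0, alloc_done, 0);
}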
1325eio_req *eio_fdatasync (int fd, int pri, eio_cb cb, void *data) 2094eio_req *eio_fdatasync (int fd, int pri, eio_cb cb, void *data)
1326{ 2095{
1327 REQ (EIO_FDATASYNC); req->int1 = fd; SEND; 2096 REQ (EIO_FDATASYNC); req->int1 = fd; SEND;
1328} 2097}
1329 2098
1348} 2117}
1349 2118
1350eio_req *eio_fstat (int fd, int pri, eio_cb cb, void *data) 2119eio_req *eio_fstat (int fd, int pri, eio_cb cb, void *data)
1351{ 2120{
1352 REQ (EIO_FSTAT); req->int1 = fd; SEND; 2121 REQ (EIO_FSTAT); req->int1 = fd; SEND;
2122}
2123
2124eio_req *eio_fstatvfs (int fd, int pri, eio_cb cb, void *data)
2125{
2126 REQ (EIO_FSTATVFS); req->int1 = fd; SEND;
1353} 2127}
1354 2128
1355eio_req *eio_futime (int fd, double atime, double mtime, int pri, eio_cb cb, void *data) 2129eio_req *eio_futime (int fd, double atime, double mtime, int pri, eio_cb cb, void *data)
1356{ 2130{
1357 REQ (EIO_FUTIME); req->int1 = fd; req->nv1 = atime; req->nv2 = mtime; SEND; 2131 REQ (EIO_FUTIME); req->int1 = fd; req->nv1 = atime; req->nv2 = mtime; SEND;
1421eio_req *eio_readlink (const char *path, int pri, eio_cb cb, void *data) 2195eio_req *eio_readlink (const char *path, int pri, eio_cb cb, void *data)
1422{ 2196{
1423 return eio__1path (EIO_READLINK, path, pri, cb, data); 2197 return eio__1path (EIO_READLINK, path, pri, cb, data);
1424} 2198}
1425 2199
2200eio_req *eio_realpath (const char *path, int pri, eio_cb cb, void *data)
2201{
2202 return eio__1path (EIO_REALPATH, path, pri, cb, data);
2203}
2204
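eio_realpath is a new single-path request handled by eio__realpath in the worker. In the sketch below the resolved path is assumed to come back in req->ptr2 with its length in req->result (negative on error), matching how the other buffer-returning requests behave; treat that as an assumption rather than a documented guarantee:

#include <stdio.h>
#include "eio.h"

static int
realpath_done (eio_req *req)
{
  if (req->result >= 0)
    /* assumed: ptr2 holds the resolved path, result its length */
    printf ("resolved to %.*s\n", (int)req->result, (char *)req->ptr2);

  return 0;
}

/* eio_realpath ("some/relative/path", 0, realpath_done, 0); */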
1426eio_req *eio_stat (const char *path, int pri, eio_cb cb, void *data) 2205eio_req *eio_stat (const char *path, int pri, eio_cb cb, void *data)
1427{ 2206{
1428 return eio__1path (EIO_STAT, path, pri, cb, data); 2207 return eio__1path (EIO_STAT, path, pri, cb, data);
1429} 2208}
1430 2209
1431eio_req *eio_lstat (const char *path, int pri, eio_cb cb, void *data) 2210eio_req *eio_lstat (const char *path, int pri, eio_cb cb, void *data)
1432{ 2211{
1433 return eio__1path (EIO_LSTAT, path, pri, cb, data); 2212 return eio__1path (EIO_LSTAT, path, pri, cb, data);
1434} 2213}
1435 2214
2215eio_req *eio_statvfs (const char *path, int pri, eio_cb cb, void *data)
2216{
2217 return eio__1path (EIO_STATVFS, path, pri, cb, data);
2218}
2219
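eio_statvfs and eio_fstatvfs allocate an EIO_STRUCT_STATVFS in req->ptr2 (see the worker cases above) and store the statvfs()/fstatvfs() return value in req->result. A minimal sketch reading the free-space figures out of the result buffer, assuming EIO_STRUCT_STATVFS is plain struct statvfs as on POSIX systems:

#include <stdio.h>
#include <sys/statvfs.h>
#include "eio.h"

static int
statvfs_done (eio_req *req)
{
  if (req->result == 0)
    {
      struct statvfs *s = (struct statvfs *)req->ptr2;

      printf ("free: %llu bytes\n",
              (unsigned long long)s->f_bavail * s->f_frsize);
    }

  return 0;
}

/* eio_statvfs ("/", 0, statvfs_done, 0); */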
1436eio_req *eio_unlink (const char *path, int pri, eio_cb cb, void *data) 2220eio_req *eio_unlink (const char *path, int pri, eio_cb cb, void *data)
1437{ 2221{
1438 return eio__1path (EIO_UNLINK, path, pri, cb, data); 2222 return eio__1path (EIO_UNLINK, path, pri, cb, data);
1439} 2223}
1440 2224
1441eio_req *eio_rmdir (const char *path, int pri, eio_cb cb, void *data) 2225eio_req *eio_rmdir (const char *path, int pri, eio_cb cb, void *data)
1442{ 2226{
1443 return eio__1path (EIO_RMDIR, path, pri, cb, data); 2227 return eio__1path (EIO_RMDIR, path, pri, cb, data);
1444} 2228}
1445 2229
1446eio_req *eio_readdir (const char *path, int pri, eio_cb cb, void *data) 2230eio_req *eio_readdir (const char *path, int flags, int pri, eio_cb cb, void *data)
1447{ 2231{
1448 return eio__1path (EIO_READDIR, path, pri, cb, data); 2232 REQ (EIO_READDIR); PATH; req->int1 = flags; SEND;
1449} 2233}
1450 2234
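eio_readdir grew a flags argument, stored in req->int1 and interpreted by eio__scandir. The sketch below uses flags 0, where (an assumption based on the classic behaviour) ptr2 contains the entry names as NUL-separated strings and result the number of entries; the EIO_READDIR_* flags declared in eio.h select the richer eio_dirent output instead:

#include <stdio.h>
#include <string.h>
#include "eio.h"

static int
readdir_done (eio_req *req)
{
  if (req->result >= 0)
    {
      char *name = (char *)req->ptr2;   /* assumed: 0-separated names */
      int i;

      for (i = 0; i < req->result; ++i)
        {
          printf ("%s\n", name);
          name += strlen (name) + 1;
        }
    }

  return 0;
}

/* eio_readdir ("/tmp", 0, 0, readdir_done, 0); */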
1451eio_req *eio_mknod (const char *path, mode_t mode, dev_t dev, int pri, eio_cb cb, void *data) 2235eio_req *eio_mknod (const char *path, mode_t mode, dev_t dev, int pri, eio_cb cb, void *data)
1452{ 2236{
1453 REQ (EIO_MKNOD); PATH; req->int2 = (long)mode; req->int3 = (long)dev; SEND; 2237 REQ (EIO_MKNOD); PATH; req->int2 = (long)mode; req->offs = (off_t)dev; SEND;
1454} 2238}
1455 2239
1456static eio_req * 2240static eio_req *
1457eio__2path (int type, const char *path, const char *new_path, int pri, eio_cb cb, void *data) 2241eio__2path (int type, const char *path, const char *new_path, int pri, eio_cb cb, void *data)
1458{ 2242{
1482eio_req *eio_rename (const char *path, const char *new_path, int pri, eio_cb cb, void *data) 2266eio_req *eio_rename (const char *path, const char *new_path, int pri, eio_cb cb, void *data)
1483{ 2267{
1484 return eio__2path (EIO_RENAME, path, new_path, pri, cb, data); 2268 return eio__2path (EIO_RENAME, path, new_path, pri, cb, data);
1485} 2269}
1486 2270
1487eio_req *eio_custom (eio_cb execute, int pri, eio_cb cb, void *data) 2271eio_req *eio_custom (void (*execute)(eio_req *), int pri, eio_cb cb, void *data)
1488{ 2272{
1489 REQ (EIO_CUSTOM); req->feed = (void (*)(eio_req *))execute; SEND; 2273 REQ (EIO_CUSTOM); req->feed = execute; SEND;
1490} 2274}
1491 2275
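eio_custom now takes a properly typed void (*)(eio_req *) execute callback instead of casting through eio_cb, and the worker calls req->feed (req) directly. A minimal sketch; the execute function runs in a worker thread and should only touch the request and thread-safe state, while the finish callback runs in the submitting thread:

static void
my_execute (eio_req *req)
{
  /* runs in a worker thread */
  req->result = 0;
}

static int
my_done (eio_req *req)
{
  /* runs from the normal result-delivery path */
  return 0;
}

/* eio_custom (my_execute, 0, my_done, 0); */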
1492#endif 2276#endif
1493 2277
1494eio_req *eio_grp (eio_cb cb, void *data) 2278eio_req *eio_grp (eio_cb cb, void *data)
1503#undef SEND 2287#undef SEND
1504 2288
1505/*****************************************************************************/ 2289/*****************************************************************************/
1506/* grp functions */ 2290/* grp functions */
1507 2291
2292void
1508void eio_grp_feed (eio_req *grp, void (*feed)(eio_req *req), int limit) 2293eio_grp_feed (eio_req *grp, void (*feed)(eio_req *req), int limit)
1509{ 2294{
1510 grp->int2 = limit; 2295 grp->int2 = limit;
1511 grp->feed = feed; 2296 grp->feed = feed;
1512 2297
1513 grp_try_feed (grp); 2298 grp_try_feed (grp);
1514} 2299}
1515 2300
2301void
1516void eio_grp_limit (eio_req *grp, int limit) 2302eio_grp_limit (eio_req *grp, int limit)
1517{ 2303{
1518 grp->int2 = limit; 2304 grp->int2 = limit;
1519 2305
1520 grp_try_feed (grp); 2306 grp_try_feed (grp);
1521} 2307}
1522 2308
2309void
1523void eio_grp_add (eio_req *grp, eio_req *req) 2310eio_grp_add (eio_req *grp, eio_req *req)
1524{ 2311{
1525 assert (("cannot add requests to IO::AIO::GRP after the group finished", grp->int1 != 2)); 2312 assert (("cannot add requests to IO::AIO::GRP after the group finished", grp->int1 != 2));
1526 2313
1527 grp->flags |= EIO_FLAG_GROUPADD; 2314 grp->flags |= EIO_FLAG_GROUPADD;
1528 2315
1539} 2326}
1540 2327
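A short usage sketch for the group API above: a group collects subrequests and finishes once all of them have finished, and eio_grp_limit bounds how many subrequests an attached feeder keeps in flight. The paths and callback names here are purely illustrative:

static int
grp_done (eio_req *grp)
{
  /* called once every request added to the group has finished */
  return 0;
}

static void
remove_both (void)
{
  eio_req *grp = eio_grp (grp_done, 0);

  eio_grp_add (grp, eio_unlink ("/tmp/a", 0, 0, 0));
  eio_grp_add (grp, eio_unlink ("/tmp/b", 0, 0, 0));
}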
1541/*****************************************************************************/ 2328/*****************************************************************************/
1542/* misc garbage */ 2329/* misc garbage */
1543 2330
2331ssize_t
1544ssize_t eio_sendfile_sync (int ofd, int ifd, off_t offset, size_t count) 2332eio_sendfile_sync (int ofd, int ifd, off_t offset, size_t count)
1545{ 2333{
1546 etp_worker wrk; 2334 etp_worker wrk;
2335 ssize_t ret;
1547 2336
1548 wrk.dbuf = 0; 2337 wrk.dbuf = 0;
1549 2338
1550 eio__sendfile (ofd, ifd, offset, count, &wrk); 2339 ret = eio__sendfile (ofd, ifd, offset, count, &wrk);
1551 2340
1552 if (wrk.dbuf) 2341 if (wrk.dbuf)
1553 free (wrk.dbuf); 2342 free (wrk.dbuf);
1554}
1555 2343
2344 return ret;
2345}
2346
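With the ret variable added above, eio_sendfile_sync now returns the byte count (or -1) from eio__sendfile instead of falling off the end of the function. A small sketch of the synchronous call; the descriptors and size are whatever the caller already has at hand:

#include <stdio.h>
#include "eio.h"

static int
copy_file (int ofd, int ifd, off_t size)
{
  ssize_t n = eio_sendfile_sync (ofd, ifd, 0, size);

  if (n < 0)
    {
      perror ("eio_sendfile_sync");
      return -1;
    }

  return 0;
}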
