/cvs/cvsroot/libeio/eio.c

Comparing cvsroot/libeio/eio.c (file contents):
Revision 1.33 by root, Sat Jun 6 19:44:17 2009 UTC vs.
Revision 1.82 by root, Thu Jul 7 15:44:44 2011 UTC

1/* 1/*
2 * libeio implementation 2 * libeio implementation
3 * 3 *
4 * Copyright (c) 2007,2008,2009 Marc Alexander Lehmann <libeio@schmorp.de> 4 * Copyright (c) 2007,2008,2009,2010,2011 Marc Alexander Lehmann <libeio@schmorp.de>
5 * All rights reserved. 5 * All rights reserved.
6 * 6 *
7 * Redistribution and use in source and binary forms, with or without modifica- 7 * Redistribution and use in source and binary forms, with or without modifica-
8 * tion, are permitted provided that the following conditions are met: 8 * tion, are permitted provided that the following conditions are met:
9 * 9 *
35 * and other provisions required by the GPL. If you do not delete the 35 * and other provisions required by the GPL. If you do not delete the
36 * provisions above, a recipient may use your version of this file under 36 * provisions above, a recipient may use your version of this file under
37 * either the BSD or the GPL. 37 * either the BSD or the GPL.
38 */ 38 */
39 39
40#ifndef _WIN32
41# include "config.h"
42#endif
43
40#include "eio.h" 44#include "eio.h"
45#include "ecb.h"
41 46
42#ifdef EIO_STACKSIZE 47#ifdef EIO_STACKSIZE
43# define XTHREAD_STACKSIZE EIO_STACKSIZE 48# define XTHREAD_STACKSIZE EIO_STACKSIZE
44#endif 49#endif
45#include "xthread.h" 50#include "xthread.h"
49#include <stdlib.h> 54#include <stdlib.h>
50#include <string.h> 55#include <string.h>
51#include <errno.h> 56#include <errno.h>
52#include <sys/types.h> 57#include <sys/types.h>
53#include <sys/stat.h> 58#include <sys/stat.h>
59#include <sys/statvfs.h>
54#include <limits.h> 60#include <limits.h>
55#include <fcntl.h> 61#include <fcntl.h>
56#include <assert.h> 62#include <assert.h>
57 63
64/* intptr_t comes from unistd.h, says POSIX/UNIX/tradition */
65/* intptr_t only comes from stdint.h, says idiot openbsd coder */
66#if HAVE_STDINT_H
67# include <stdint.h>
68#endif
69
70#ifndef ECANCELED
71# define ECANCELED EDOM
72#endif
73
74static void eio_destroy (eio_req *req);
75
58#ifndef EIO_FINISH 76#ifndef EIO_FINISH
59# define EIO_FINISH(req) ((req)->finish) && !EIO_CANCELLED (req) ? (req)->finish (req) : 0 77# define EIO_FINISH(req) ((req)->finish) && !EIO_CANCELLED (req) ? (req)->finish (req) : 0
60#endif 78#endif
61 79
62#ifndef EIO_DESTROY 80#ifndef EIO_DESTROY
70#ifdef _WIN32 88#ifdef _WIN32
71 89
72 /*doh*/ 90 /*doh*/
73#else 91#else
74 92
75# include "config.h"
76# include <sys/time.h> 93# include <sys/time.h>
77# include <sys/select.h> 94# include <sys/select.h>
78# include <unistd.h> 95# include <unistd.h>
79# include <utime.h> 96# include <utime.h>
80# include <signal.h> 97# include <signal.h>
81# include <dirent.h> 98# include <dirent.h>
82 99
100#if _POSIX_MEMLOCK || _POSIX_MEMLOCK_RANGE || _POSIX_MAPPED_FILES
101# include <sys/mman.h>
102#endif
103
83/* POSIX_SOURCE is useless on bsd's, and XOPEN_SOURCE is unreliable there, too */ 104/* POSIX_SOURCE is useless on bsd's, and XOPEN_SOURCE is unreliable there, too */
84# if defined(__FreeBSD__) || defined(__NetBSD__) || defined(__OpenBSD__) 105# if __FreeBSD__ || defined __NetBSD__ || defined __OpenBSD__
85# define _DIRENT_HAVE_D_TYPE /* sigh */ 106# define _DIRENT_HAVE_D_TYPE /* sigh */
86# define D_INO(de) (de)->d_fileno 107# define D_INO(de) (de)->d_fileno
87# define D_NAMLEN(de) (de)->d_namlen 108# define D_NAMLEN(de) (de)->d_namlen
88# elif defined(__linux) || defined(d_ino) || _XOPEN_SOURCE >= 600 109# elif __linux || defined d_ino || _XOPEN_SOURCE >= 600
89# define D_INO(de) (de)->d_ino 110# define D_INO(de) (de)->d_ino
90# endif 111# endif
91 112
92#ifdef _D_EXACT_NAMLEN 113#ifdef _D_EXACT_NAMLEN
93# undef D_NAMLEN 114# undef D_NAMLEN
105#endif 126#endif
106 127
107#if HAVE_SENDFILE 128#if HAVE_SENDFILE
108# if __linux 129# if __linux
109# include <sys/sendfile.h> 130# include <sys/sendfile.h>
110# elif __freebsd 131# elif __FreeBSD__ || defined __APPLE__
111# include <sys/socket.h> 132# include <sys/socket.h>
112# include <sys/uio.h> 133# include <sys/uio.h>
113# elif __hpux 134# elif __hpux
114# include <sys/socket.h> 135# include <sys/socket.h>
115# elif __solaris /* not yet */ 136# elif __solaris
116# include <sys/sendfile.h> 137# include <sys/sendfile.h>
117# else 138# else
118# error sendfile support requested but not available 139# error sendfile support requested but not available
119# endif 140# endif
120#endif 141#endif
127#endif 148#endif
128#ifndef D_NAMLEN 149#ifndef D_NAMLEN
129# define D_NAMLEN(de) strlen ((de)->d_name) 150# define D_NAMLEN(de) strlen ((de)->d_name)
130#endif 151#endif
131 152
132/* number of seconds after which an idle threads exit */
133#define IDLE_TIMEOUT 10
134
135/* used for struct dirent, AIX doesn't provide it */ 153/* used for struct dirent, AIX doesn't provide it */
136#ifndef NAME_MAX 154#ifndef NAME_MAX
137# define NAME_MAX 4096 155# define NAME_MAX 4096
156#endif
157
158/* used for readlink etc. */
159#ifndef PATH_MAX
160# define PATH_MAX 4096
138#endif 161#endif
139 162
140/* buffer size for various temporary buffers */ 163/* buffer size for various temporary buffers */
141#define EIO_BUFSIZE 65536 164#define EIO_BUFSIZE 65536
142 165
148 errno = ENOMEM; \ 171 errno = ENOMEM; \
149 if (!eio_buf) \ 172 if (!eio_buf) \
150 return -1; 173 return -1;
151 174
152#define EIO_TICKS ((1000000 + 1023) >> 10) 175#define EIO_TICKS ((1000000 + 1023) >> 10)
153
154/*****************************************************************************/
155
156#if __GNUC__ >= 3
157# define expect(expr,value) __builtin_expect ((expr),(value))
158#else
159# define expect(expr,value) (expr)
160#endif
161
162#define expect_false(expr) expect ((expr) != 0, 0)
163#define expect_true(expr) expect ((expr) != 0, 1)
164
165/*****************************************************************************/
166 176
167#define ETP_PRI_MIN EIO_PRI_MIN 177#define ETP_PRI_MIN EIO_PRI_MIN
168#define ETP_PRI_MAX EIO_PRI_MAX 178#define ETP_PRI_MAX EIO_PRI_MAX
169 179
170struct etp_worker; 180struct etp_worker;
195 205
196/*****************************************************************************/ 206/*****************************************************************************/
197 207
198#define ETP_NUM_PRI (ETP_PRI_MAX - ETP_PRI_MIN + 1) 208#define ETP_NUM_PRI (ETP_PRI_MAX - ETP_PRI_MIN + 1)
199 209
200/* calculcate time difference in ~1/EIO_TICKS of a second */ 210/* calculate time difference in ~1/EIO_TICKS of a second */
211ecb_inline int
201static int tvdiff (struct timeval *tv1, struct timeval *tv2) 212tvdiff (struct timeval *tv1, struct timeval *tv2)
202{ 213{
203 return (tv2->tv_sec - tv1->tv_sec ) * EIO_TICKS 214 return (tv2->tv_sec - tv1->tv_sec ) * EIO_TICKS
204 + ((tv2->tv_usec - tv1->tv_usec) >> 10); 215 + ((tv2->tv_usec - tv1->tv_usec) >> 10);
205} 216}
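For reference, and assuming only the EIO_TICKS definition above: EIO_TICKS = (1000000 + 1023) >> 10 = 977, so one tick is roughly 1/977 of a second (about 1.024 ms). For example, with tv1 = 10.000000 s and tv2 = 11.500000 s, tvdiff returns 1 * 977 + (500000 >> 10) = 977 + 488 = 1465 ticks, i.e. about the expected 1.5 s.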
206 217
213static unsigned int max_poll_reqs; /* reslock */ 224static unsigned int max_poll_reqs; /* reslock */
214 225
215static volatile unsigned int nreqs; /* reqlock */ 226static volatile unsigned int nreqs; /* reqlock */
216static volatile unsigned int nready; /* reqlock */ 227static volatile unsigned int nready; /* reqlock */
217static volatile unsigned int npending; /* reqlock */ 228static volatile unsigned int npending; /* reqlock */
218static volatile unsigned int max_idle = 4; 229static volatile unsigned int max_idle = 4; /* maximum number of threads that can idle indefinitely */
 230static volatile unsigned int idle_timeout = 10; /* number of seconds after which idle threads exit */
219 231
220static mutex_t wrklock = X_MUTEX_INIT; 232static xmutex_t wrklock;
221static mutex_t reslock = X_MUTEX_INIT; 233static xmutex_t reslock;
222static mutex_t reqlock = X_MUTEX_INIT; 234static xmutex_t reqlock;
223static cond_t reqwait = X_COND_INIT; 235static xcond_t reqwait;
224 236
225#if !HAVE_PREADWRITE 237#if !HAVE_PREADWRITE
226/* 238/*
227 * make our pread/pwrite emulation safe against themselves, but not against 239 * make our pread/pwrite emulation safe against themselves, but not against
228 * normal read/write by using a mutex. slows down execution a lot, 240 * normal read/write by using a mutex. slows down execution a lot,
229 * but that's your problem, not mine. 241 * but that's your problem, not mine.
230 */ 242 */
231static mutex_t preadwritelock = X_MUTEX_INIT; 243static xmutex_t preadwritelock = X_MUTEX_INIT;
232#endif 244#endif
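A minimal sketch of what such a mutex-guarded pread emulation typically looks like (the real fallback code is in lines not shown in this hunk; lseek and read come from the unistd.h included above):

/* sketch only: emulate pread by saving and restoring the file offset under the lock */
static ssize_t
pread_emulation_sketch (int fd, void *buf, size_t count, off_t offset)
{
  ssize_t res;
  off_t ooffset;

  X_LOCK (preadwritelock);
  ooffset = lseek (fd, 0, SEEK_CUR); /* remember the current offset */
  lseek (fd, offset, SEEK_SET);
  res = read (fd, buf, count);
  lseek (fd, ooffset, SEEK_SET);     /* restore it, so plain read/write keeps working */
  X_UNLOCK (preadwritelock);

  return res;
}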
233 245
234typedef struct etp_worker 246typedef struct etp_worker
235{ 247{
236 /* locked by wrklock */ 248 /* locked by wrklock */
237 struct etp_worker *prev, *next; 249 struct etp_worker *prev, *next;
238 250
239 thread_t tid; 251 xthread_t tid;
240 252
241 /* locked by reslock, reqlock or wrklock */ 253 /* locked by reslock, reqlock or wrklock */
242 ETP_REQ *req; /* currently processed request */ 254 ETP_REQ *req; /* currently processed request */
243 255
244 ETP_WORKER_COMMON 256 ETP_WORKER_COMMON
249#define ETP_WORKER_LOCK(wrk) X_LOCK (wrklock) 261#define ETP_WORKER_LOCK(wrk) X_LOCK (wrklock)
250#define ETP_WORKER_UNLOCK(wrk) X_UNLOCK (wrklock) 262#define ETP_WORKER_UNLOCK(wrk) X_UNLOCK (wrklock)
251 263
252/* worker threads management */ 264/* worker threads management */
253 265
266static void ecb_cold
254static void etp_worker_clear (etp_worker *wrk) 267etp_worker_clear (etp_worker *wrk)
255{ 268{
256 ETP_WORKER_CLEAR (wrk); 269 ETP_WORKER_CLEAR (wrk);
257} 270}
258 271
272static void ecb_cold
259static void etp_worker_free (etp_worker *wrk) 273etp_worker_free (etp_worker *wrk)
260{ 274{
261 wrk->next->prev = wrk->prev; 275 wrk->next->prev = wrk->prev;
262 wrk->prev->next = wrk->next; 276 wrk->prev->next = wrk->next;
263 277
264 free (wrk); 278 free (wrk);
265} 279}
266 280
267static unsigned int etp_nreqs (void) 281static unsigned int
282etp_nreqs (void)
268{ 283{
269 int retval; 284 int retval;
270 if (WORDACCESS_UNSAFE) X_LOCK (reqlock); 285 if (WORDACCESS_UNSAFE) X_LOCK (reqlock);
271 retval = nreqs; 286 retval = nreqs;
272 if (WORDACCESS_UNSAFE) X_UNLOCK (reqlock); 287 if (WORDACCESS_UNSAFE) X_UNLOCK (reqlock);
273 return retval; 288 return retval;
274} 289}
275 290
276static unsigned int etp_nready (void) 291static unsigned int
292etp_nready (void)
277{ 293{
278 unsigned int retval; 294 unsigned int retval;
279 295
280 if (WORDACCESS_UNSAFE) X_LOCK (reqlock); 296 if (WORDACCESS_UNSAFE) X_LOCK (reqlock);
281 retval = nready; 297 retval = nready;
282 if (WORDACCESS_UNSAFE) X_UNLOCK (reqlock); 298 if (WORDACCESS_UNSAFE) X_UNLOCK (reqlock);
283 299
284 return retval; 300 return retval;
285} 301}
286 302
287static unsigned int etp_npending (void) 303static unsigned int
304etp_npending (void)
288{ 305{
289 unsigned int retval; 306 unsigned int retval;
290 307
291 if (WORDACCESS_UNSAFE) X_LOCK (reqlock); 308 if (WORDACCESS_UNSAFE) X_LOCK (reqlock);
292 retval = npending; 309 retval = npending;
293 if (WORDACCESS_UNSAFE) X_UNLOCK (reqlock); 310 if (WORDACCESS_UNSAFE) X_UNLOCK (reqlock);
294 311
295 return retval; 312 return retval;
296} 313}
297 314
298static unsigned int etp_nthreads (void) 315static unsigned int
316etp_nthreads (void)
299{ 317{
300 unsigned int retval; 318 unsigned int retval;
301 319
302 if (WORDACCESS_UNSAFE) X_LOCK (reqlock); 320 if (WORDACCESS_UNSAFE) X_LOCK (reqlock);
303 retval = started; 321 retval = started;
317} etp_reqq; 335} etp_reqq;
318 336
319static etp_reqq req_queue; 337static etp_reqq req_queue;
320static etp_reqq res_queue; 338static etp_reqq res_queue;
321 339
340static int ecb_noinline
322static int reqq_push (etp_reqq *q, ETP_REQ *req) 341reqq_push (etp_reqq *q, ETP_REQ *req)
323{ 342{
324 int pri = req->pri; 343 int pri = req->pri;
325 req->next = 0; 344 req->next = 0;
326 345
327 if (q->qe[pri]) 346 if (q->qe[pri])
333 q->qe[pri] = q->qs[pri] = req; 352 q->qe[pri] = q->qs[pri] = req;
334 353
335 return q->size++; 354 return q->size++;
336} 355}
337 356
357static ETP_REQ * ecb_noinline
338static ETP_REQ *reqq_shift (etp_reqq *q) 358reqq_shift (etp_reqq *q)
339{ 359{
340 int pri; 360 int pri;
341 361
342 if (!q->size) 362 if (!q->size)
343 return 0; 363 return 0;
358 } 378 }
359 379
360 abort (); 380 abort ();
361} 381}
362 382
383static void ecb_cold
384etp_thread_init (void)
385{
386 X_MUTEX_CREATE (wrklock);
387 X_MUTEX_CREATE (reslock);
388 X_MUTEX_CREATE (reqlock);
389 X_COND_CREATE (reqwait);
390}
391
392static void ecb_cold
363static void etp_atfork_prepare (void) 393etp_atfork_prepare (void)
364{ 394{
365 X_LOCK (wrklock); 395 X_LOCK (wrklock);
366 X_LOCK (reqlock); 396 X_LOCK (reqlock);
367 X_LOCK (reslock); 397 X_LOCK (reslock);
368#if !HAVE_PREADWRITE 398#if !HAVE_PREADWRITE
369 X_LOCK (preadwritelock); 399 X_LOCK (preadwritelock);
370#endif 400#endif
371} 401}
372 402
403static void ecb_cold
373static void etp_atfork_parent (void) 404etp_atfork_parent (void)
374{ 405{
375#if !HAVE_PREADWRITE 406#if !HAVE_PREADWRITE
376 X_UNLOCK (preadwritelock); 407 X_UNLOCK (preadwritelock);
377#endif 408#endif
378 X_UNLOCK (reslock); 409 X_UNLOCK (reslock);
379 X_UNLOCK (reqlock); 410 X_UNLOCK (reqlock);
380 X_UNLOCK (wrklock); 411 X_UNLOCK (wrklock);
381} 412}
382 413
414static void ecb_cold
383static void etp_atfork_child (void) 415etp_atfork_child (void)
384{ 416{
385 ETP_REQ *prv; 417 ETP_REQ *prv;
386 418
387 while ((prv = reqq_shift (&req_queue))) 419 while ((prv = reqq_shift (&req_queue)))
388 ETP_DESTROY (prv); 420 ETP_DESTROY (prv);
405 idle = 0; 437 idle = 0;
406 nreqs = 0; 438 nreqs = 0;
407 nready = 0; 439 nready = 0;
408 npending = 0; 440 npending = 0;
409 441
410 etp_atfork_parent (); 442 etp_thread_init ();
411} 443}
412 444
413static void 445static void ecb_cold
414etp_once_init (void) 446etp_once_init (void)
415{ 447{
448 etp_thread_init ();
416 X_THREAD_ATFORK (etp_atfork_prepare, etp_atfork_parent, etp_atfork_child); 449 X_THREAD_ATFORK (etp_atfork_prepare, etp_atfork_parent, etp_atfork_child);
417} 450}
418 451
419static int 452static int ecb_cold
420etp_init (void (*want_poll)(void), void (*done_poll)(void)) 453etp_init (void (*want_poll)(void), void (*done_poll)(void))
421{ 454{
422 static pthread_once_t doinit = PTHREAD_ONCE_INIT; 455 static pthread_once_t doinit = PTHREAD_ONCE_INIT;
423 456
424 pthread_once (&doinit, etp_once_init); 457 pthread_once (&doinit, etp_once_init);
429 return 0; 462 return 0;
430} 463}
431 464
432X_THREAD_PROC (etp_proc); 465X_THREAD_PROC (etp_proc);
433 466
467static void ecb_cold
434static void etp_start_thread (void) 468etp_start_thread (void)
435{ 469{
436 etp_worker *wrk = calloc (1, sizeof (etp_worker)); 470 etp_worker *wrk = calloc (1, sizeof (etp_worker));
437 471
438 /*TODO*/ 472 /*TODO*/
439 assert (("unable to allocate worker thread data", wrk)); 473 assert (("unable to allocate worker thread data", wrk));
452 free (wrk); 486 free (wrk);
453 487
454 X_UNLOCK (wrklock); 488 X_UNLOCK (wrklock);
455} 489}
456 490
491static void
457static void etp_maybe_start_thread (void) 492etp_maybe_start_thread (void)
458{ 493{
459 if (expect_true (etp_nthreads () >= wanted)) 494 if (ecb_expect_true (etp_nthreads () >= wanted))
460 return; 495 return;
461 496
462 /* todo: maybe use idle here, but might be less exact */ 497 /* todo: maybe use idle here, but might be less exact */
463 if (expect_true (0 <= (int)etp_nthreads () + (int)etp_npending () - (int)etp_nreqs ())) 498 if (ecb_expect_true (0 <= (int)etp_nthreads () + (int)etp_npending () - (int)etp_nreqs ()))
464 return; 499 return;
465 500
466 etp_start_thread (); 501 etp_start_thread ();
467} 502}
468 503
504static void ecb_cold
469static void etp_end_thread (void) 505etp_end_thread (void)
470{ 506{
471 eio_req *req = calloc (1, sizeof (eio_req)); 507 eio_req *req = calloc (1, sizeof (eio_req));
472 508
473 req->type = -1; 509 req->type = -1;
474 req->pri = ETP_PRI_MAX - ETP_PRI_MIN; 510 req->pri = ETP_PRI_MAX - ETP_PRI_MIN;
481 X_LOCK (wrklock); 517 X_LOCK (wrklock);
482 --started; 518 --started;
483 X_UNLOCK (wrklock); 519 X_UNLOCK (wrklock);
484} 520}
485 521
486static int etp_poll (void) 522static int
523etp_poll (void)
487{ 524{
488 unsigned int maxreqs; 525 unsigned int maxreqs;
489 unsigned int maxtime; 526 unsigned int maxtime;
490 struct timeval tv_start, tv_now; 527 struct timeval tv_start, tv_now;
491 528
521 558
522 X_LOCK (reqlock); 559 X_LOCK (reqlock);
523 --nreqs; 560 --nreqs;
524 X_UNLOCK (reqlock); 561 X_UNLOCK (reqlock);
525 562
526 if (expect_false (req->type == EIO_GROUP && req->size)) 563 if (ecb_expect_false (req->type == EIO_GROUP && req->size))
527 { 564 {
528 req->int1 = 1; /* mark request as delayed */ 565 req->int1 = 1; /* mark request as delayed */
529 continue; 566 continue;
530 } 567 }
531 else 568 else
532 { 569 {
533 int res = ETP_FINISH (req); 570 int res = ETP_FINISH (req);
534 if (expect_false (res)) 571 if (ecb_expect_false (res))
535 return res; 572 return res;
536 } 573 }
537 574
538 if (expect_false (maxreqs && !--maxreqs)) 575 if (ecb_expect_false (maxreqs && !--maxreqs))
539 break; 576 break;
540 577
541 if (maxtime) 578 if (maxtime)
542 { 579 {
543 gettimeofday (&tv_now, 0); 580 gettimeofday (&tv_now, 0);
549 586
550 errno = EAGAIN; 587 errno = EAGAIN;
551 return -1; 588 return -1;
552} 589}
553 590
591static void
554static void etp_cancel (ETP_REQ *req) 592etp_cancel (ETP_REQ *req)
555{ 593{
556 X_LOCK (wrklock); 594 req->cancelled = 1;
557 req->flags |= EIO_FLAG_CANCELLED;
558 X_UNLOCK (wrklock);
559 595
560 eio_grp_cancel (req); 596 eio_grp_cancel (req);
561} 597}
562 598
599static void
563static void etp_submit (ETP_REQ *req) 600etp_submit (ETP_REQ *req)
564{ 601{
565 req->pri -= ETP_PRI_MIN; 602 req->pri -= ETP_PRI_MIN;
566 603
567 if (expect_false (req->pri < ETP_PRI_MIN - ETP_PRI_MIN)) req->pri = ETP_PRI_MIN - ETP_PRI_MIN; 604 if (ecb_expect_false (req->pri < ETP_PRI_MIN - ETP_PRI_MIN)) req->pri = ETP_PRI_MIN - ETP_PRI_MIN;
568 if (expect_false (req->pri > ETP_PRI_MAX - ETP_PRI_MIN)) req->pri = ETP_PRI_MAX - ETP_PRI_MIN; 605 if (ecb_expect_false (req->pri > ETP_PRI_MAX - ETP_PRI_MIN)) req->pri = ETP_PRI_MAX - ETP_PRI_MIN;
569 606
570 if (expect_false (req->type == EIO_GROUP)) 607 if (ecb_expect_false (req->type == EIO_GROUP))
571 { 608 {
572 /* I hope this is worth it :/ */ 609 /* I hope this is worth it :/ */
573 X_LOCK (reqlock); 610 X_LOCK (reqlock);
574 ++nreqs; 611 ++nreqs;
575 X_UNLOCK (reqlock); 612 X_UNLOCK (reqlock);
594 631
595 etp_maybe_start_thread (); 632 etp_maybe_start_thread ();
596 } 633 }
597} 634}
598 635
636static void ecb_cold
599static void etp_set_max_poll_time (double nseconds) 637etp_set_max_poll_time (double nseconds)
600{ 638{
601 if (WORDACCESS_UNSAFE) X_LOCK (reslock); 639 if (WORDACCESS_UNSAFE) X_LOCK (reslock);
602 max_poll_time = nseconds; 640 max_poll_time = nseconds * EIO_TICKS;
603 if (WORDACCESS_UNSAFE) X_UNLOCK (reslock); 641 if (WORDACCESS_UNSAFE) X_UNLOCK (reslock);
604} 642}
605 643
644static void ecb_cold
606static void etp_set_max_poll_reqs (unsigned int maxreqs) 645etp_set_max_poll_reqs (unsigned int maxreqs)
607{ 646{
608 if (WORDACCESS_UNSAFE) X_LOCK (reslock); 647 if (WORDACCESS_UNSAFE) X_LOCK (reslock);
609 max_poll_reqs = maxreqs; 648 max_poll_reqs = maxreqs;
610 if (WORDACCESS_UNSAFE) X_UNLOCK (reslock); 649 if (WORDACCESS_UNSAFE) X_UNLOCK (reslock);
611} 650}
612 651
652static void ecb_cold
613static void etp_set_max_idle (unsigned int nthreads) 653etp_set_max_idle (unsigned int nthreads)
614{ 654{
615 if (WORDACCESS_UNSAFE) X_LOCK (reqlock); 655 if (WORDACCESS_UNSAFE) X_LOCK (reqlock);
616 max_idle = nthreads <= 0 ? 1 : nthreads; 656 max_idle = nthreads;
617 if (WORDACCESS_UNSAFE) X_UNLOCK (reqlock); 657 if (WORDACCESS_UNSAFE) X_UNLOCK (reqlock);
618} 658}
619 659
660static void ecb_cold
661etp_set_idle_timeout (unsigned int seconds)
662{
663 if (WORDACCESS_UNSAFE) X_LOCK (reqlock);
664 idle_timeout = seconds;
665 if (WORDACCESS_UNSAFE) X_UNLOCK (reqlock);
666}
667
668static void ecb_cold
620static void etp_set_min_parallel (unsigned int nthreads) 669etp_set_min_parallel (unsigned int nthreads)
621{ 670{
622 if (wanted < nthreads) 671 if (wanted < nthreads)
623 wanted = nthreads; 672 wanted = nthreads;
624} 673}
625 674
675static void ecb_cold
626static void etp_set_max_parallel (unsigned int nthreads) 676etp_set_max_parallel (unsigned int nthreads)
627{ 677{
628 if (wanted > nthreads) 678 if (wanted > nthreads)
629 wanted = nthreads; 679 wanted = nthreads;
630 680
631 while (started > wanted) 681 while (started > wanted)
632 etp_end_thread (); 682 etp_end_thread ();
633} 683}
634 684
635/*****************************************************************************/ 685/*****************************************************************************/
636 686
687static void
637static void grp_try_feed (eio_req *grp) 688grp_try_feed (eio_req *grp)
638{ 689{
639 while (grp->size < grp->int2 && !EIO_CANCELLED (grp)) 690 while (grp->size < grp->int2 && !EIO_CANCELLED (grp))
640 { 691 {
641 grp->flags &= ~EIO_FLAG_GROUPADD; 692 grp->flags &= ~EIO_FLAG_GROUPADD;
642 693
649 break; 700 break;
650 } 701 }
651 } 702 }
652} 703}
653 704
705static int
654static int grp_dec (eio_req *grp) 706grp_dec (eio_req *grp)
655{ 707{
656 --grp->size; 708 --grp->size;
657 709
658 /* call feeder, if applicable */ 710 /* call feeder, if applicable */
659 grp_try_feed (grp); 711 grp_try_feed (grp);
663 return eio_finish (grp); 715 return eio_finish (grp);
664 else 716 else
665 return 0; 717 return 0;
666} 718}
667 719
720static void
668void eio_destroy (eio_req *req) 721eio_destroy (eio_req *req)
669{ 722{
670 if ((req)->flags & EIO_FLAG_PTR1_FREE) free (req->ptr1); 723 if ((req)->flags & EIO_FLAG_PTR1_FREE) free (req->ptr1);
671 if ((req)->flags & EIO_FLAG_PTR2_FREE) free (req->ptr2); 724 if ((req)->flags & EIO_FLAG_PTR2_FREE) free (req->ptr2);
672 725
673 EIO_DESTROY (req); 726 EIO_DESTROY (req);
674} 727}
675 728
729static int
676static int eio_finish (eio_req *req) 730eio_finish (eio_req *req)
677{ 731{
678 int res = EIO_FINISH (req); 732 int res = EIO_FINISH (req);
679 733
680 if (req->grp) 734 if (req->grp)
681 { 735 {
689 if (grp->grp_first == req) 743 if (grp->grp_first == req)
690 grp->grp_first = req->grp_next; 744 grp->grp_first = req->grp_next;
691 745
692 res2 = grp_dec (grp); 746 res2 = grp_dec (grp);
693 747
694 if (!res && res2) 748 if (!res)
695 res = res2; 749 res = res2;
696 } 750 }
697 751
698 eio_destroy (req); 752 eio_destroy (req);
699 753
700 return res; 754 return res;
701} 755}
702 756
757void
703void eio_grp_cancel (eio_req *grp) 758eio_grp_cancel (eio_req *grp)
704{ 759{
705 for (grp = grp->grp_first; grp; grp = grp->grp_next) 760 for (grp = grp->grp_first; grp; grp = grp->grp_next)
706 eio_cancel (grp); 761 eio_cancel (grp);
707} 762}
708 763
764void
709void eio_cancel (eio_req *req) 765eio_cancel (eio_req *req)
710{ 766{
711 etp_cancel (req); 767 etp_cancel (req);
712} 768}
713 769
770void
714void eio_submit (eio_req *req) 771eio_submit (eio_req *req)
715{ 772{
716 etp_submit (req); 773 etp_submit (req);
717} 774}
718 775
719unsigned int eio_nreqs (void) 776unsigned int
777eio_nreqs (void)
720{ 778{
721 return etp_nreqs (); 779 return etp_nreqs ();
722} 780}
723 781
724unsigned int eio_nready (void) 782unsigned int
783eio_nready (void)
725{ 784{
726 return etp_nready (); 785 return etp_nready ();
727} 786}
728 787
729unsigned int eio_npending (void) 788unsigned int
789eio_npending (void)
730{ 790{
731 return etp_npending (); 791 return etp_npending ();
732} 792}
733 793
734unsigned int eio_nthreads (void) 794unsigned int ecb_cold
795eio_nthreads (void)
735{ 796{
736 return etp_nthreads (); 797 return etp_nthreads ();
737} 798}
738 799
800void ecb_cold
739void eio_set_max_poll_time (double nseconds) 801eio_set_max_poll_time (double nseconds)
740{ 802{
741 etp_set_max_poll_time (nseconds); 803 etp_set_max_poll_time (nseconds);
742} 804}
743 805
806void ecb_cold
744void eio_set_max_poll_reqs (unsigned int maxreqs) 807eio_set_max_poll_reqs (unsigned int maxreqs)
745{ 808{
746 etp_set_max_poll_reqs (maxreqs); 809 etp_set_max_poll_reqs (maxreqs);
747} 810}
748 811
812void ecb_cold
749void eio_set_max_idle (unsigned int nthreads) 813eio_set_max_idle (unsigned int nthreads)
750{ 814{
751 etp_set_max_idle (nthreads); 815 etp_set_max_idle (nthreads);
752} 816}
753 817
818void ecb_cold
819eio_set_idle_timeout (unsigned int seconds)
820{
821 etp_set_idle_timeout (seconds);
822}
823
824void ecb_cold
754void eio_set_min_parallel (unsigned int nthreads) 825eio_set_min_parallel (unsigned int nthreads)
755{ 826{
756 etp_set_min_parallel (nthreads); 827 etp_set_min_parallel (nthreads);
757} 828}
758 829
830void ecb_cold
759void eio_set_max_parallel (unsigned int nthreads) 831eio_set_max_parallel (unsigned int nthreads)
760{ 832{
761 etp_set_max_parallel (nthreads); 833 etp_set_max_parallel (nthreads);
762} 834}
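A small usage sketch of the tuning calls above (the numbers are arbitrary examples, not defaults):

/* sketch: size the thread pool and control how long surplus idle workers linger */
static void
tune_pool_sketch (void)
{
  eio_set_min_parallel (4);   /* let the pool grow to at least 4 workers */
  eio_set_max_parallel (16);  /* cap it at 16, ending surplus threads */
  eio_set_max_idle (2);       /* at most 2 workers may idle indefinitely */
  eio_set_idle_timeout (30);  /* additional idle workers exit after ~30 seconds */
}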
763 835
764int eio_poll (void) 836int eio_poll (void)
806 878
807 return res; 879 return res;
808} 880}
809#endif 881#endif
810 882
811#ifndef HAVE_FUTIMES 883#ifndef HAVE_UTIMES
812 884
813# undef utimes 885# undef utimes
814# undef futimes
815# define utimes(path,times) eio__utimes (path, times) 886# define utimes(path,times) eio__utimes (path, times)
816# define futimes(fd,times) eio__futimes (fd, times)
817 887
818static int 888static int
819eio__utimes (const char *filename, const struct timeval times[2]) 889eio__utimes (const char *filename, const struct timeval times[2])
820{ 890{
821 if (times) 891 if (times)
829 } 899 }
830 else 900 else
831 return utime (filename, 0); 901 return utime (filename, 0);
832} 902}
833 903
904#endif
905
906#ifndef HAVE_FUTIMES
907
908# undef futimes
909# define futimes(fd,times) eio__futimes (fd, times)
910
911static int
834static int eio__futimes (int fd, const struct timeval tv[2]) 912eio__futimes (int fd, const struct timeval tv[2])
835{ 913{
836 errno = ENOSYS; 914 errno = ENOSYS;
837 return -1; 915 return -1;
838} 916}
839 917
843# undef fdatasync 921# undef fdatasync
844# define fdatasync(fd) fsync (fd) 922# define fdatasync(fd) fsync (fd)
845#endif 923#endif
846 924
847/* sync_file_range always needs emulation */ 925/* sync_file_range always needs emulation */
848int 926static int
849eio__sync_file_range (int fd, off_t offset, size_t nbytes, unsigned int flags) 927eio__sync_file_range (int fd, off_t offset, size_t nbytes, unsigned int flags)
850{ 928{
851#if HAVE_SYNC_FILE_RANGE 929#if HAVE_SYNC_FILE_RANGE
852 int res; 930 int res;
853 931
866 if (!res || errno != ENOSYS) 944 if (!res || errno != ENOSYS)
867 return res; 945 return res;
868#endif 946#endif
869 947
870 /* even though we could play tricks with the flags, it's better to always 948 /* even though we could play tricks with the flags, it's better to always
871 * call fdatasync, as thta matches the expectation of it's users best */ 949 * call fdatasync, as that matches the expectation of its users best */
872 return fdatasync (fd); 950 return fdatasync (fd);
873} 951}
874 952
875#if !HAVE_READAHEAD 953#if !HAVE_READAHEAD
876# undef readahead 954# undef readahead
899 977
900/* sendfile always needs emulation */ 978/* sendfile always needs emulation */
901static ssize_t 979static ssize_t
902eio__sendfile (int ofd, int ifd, off_t offset, size_t count, etp_worker *self) 980eio__sendfile (int ofd, int ifd, off_t offset, size_t count, etp_worker *self)
903{ 981{
982 ssize_t written = 0;
904 ssize_t res; 983 ssize_t res;
905 984
906 if (!count) 985 if (!count)
907 return 0; 986 return 0;
908 987
988 for (;;)
989 {
990#ifdef __APPLE__
991# undef HAVE_SENDFILE /* broken, as everything on os x */
992#endif
909#if HAVE_SENDFILE 993#if HAVE_SENDFILE
910# if __linux 994# if __linux
995 off_t soffset = offset;
911 res = sendfile (ofd, ifd, &offset, count); 996 res = sendfile (ofd, ifd, &soffset, count);
912 997
913# elif __freebsd 998# elif __FreeBSD__
914 /* 999 /*
915 * Of course, the freebsd sendfile is a dire hack with no thoughts 1000 * Of course, the freebsd sendfile is a dire hack with no thoughts
916 * wasted on making it similar to other I/O functions. 1001 * wasted on making it similar to other I/O functions.
917 */ 1002 */
918 {
919 off_t sbytes; 1003 off_t sbytes;
920 res = sendfile (ifd, ofd, offset, count, 0, &sbytes, 0); 1004 res = sendfile (ifd, ofd, offset, count, 0, &sbytes, 0);
921 1005
922 if (res < 0 && sbytes) 1006 #if 0 /* according to the manpage, this is correct, but broken behaviour */
923 /* maybe only on EAGAIN: as usual, the manpage leaves you guessing */ 1007 /* freebsd' sendfile will return 0 on success */
1008 /* freebsd 8 documents it as only setting *sbytes on EINTR and EAGAIN, but */
1009 /* not on e.g. EIO or EPIPE - sounds broken */
1010 if ((res < 0 && (errno == EAGAIN || errno == EINTR) && sbytes) || res == 0)
924 res = sbytes; 1011 res = sbytes;
925 } 1012 #endif
1013
1014 /* according to source inspection, this is correct, and useful behaviour */
1015 if (sbytes)
1016 res = sbytes;
1017
1018# elif defined (__APPLE__)
1019 off_t sbytes = count;
1020 res = sendfile (ifd, ofd, offset, &sbytes, 0, 0);
1021
1022 /* according to the manpage, sbytes is always valid */
1023 if (sbytes)
1024 res = sbytes;
926 1025
927# elif __hpux 1026# elif __hpux
928 res = sendfile (ofd, ifd, offset, count, 0, 0); 1027 res = sendfile (ofd, ifd, offset, count, 0, 0);
929 1028
930# elif __solaris 1029# elif __solaris
931 {
932 struct sendfilevec vec; 1030 struct sendfilevec vec;
933 size_t sbytes; 1031 size_t sbytes;
934 1032
935 vec.sfv_fd = ifd; 1033 vec.sfv_fd = ifd;
936 vec.sfv_flag = 0; 1034 vec.sfv_flag = 0;
937 vec.sfv_off = offset; 1035 vec.sfv_off = offset;
938 vec.sfv_len = count; 1036 vec.sfv_len = count;
939 1037
940 res = sendfilev (ofd, &vec, 1, &sbytes); 1038 res = sendfilev (ofd, &vec, 1, &sbytes);
941 1039
942 if (res < 0 && sbytes) 1040 if (res < 0 && sbytes)
943 res = sbytes; 1041 res = sbytes;
944 }
945 1042
946# endif 1043# endif
1044
1045#elif defined (_WIN32)
1046 /* does not work, just for documentation of what would need to be done */
1047 /* actually, cannot be done like this, as TransmitFile changes the file offset, */
1048 /* libeio guarantees that the file offset does not change, and windows */
1049 /* has no way to get an independent handle to the same file description */
1050 HANDLE h = TO_SOCKET (ifd);
1051 SetFilePointer (h, offset, 0, FILE_BEGIN);
1052 res = TransmitFile (TO_SOCKET (ofd), h, count, 0, 0, 0, 0);
1053
947#else 1054#else
948 res = -1; 1055 res = -1;
949 errno = ENOSYS; 1056 errno = ENOSYS;
950#endif 1057#endif
951 1058
1059 /* we assume sendfile can copy at least 128mb in one go */
1060 if (res <= 128 * 1024 * 1024)
1061 {
1062 if (res > 0)
1063 written += res;
1064
1065 if (written)
1066 return written;
1067
1068 break;
1069 }
1070 else
1071 {
1072 /* if we requested more, then probably the kernel was lazy */
1073 written += res;
1074 offset += res;
1075 count -= res;
1076
1077 if (!count)
1078 return written;
1079 }
1080 }
1081
952 if (res < 0 1082 if (res < 0
953 && (errno == ENOSYS || errno == EINVAL || errno == ENOTSOCK 1083 && (errno == ENOSYS || errno == EINVAL || errno == ENOTSOCK
1084 /* BSDs */
1085#ifdef ENOTSUP /* sigh, if the steenking pile called openbsd would only try to at least compile posix code... */
1086 || errno == ENOTSUP
1087#endif
1088 || errno == EOPNOTSUPP /* BSDs */
954#if __solaris 1089#if __solaris
955 || errno == EAFNOSUPPORT || errno == EPROTOTYPE 1090 || errno == EAFNOSUPPORT || errno == EPROTOTYPE
956#endif 1091#endif
957 ) 1092 )
958 ) 1093 )
989 } 1124 }
990 1125
991 return res; 1126 return res;
992} 1127}
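A worked example of the retry loop above, with made-up sizes: for count = 300 MiB, a first sendfile call that copies 200 MiB exceeds the 128 MiB "can copy at least this much in one go" threshold, so the loop assumes the kernel merely stopped early, adds 200 MiB to written and offset, reduces count to 100 MiB and calls sendfile again; the second call copies the remaining 100 MiB, which is at or below the threshold, so the loop records it and returns written = 300 MiB. If the very first call fails or copies nothing, the loop breaks instead, and the errno test that follows it decides whether to fall back to a plain read/write emulation (the fallback itself is in lines elided from this hunk).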
993 1128
1129#ifdef PAGESIZE
1130# define eio_pagesize() PAGESIZE
1131#else
1132static intptr_t
1133eio_pagesize (void)
1134{
1135 static intptr_t page;
1136
1137 if (!page)
1138 page = sysconf (_SC_PAGESIZE);
1139
1140 return page;
1141}
1142#endif
1143
1144static void
1145eio_page_align (void **addr, size_t *length)
1146{
1147 intptr_t mask = eio_pagesize () - 1;
1148
1149 /* round down addr */
1150 intptr_t adj = mask & (intptr_t)*addr;
1151
1152 *addr = (void *)((intptr_t)*addr - adj);
1153 *length += adj;
1154
1155 /* round up length */
1156 *length = (*length + mask) & ~mask;
1157}
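A worked example of eio_page_align, assuming eio_pagesize () returns 4096 (mask = 0xfff):

/*   in : *addr = (void *)0x12345, *length = 1000                        */
/*   adj        = 0x12345 & 0xfff           = 0x345 (837)                */
/*   *addr     -> 0x12345 - 0x345           = 0x12000 (page start)       */
/*   *length   -> 1000 + 837                = 1837                       */
/*   *length   -> (1837 + 0xfff) & ~0xfff   = 4096                       */
/* i.e. the range is widened to whole pages, as the mlock/msync wrappers */
/* below expect.                                                         */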
1158
1159#if !_POSIX_MEMLOCK
1160# define eio__mlockall(a) ((errno = ENOSYS), -1)
1161#else
1162
994static int 1163static int
1164eio__mlockall (int flags)
1165{
1166 #if __GLIBC__ == 2 && __GLIBC_MINOR__ <= 7
1167 extern int mallopt (int, int);
1168 mallopt (-6, 238); /* http://bugs.debian.org/cgi-bin/bugreport.cgi?bug=473812 */
1169 #endif
1170
1171 if (EIO_MCL_CURRENT != MCL_CURRENT
1172 || EIO_MCL_FUTURE != MCL_FUTURE)
1173 {
1174 flags = 0
1175 | (flags & EIO_MCL_CURRENT ? MCL_CURRENT : 0)
1176 | (flags & EIO_MCL_FUTURE ? MCL_FUTURE : 0);
1177 }
1178
1179 return mlockall (flags);
1180}
1181#endif
1182
1183#if !_POSIX_MEMLOCK_RANGE
1184# define eio__mlock(a,b) ((errno = ENOSYS), -1)
1185#else
1186
1187static int
1188eio__mlock (void *addr, size_t length)
1189{
1190 eio_page_align (&addr, &length);
1191
1192 return mlock (addr, length);
1193}
1194
1195#endif
1196
1197#if !(_POSIX_MAPPED_FILES && _POSIX_SYNCHRONIZED_IO)
1198# define eio__msync(a,b,c) ((errno = ENOSYS), -1)
1199#else
1200
1201static int
1202eio__msync (void *mem, size_t len, int flags)
1203{
1204 eio_page_align (&mem, &len);
1205
1206 if (EIO_MS_ASYNC != MS_SYNC
1207 || EIO_MS_INVALIDATE != MS_INVALIDATE
1208 || EIO_MS_SYNC != MS_SYNC)
1209 {
1210 flags = 0
1211 | (flags & EIO_MS_ASYNC ? MS_ASYNC : 0)
1212 | (flags & EIO_MS_INVALIDATE ? MS_INVALIDATE : 0)
1213 | (flags & EIO_MS_SYNC ? MS_SYNC : 0);
1214 }
1215
1216 return msync (mem, len, flags);
1217}
1218
1219#endif
1220
1221static int
1222eio__mtouch (eio_req *req)
1223{
1224 void *mem = req->ptr2;
1225 size_t len = req->size;
1226 int flags = req->int1;
1227
1228 eio_page_align (&mem, &len);
1229
1230 {
1231 intptr_t addr = (intptr_t)mem;
1232 intptr_t end = addr + len;
1233 intptr_t page = eio_pagesize ();
1234
1235 if (addr < end)
1236 if (flags & EIO_MT_MODIFY) /* modify */
1237 do { *((volatile sig_atomic_t *)addr) |= 0; } while ((addr += page) < len && !EIO_CANCELLED (req));
1238 else
1239 do { *((volatile sig_atomic_t *)addr) ; } while ((addr += page) < len && !EIO_CANCELLED (req));
1240 }
1241
1242 return 0;
1243}
1244
1245/*****************************************************************************/
1246/* requests implemented outside eio_execute, because they are so large */
1247
1248static void
1249eio__realpath (eio_req *req, etp_worker *self)
1250{
1251 char *rel = req->ptr1;
1252 char *res;
1253 char *tmp1, *tmp2;
1254#if SYMLOOP_MAX > 32
1255 int symlinks = SYMLOOP_MAX;
1256#else
1257 int symlinks = 32;
1258#endif
1259
1260 req->result = -1;
1261
1262 errno = EINVAL;
1263 if (!rel)
1264 return;
1265
1266 errno = ENOENT;
1267 if (!*rel)
1268 return;
1269
1270 if (!req->ptr2)
1271 {
1272 X_LOCK (wrklock);
1273 req->flags |= EIO_FLAG_PTR2_FREE;
1274 X_UNLOCK (wrklock);
1275 req->ptr2 = malloc (PATH_MAX * 3);
1276
1277 errno = ENOMEM;
1278 if (!req->ptr2)
1279 return;
1280 }
1281
1282 res = req->ptr2;
1283 tmp1 = res + PATH_MAX;
1284 tmp2 = tmp1 + PATH_MAX;
1285
1286#if 0 /* disabled, the musl way to do things is just too racy */
1287#if __linux && defined(O_NONBLOCK) && defined(O_NOATIME)
1288 /* on linux we may be able to ask the kernel */
1289 {
1290 int fd = open (rel, O_RDONLY | O_NONBLOCK | O_NOCTTY | O_NOATIME);
1291
1292 if (fd >= 0)
1293 {
1294 sprintf (tmp1, "/proc/self/fd/%d", fd);
1295 req->result = readlink (tmp1, res, PATH_MAX);
1296 close (fd);
1297
1298 /* here we should probably stat the open file and the disk file, to make sure they still match */
1299
1300 if (req->result > 0)
1301 goto done;
1302 }
1303 else if (errno == ELOOP || errno == ENAMETOOLONG || errno == ENOENT || errno == ENOTDIR || errno == EIO)
1304 return;
1305 }
1306#endif
1307#endif
1308
1309 if (*rel != '/')
1310 {
1311 if (!getcwd (res, PATH_MAX))
1312 return;
1313
1314 if (res [1]) /* only use if not / */
1315 res += strlen (res);
1316 }
1317
1318 while (*rel)
1319 {
1320 ssize_t len, linklen;
1321 char *beg = rel;
1322
1323 while (*rel && *rel != '/')
1324 ++rel;
1325
1326 len = rel - beg;
1327
1328 if (!len) /* skip slashes */
1329 {
1330 ++rel;
1331 continue;
1332 }
1333
1334 if (beg [0] == '.')
1335 {
1336 if (len == 1)
1337 continue; /* . - nop */
1338
1339 if (beg [1] == '.' && len == 2)
1340 {
1341 /* .. - back up one component, if possible */
1342
1343 while (res != req->ptr2)
1344 if (*--res == '/')
1345 break;
1346
1347 continue;
1348 }
1349 }
1350
1351 errno = ENAMETOOLONG;
1352 if (res + 1 + len + 1 >= tmp1)
1353 return;
1354
1355 /* copy one component */
1356 *res = '/';
1357 memcpy (res + 1, beg, len);
1358
1359 /* zero-terminate, for readlink */
1360 res [len + 1] = 0;
1361
1362 /* now check if it's a symlink */
1363 linklen = readlink (req->ptr2, tmp1, PATH_MAX);
1364
1365 if (linklen < 0)
1366 {
1367 if (errno != EINVAL)
1368 return;
1369
1370 /* it's a normal directory. hopefully */
1371 res += len + 1;
1372 }
1373 else
1374 {
1375 /* yay, it was a symlink - build new path in tmp2 */
1376 int rellen = strlen (rel);
1377
1378 errno = ENAMETOOLONG;
1379 if (linklen + 1 + rellen >= PATH_MAX)
1380 return;
1381
1382 errno = ELOOP;
1383 if (!--symlinks)
1384 return;
1385
1386 if (*tmp1 == '/')
1387 res = req->ptr2; /* symlink resolves to an absolute path */
1388
1389 /* we need to be careful, as rel might point into tmp2 already */
1390 memmove (tmp2 + linklen + 1, rel, rellen + 1);
1391 tmp2 [linklen] = '/';
1392 memcpy (tmp2, tmp1, linklen);
1393
1394 rel = tmp2;
1395 }
1396 }
1397
1398 /* special case for the lone root path */
1399 if (res == req->ptr2)
1400 *res++ = '/';
1401
1402 req->result = res - (char *)req->ptr2;
1403
1404done:
1405 req->ptr2 = realloc (req->ptr2, req->result); /* trade time for space savings */
1406}
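A short trace of the component loop above, assuming a working directory of "/usr" and rel = "x/../y" where "x" is an ordinary directory: getcwd fills the result buffer with "/usr" and res is advanced past it; component "x" is appended ("/usr/x") and readlink fails with EINVAL, so it is accepted as a directory; component ".." backs res up to the previous '/' ("/usr"); component "y" is appended, giving "/usr/y" and result = 6. Had "x" instead been a symlink to an absolute target, res would have been reset to the start of the buffer and the link target spliced in front of the remaining "../y" via tmp2, with the symlinks counter guarding against ELOOP.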
1407
1408static signed char
995eio_dent_cmp (const void *a_, const void *b_) 1409eio_dent_cmp (const eio_dirent *a, const eio_dirent *b)
996{ 1410{
997 const eio_dirent *a = (const eio_dirent *)a_; 1411 return a->score - b->score ? a->score - b->score /* works because our signed char is always 0..100 */
998 const eio_dirent *b = (const eio_dirent *)b_; 1412 : a->inode < b->inode ? -1
1413 : a->inode > b->inode ? 1
1414 : 0;
1415}
999 1416
1000 return (int)b->score - (int)a->score ? (int)b->score - (int)a->score 1417#define EIO_DENT_CMP(i,op,j) eio_dent_cmp (&i, &j) op 0
1001 : a->inode < b->inode ? -1 : a->inode > b->inode ? 1 : 0; /* int might be < ino_t */ 1418
1419#define EIO_SORT_CUTOFF 30 /* quite high, but performs well on many filesystems */
1420#define EIO_SORT_FAST 60 /* when to only use insertion sort */
1421
1422static void
1423eio_dent_radix_sort (eio_dirent *dents, int size, signed char score_bits, ino_t inode_bits)
1424{
1425 unsigned char bits [9 + sizeof (ino_t) * 8];
1426 unsigned char *bit = bits;
1427
1428 assert (CHAR_BIT == 8);
1429 assert (sizeof (eio_dirent) * 8 < 256);
1430 assert (offsetof (eio_dirent, inode)); /* we use bit #0 as sentinel */
1431 assert (offsetof (eio_dirent, score)); /* we use bit #0 as sentinel */
1432
1433 if (size <= EIO_SORT_FAST)
1434 return;
1435
1436 /* first prepare an array of bits to test in our radix sort */
1437 /* try to take endianness into account, as well as differences in ino_t sizes */
1438 /* inode_bits must contain all inodes ORed together */
1439 /* which is used to skip bits that are 0 everywhere, which is very common */
1440 {
1441 ino_t endianness;
1442 int i, j;
1443
1444 /* we store the byte offset of byte n into byte n of "endianness" */
1445 for (i = 0; i < sizeof (ino_t); ++i)
1446 ((unsigned char *)&endianness)[i] = i;
1447
1448 *bit++ = 0;
1449
1450 for (i = 0; i < sizeof (ino_t); ++i)
1451 {
1452 /* shifting off the byte offsets out of "endianness" */
1453 int offs = (offsetof (eio_dirent, inode) + (endianness & 0xff)) * 8;
1454 endianness >>= 8;
1455
1456 for (j = 0; j < 8; ++j)
1457 if (inode_bits & (((ino_t)1) << (i * 8 + j)))
1458 *bit++ = offs + j;
1459 }
1460
1461 for (j = 0; j < 8; ++j)
1462 if (score_bits & (1 << j))
1463 *bit++ = offsetof (eio_dirent, score) * 8 + j;
1464 }
1465
1466 /* now actually do the sorting (a variant of MSD radix sort) */
1467 {
1468 eio_dirent *base_stk [9 + sizeof (ino_t) * 8], *base;
1469 eio_dirent *end_stk [9 + sizeof (ino_t) * 8], *end;
1470 unsigned char *bit_stk [9 + sizeof (ino_t) * 8];
1471 int stk_idx = 0;
1472
1473 base_stk [stk_idx] = dents;
1474 end_stk [stk_idx] = dents + size;
1475 bit_stk [stk_idx] = bit - 1;
1476
1477 do
1478 {
1479 base = base_stk [stk_idx];
1480 end = end_stk [stk_idx];
1481 bit = bit_stk [stk_idx];
1482
1483 for (;;)
1484 {
1485 unsigned char O = *bit >> 3;
1486 unsigned char M = 1 << (*bit & 7);
1487
1488 eio_dirent *a = base;
1489 eio_dirent *b = end;
1490
1491 if (b - a < EIO_SORT_CUTOFF)
1492 break;
1493
1494 /* now bit-partition the array on the bit */
1495 /* this ugly asymmetric loop seems to perform much better than typical */
1496 /* partition algos found in the literature */
1497 do
1498 if (!(((unsigned char *)a)[O] & M))
1499 ++a;
1500 else if (!(((unsigned char *)--b)[O] & M))
1501 {
1502 eio_dirent tmp = *a; *a = *b; *b = tmp;
1503 ++a;
1504 }
1505 while (b > a);
1506
1507 /* next bit, or stop, if no bits left in this path */
1508 if (!*--bit)
1509 break;
1510
1511 base_stk [stk_idx] = a;
1512 end_stk [stk_idx] = end;
1513 bit_stk [stk_idx] = bit;
1514 ++stk_idx;
1515
1516 end = a;
1517 }
1518 }
1519 while (stk_idx--);
1520 }
1521}
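A worked example of the bit-list construction above, assuming a 4-byte little-endian ino_t, inode_bits = 0x0f00 (only inode bits 8..11 are ever set across all entries) and score_bits = 7: the bits array ends up holding the sentinel 0, the four low bits of inode byte 1, and the three low bits of score; the MSD pass then partitions on the last entry first, i.e. from the highest score bit down to the lowest used inode bit, which reproduces the (score major, inode minor) order of eio_dent_cmp while never touching the 28 inode bits that are zero in every entry.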
1522
1523static void
1524eio_dent_insertion_sort (eio_dirent *dents, int size)
1525{
1526 /* first move the smallest element to the front, to act as a sentinel */
1527 {
1528 int i;
1529 eio_dirent *min = dents;
1530
1531 /* the radix pre-pass ensures that the minimum element is in the first EIO_SORT_CUTOFF + 1 elements */
1532 for (i = size > EIO_SORT_FAST ? EIO_SORT_CUTOFF + 1 : size; --i; )
1533 if (EIO_DENT_CMP (dents [i], <, *min))
1534 min = &dents [i];
1535
1536 /* swap elements 0 and j (minimum) */
1537 {
1538 eio_dirent tmp = *dents; *dents = *min; *min = tmp;
1539 }
1540 }
1541
1542 /* then do standard insertion sort, assuming that all elements are >= dents [0] */
1543 {
1544 eio_dirent *i, *j;
1545
1546 for (i = dents + 1; i < dents + size; ++i)
1547 {
1548 eio_dirent value = *i;
1549
1550 for (j = i - 1; EIO_DENT_CMP (*j, >, value); --j)
1551 j [1] = j [0];
1552
1553 j [1] = value;
1554 }
1555 }
1556}
1557
1558static void
1559eio_dent_sort (eio_dirent *dents, int size, signed char score_bits, ino_t inode_bits)
1560{
1561 if (size <= 1)
1562 return; /* our insertion sort relies on size > 0 */
1563
1564 /* first we use a radix sort, but only for dirs >= EIO_SORT_FAST */
1565 /* and stop sorting when the partitions are <= EIO_SORT_CUTOFF */
1566 eio_dent_radix_sort (dents, size, score_bits, inode_bits);
1567
1568 /* use an insertion sort at the end, or for small arrays, */
1569 /* as insertion sort is more efficient for small partitions */
1570 eio_dent_insertion_sort (dents, size);
1002} 1571}
1003 1572
1004/* read a full directory */ 1573/* read a full directory */
1005static void 1574static void
1006eio__scandir (eio_req *req, etp_worker *self) 1575eio__scandir (eio_req *req, etp_worker *self)
1007{ 1576{
1008 DIR *dirp; 1577 DIR *dirp;
1009 EIO_STRUCT_DIRENT *entp; 1578 EIO_STRUCT_DIRENT *entp;
1010 unsigned char *name, *names; 1579 char *name, *names;
1011 int namesalloc = 4096; 1580 int namesalloc = 4096;
1012 int namesoffs = 0; 1581 int namesoffs = 0;
1013 int flags = req->int1; 1582 int flags = req->int1;
1014 eio_dirent *dents = 0; 1583 eio_dirent *dents = 0;
1015 int dentalloc = 128; 1584 int dentalloc = 128;
1016 int dentoffs = 0; 1585 int dentoffs = 0;
1586 ino_t inode_bits = 0;
1017 1587
1018 req->result = -1; 1588 req->result = -1;
1019 1589
1020 if (!(flags & EIO_READDIR_DENTS)) 1590 if (!(flags & EIO_READDIR_DENTS))
1021 flags &= ~(EIO_READDIR_DIRS_FIRST | EIO_READDIR_STAT_ORDER); 1591 flags &= ~(EIO_READDIR_DIRS_FIRST | EIO_READDIR_STAT_ORDER);
1022 1592
1023 X_LOCK (wrklock); 1593 X_LOCK (wrklock);
1024 /* the corresponding closedir is in ETP_WORKER_CLEAR */ 1594 /* the corresponding closedir is in ETP_WORKER_CLEAR */
1025 self->dirp = dirp = opendir (req->ptr1); 1595 self->dirp = dirp = opendir (req->ptr1);
1596
1597 if (req->flags & EIO_FLAG_PTR1_FREE)
1598 free (req->ptr1);
1599
1026 req->flags |= EIO_FLAG_PTR1_FREE | EIO_FLAG_PTR2_FREE; 1600 req->flags |= EIO_FLAG_PTR1_FREE | EIO_FLAG_PTR2_FREE;
1027 req->ptr1 = names = malloc (namesalloc);
1028 req->ptr2 = dents = flags ? malloc (dentalloc * sizeof (eio_dirent)) : 0; 1601 req->ptr1 = dents = flags ? malloc (dentalloc * sizeof (eio_dirent)) : 0;
1602 req->ptr2 = names = malloc (namesalloc);
1029 X_UNLOCK (wrklock); 1603 X_UNLOCK (wrklock);
1030 1604
1031 if (dirp && names && (!flags || dents)) 1605 if (dirp && names && (!flags || dents))
1032 for (;;) 1606 for (;;)
1033 { 1607 {
1041 1615
1042 /* sort etc. */ 1616 /* sort etc. */
1043 req->int1 = flags; 1617 req->int1 = flags;
1044 req->result = dentoffs; 1618 req->result = dentoffs;
1045 1619
1046 if (dents)
1047 {
1048 eio_dirent *ent = dents + dentoffs;
1049
1050 while (ent > dents)
1051 (--ent)->name = names + (size_t)ent->name;
1052 }
1053
1054 if (flags & EIO_READDIR_STAT_ORDER 1620 if (flags & EIO_READDIR_STAT_ORDER)
1055 || !(~flags & (EIO_READDIR_DIRS_FIRST | EIO_READDIR_FOUND_UNKNOWN))) 1621 eio_dent_sort (dents, dentoffs, flags & EIO_READDIR_DIRS_FIRST ? 7 : 0, inode_bits);
1056 {
1057 /* pray your qsort doesn't use quicksort */
1058 qsort (dents, dentoffs, sizeof (*dents), eio_dent_cmp); /* score depends of DIRS_FIRST */
1059 }
1060 else if (flags & EIO_READDIR_DIRS_FIRST) 1622 else if (flags & EIO_READDIR_DIRS_FIRST)
1623 if (flags & EIO_READDIR_FOUND_UNKNOWN)
1624 eio_dent_sort (dents, dentoffs, 7, inode_bits); /* sort by score and inode */
1625 else
1061 { 1626 {
1062 /* in this case, all is known, and we just put dirs first and sort them */ 1627 /* in this case, all is known, and we just put dirs first and sort them */
1063 eio_dirent *ent = dents + dentoffs; 1628 eio_dirent *oth = dents + dentoffs;
1064 eio_dirent *dir = dents; 1629 eio_dirent *dir = dents;
1065 1630
1631 /* now partition dirs to the front, and non-dirs to the back */
1632 /* by walking from both sides and swapping if necessary */
1066 while (ent > dir) 1633 while (oth > dir)
1067 { 1634 {
1068 if (dir->type == DT_DIR) 1635 if (dir->type == EIO_DT_DIR)
1069 ++dir; 1636 ++dir;
1070 else
1071 {
1072 --ent;
1073
1074 if (ent->type == DT_DIR) 1637 else if ((--oth)->type == EIO_DT_DIR)
1075 { 1638 {
1076 eio_dirent tmp = *dir; 1639 eio_dirent tmp = *dir; *dir = *oth; *oth = tmp;
1077 *dir = *ent;
1078 *ent = tmp;
1079 1640
1080 ++dir; 1641 ++dir;
1081 } 1642 }
1082 } 1643 }
1644
1645 /* now sort the dirs only (dirs all have the same score) */
1646 eio_dent_sort (dents, dir - dents, 0, inode_bits);
1083 } 1647 }
1084
1085 /* now sort the dirs only */
1086 qsort (dents, dir - dents, sizeof (*dents), eio_dent_cmp);
1087 }
1088
1089 /* only provide the names array unless DENTS is specified */
1090 if (!(flags & EIO_READDIR_DENTS))
1091 {
1092 X_LOCK (wrklock);
1093 assert (!dents);
1094 req->ptr1 = 0;
1095 req->ptr2 = names;
1096 X_UNLOCK (wrklock);
1097 }
1098 1648
1099 break; 1649 break;
1100 } 1650 }
1101 1651
1102 /* now add the entry to our list(s) */ 1652 /* now add the entry to our list(s) */
1105 /* skip . and .. entries */ 1655 /* skip . and .. entries */
1106 if (name [0] != '.' || (name [1] && (name [1] != '.' || name [2]))) 1656 if (name [0] != '.' || (name [1] && (name [1] != '.' || name [2])))
1107 { 1657 {
1108 int len = D_NAMLEN (entp) + 1; 1658 int len = D_NAMLEN (entp) + 1;
1109 1659
1110 while (expect_false (namesoffs + len > namesalloc)) 1660 while (ecb_expect_false (namesoffs + len > namesalloc))
1111 { 1661 {
1112 namesalloc *= 2; 1662 namesalloc *= 2;
1113 X_LOCK (wrklock); 1663 X_LOCK (wrklock);
1114 req->ptr1 = names = realloc (names, namesalloc); 1664 req->ptr2 = names = realloc (names, namesalloc);
1115 X_UNLOCK (wrklock); 1665 X_UNLOCK (wrklock);
1116 1666
1117 if (!names) 1667 if (!names)
1118 break; 1668 break;
1119 } 1669 }
1122 1672
1123 if (dents) 1673 if (dents)
1124 { 1674 {
1125 struct eio_dirent *ent; 1675 struct eio_dirent *ent;
1126 1676
1127 if (expect_false (dentoffs == dentalloc)) 1677 if (ecb_expect_false (dentoffs == dentalloc))
1128 { 1678 {
1129 dentalloc *= 2; 1679 dentalloc *= 2;
1130 X_LOCK (wrklock); 1680 X_LOCK (wrklock);
1131 req->ptr2 = dents = realloc (dents, dentalloc * sizeof (eio_dirent)); 1681 req->ptr1 = dents = realloc (dents, dentalloc * sizeof (eio_dirent));
1132 X_UNLOCK (wrklock); 1682 X_UNLOCK (wrklock);
1133 1683
1134 if (!dents) 1684 if (!dents)
1135 break; 1685 break;
1136 } 1686 }
1137 1687
1138 ent = dents + dentoffs; 1688 ent = dents + dentoffs;
1139 1689
1140 ent->name = (char *)(size_t)namesoffs; /* rather dirtily we store the offset in the pointer */ 1690 ent->nameofs = namesoffs; /* rather dirtily we store the offset in the pointer */
1141 ent->namelen = len - 1; 1691 ent->namelen = len - 1;
1142 ent->inode = D_INO (entp); 1692 ent->inode = D_INO (entp);
1693
1694 inode_bits |= ent->inode;
1143 1695
1144 switch (D_TYPE (entp)) 1696 switch (D_TYPE (entp))
1145 { 1697 {
1146 default: 1698 default:
1147 ent->type = EIO_DT_UNKNOWN; 1699 ent->type = EIO_DT_UNKNOWN;
1190 #ifdef DT_WHT 1742 #ifdef DT_WHT
1191 case DT_WHT: ent->type = EIO_DT_WHT; break; 1743 case DT_WHT: ent->type = EIO_DT_WHT; break;
1192 #endif 1744 #endif
1193 } 1745 }
1194 1746
1195 ent->score = 0; 1747 ent->score = 7;
1196 1748
1197 if (flags & EIO_READDIR_DIRS_FIRST) 1749 if (flags & EIO_READDIR_DIRS_FIRST)
1198 { 1750 {
1199 if (ent->type == EIO_DT_UNKNOWN) 1751 if (ent->type == EIO_DT_UNKNOWN)
1200 { 1752 {
1201 if (*name == '.') /* leading dots are likely directories, and, in any case, rare */ 1753 if (*name == '.') /* leading dots are likely directories, and, in any case, rare */
1202 ent->score = 98; 1754 ent->score = 1;
1203 else if (!strchr (name, '.')) /* absence of dots indicates likely dirs */ 1755 else if (!strchr (name, '.')) /* absence of dots indicates likely dirs */
1204 ent->score = len <= 2 ? len + 60 : len <= 4 ? 50 : len <= 7 ? 40 : 10; /* shorter == more likely dir, but avoid too many classes */ 1756 ent->score = len <= 2 ? 4 - len : len <= 4 ? 4 : len <= 7 ? 5 : 6; /* shorter == more likely dir, but avoid too many classes */
1205 } 1757 }
1206 else if (ent->type == EIO_DT_DIR) 1758 else if (ent->type == EIO_DT_DIR)
1207 ent->score = 100; 1759 ent->score = 0;
1208 } 1760 }
1209 } 1761 }
1210 1762
1211 namesoffs += len; 1763 namesoffs += len;
1212 ++dentoffs; 1764 ++dentoffs;
1213 } 1765 }
1766
1767 if (EIO_CANCELLED (req))
1768 {
1769 errno = ECANCELED;
1770 break;
1771 }
1214 } 1772 }
1215 else
1216 req->result = -1;
1217}
1218
1219#if !(_POSIX_MAPPED_FILES && _POSIX_SYNCHRONIZED_IO)
1220# undef msync
1221# define msync(a,b,c) ((errno = ENOSYS), -1)
1222#endif
1223
1224int
1225eio__mtouch (void *mem, size_t len, int flags)
1226{
1227 intptr_t addr = (intptr_t)mem;
1228 intptr_t end = addr + len;
1229#ifdef PAGESIZE
1230 const intptr_t page = PAGESIZE;
1231#else
1232 static intptr_t page;
1233
1234 if (!page)
1235 page = sysconf (_SC_PAGESIZE);
1236#endif
1237
1238 addr &= ~(page - 1); /* assume page size is always a power of two */
1239
1240 if (addr < end)
1241 if (flags) /* modify */
1242 do { *((volatile sig_atomic_t *)addr) |= 0; } while ((addr += page) < len);
1243 else
1244 do { *((volatile sig_atomic_t *)addr) ; } while ((addr += page) < len);
1245
1246 return 0;
1247} 1773}
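To summarize the EIO_READDIR_DIRS_FIRST scoring used in the loop above: a known directory gets score 0; for entries of unknown type, a leading '.' gives 1 and a dot-free name gives 2..6 depending on length (shorter names are more likely directories); everything else keeps the default score 7. Since eio_dent_cmp orders by ascending score before inode, the entries most likely to be directories end up at the front of the returned array.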
1248 1774
1249/*****************************************************************************/ 1775/*****************************************************************************/
1250 1776
1251#define ALLOC(len) \ 1777#define ALLOC(len) \
1283 if (req) 1809 if (req)
1284 break; 1810 break;
1285 1811
1286 ++idle; 1812 ++idle;
1287 1813
1288 ts.tv_sec = time (0) + IDLE_TIMEOUT; 1814 ts.tv_sec = time (0) + idle_timeout;
1289 if (X_COND_TIMEDWAIT (reqwait, reqlock, ts) == ETIMEDOUT) 1815 if (X_COND_TIMEDWAIT (reqwait, reqlock, ts) == ETIMEDOUT)
1290 { 1816 {
1291 if (idle > max_idle) 1817 if (idle > max_idle)
1292 { 1818 {
1293 --idle; 1819 --idle;
1310 X_UNLOCK (reqlock); 1836 X_UNLOCK (reqlock);
1311 1837
1312 if (req->type < 0) 1838 if (req->type < 0)
1313 goto quit; 1839 goto quit;
1314 1840
1315 if (!EIO_CANCELLED (req))
1316 ETP_EXECUTE (self, req); 1841 ETP_EXECUTE (self, req);
1317 1842
1318 X_LOCK (reslock); 1843 X_LOCK (reslock);
1319 1844
1320 ++npending; 1845 ++npending;
1321 1846
1336 return 0; 1861 return 0;
1337} 1862}
1338 1863
1339/*****************************************************************************/ 1864/*****************************************************************************/
1340 1865
1866int ecb_cold
1341int eio_init (void (*want_poll)(void), void (*done_poll)(void)) 1867eio_init (void (*want_poll)(void), void (*done_poll)(void))
1342{ 1868{
1343 return etp_init (want_poll, done_poll); 1869 return etp_init (want_poll, done_poll);
1344} 1870}
1345 1871
1872ecb_inline void
1346static void eio_api_destroy (eio_req *req) 1873eio_api_destroy (eio_req *req)
1347{ 1874{
1348 free (req); 1875 free (req);
1349} 1876}
1350 1877
1351#define REQ(rtype) \ 1878#define REQ(rtype) \
1370 { \ 1897 { \
1371 eio_api_destroy (req); \ 1898 eio_api_destroy (req); \
1372 return 0; \ 1899 return 0; \
1373 } 1900 }
1374 1901
1902static void
1375static void eio_execute (etp_worker *self, eio_req *req) 1903eio_execute (etp_worker *self, eio_req *req)
1376{ 1904{
1377 errno = 0; 1905 if (ecb_expect_false (EIO_CANCELLED (req)))
1906 {
1907 req->result = -1;
1908 req->errorno = ECANCELED;
1909 return;
1910 }
1378 1911
1379 switch (req->type) 1912 switch (req->type)
1380 { 1913 {
1381 case EIO_READ: ALLOC (req->size); 1914 case EIO_READ: ALLOC (req->size);
1382 req->result = req->offs >= 0 1915 req->result = req->offs >= 0
1394 case EIO_LSTAT: ALLOC (sizeof (EIO_STRUCT_STAT)); 1927 case EIO_LSTAT: ALLOC (sizeof (EIO_STRUCT_STAT));
1395 req->result = lstat (req->ptr1, (EIO_STRUCT_STAT *)req->ptr2); break; 1928 req->result = lstat (req->ptr1, (EIO_STRUCT_STAT *)req->ptr2); break;
1396 case EIO_FSTAT: ALLOC (sizeof (EIO_STRUCT_STAT)); 1929 case EIO_FSTAT: ALLOC (sizeof (EIO_STRUCT_STAT));
1397 req->result = fstat (req->int1, (EIO_STRUCT_STAT *)req->ptr2); break; 1930 req->result = fstat (req->int1, (EIO_STRUCT_STAT *)req->ptr2); break;
1398 1931
1932 case EIO_STATVFS: ALLOC (sizeof (EIO_STRUCT_STATVFS));
1933 req->result = statvfs (req->ptr1, (EIO_STRUCT_STATVFS *)req->ptr2); break;
1934 case EIO_FSTATVFS: ALLOC (sizeof (EIO_STRUCT_STATVFS));
1935 req->result = fstatvfs (req->int1, (EIO_STRUCT_STATVFS *)req->ptr2); break;
1936
1399 case EIO_CHOWN: req->result = chown (req->ptr1, req->int2, req->int3); break; 1937 case EIO_CHOWN: req->result = chown (req->ptr1, req->int2, req->int3); break;
1400 case EIO_FCHOWN: req->result = fchown (req->int1, req->int2, req->int3); break; 1938 case EIO_FCHOWN: req->result = fchown (req->int1, req->int2, req->int3); break;
1401 case EIO_CHMOD: req->result = chmod (req->ptr1, (mode_t)req->int2); break; 1939 case EIO_CHMOD: req->result = chmod (req->ptr1, (mode_t)req->int2); break;
1402 case EIO_FCHMOD: req->result = fchmod (req->int1, (mode_t)req->int2); break; 1940 case EIO_FCHMOD: req->result = fchmod (req->int1, (mode_t)req->int2); break;
1403 case EIO_TRUNCATE: req->result = truncate (req->ptr1, req->offs); break; 1941 case EIO_TRUNCATE: req->result = truncate (req->ptr1, req->offs); break;
1410 case EIO_RMDIR: req->result = rmdir (req->ptr1); break; 1948 case EIO_RMDIR: req->result = rmdir (req->ptr1); break;
1411 case EIO_MKDIR: req->result = mkdir (req->ptr1, (mode_t)req->int2); break; 1949 case EIO_MKDIR: req->result = mkdir (req->ptr1, (mode_t)req->int2); break;
1412 case EIO_RENAME: req->result = rename (req->ptr1, req->ptr2); break; 1950 case EIO_RENAME: req->result = rename (req->ptr1, req->ptr2); break;
1413 case EIO_LINK: req->result = link (req->ptr1, req->ptr2); break; 1951 case EIO_LINK: req->result = link (req->ptr1, req->ptr2); break;
1414 case EIO_SYMLINK: req->result = symlink (req->ptr1, req->ptr2); break; 1952 case EIO_SYMLINK: req->result = symlink (req->ptr1, req->ptr2); break;
1415 case EIO_MKNOD: req->result = mknod (req->ptr1, (mode_t)req->int2, (dev_t)req->int3); break; 1953 case EIO_MKNOD: req->result = mknod (req->ptr1, (mode_t)req->int2, (dev_t)req->offs); break;
1416 1954
1955 case EIO_REALPATH: eio__realpath (req, self); break;
1956
1417 case EIO_READLINK: ALLOC (NAME_MAX); 1957 case EIO_READLINK: ALLOC (PATH_MAX);
1418 req->result = readlink (req->ptr1, req->ptr2, NAME_MAX); break; 1958 req->result = readlink (req->ptr1, req->ptr2, PATH_MAX); break;
1419 1959
1420 case EIO_SYNC: req->result = 0; sync (); break; 1960 case EIO_SYNC: req->result = 0; sync (); break;
1421 case EIO_FSYNC: req->result = fsync (req->int1); break; 1961 case EIO_FSYNC: req->result = fsync (req->int1); break;
1422 case EIO_FDATASYNC: req->result = fdatasync (req->int1); break; 1962 case EIO_FDATASYNC: req->result = fdatasync (req->int1); break;
1423 case EIO_MSYNC: req->result = msync (req->ptr2, req->size, req->int1); break; 1963 case EIO_MSYNC: req->result = eio__msync (req->ptr2, req->size, req->int1); break;
1964 case EIO_MTOUCH: req->result = eio__mtouch (req); break;
1424 case EIO_MTOUCH: req->result = eio__mtouch (req->ptr2, req->size, req->int1); break; 1965 case EIO_MLOCK: req->result = eio__mlock (req->ptr2, req->size); break;
1966 case EIO_MLOCKALL: req->result = eio__mlockall (req->int1); break;
1425 case EIO_SYNC_FILE_RANGE: req->result = eio__sync_file_range (req->int1, req->offs, req->size, req->int2); break; 1967 case EIO_SYNC_FILE_RANGE: req->result = eio__sync_file_range (req->int1, req->offs, req->size, req->int2); break;
1426 1968
1427 case EIO_READDIR: eio__scandir (req, self); break; 1969 case EIO_READDIR: eio__scandir (req, self); break;
1428 1970
1429 case EIO_BUSY: 1971 case EIO_BUSY:
1430#ifdef _WIN32 1972#ifdef _WIN32
1431 Sleep (req->nv1 * 1000.); 1973 Sleep (req->nv1 * 1e3);
1432#else 1974#else
1433 { 1975 {
1434 struct timeval tv; 1976 struct timeval tv;
1435 1977
1436 tv.tv_sec = req->nv1; 1978 tv.tv_sec = req->nv1;
1437 tv.tv_usec = (req->nv1 - tv.tv_sec) * 1000000.; 1979 tv.tv_usec = (req->nv1 - tv.tv_sec) * 1e6;
1438 1980
1439 req->result = select (0, 0, 0, 0, &tv); 1981 req->result = select (0, 0, 0, 0, &tv);
1440 } 1982 }
1441#endif 1983#endif
1442 break; 1984 break;
1457 times = tv; 1999 times = tv;
1458 } 2000 }
1459 else 2001 else
1460 times = 0; 2002 times = 0;
1461 2003
1462
1463 req->result = req->type == EIO_FUTIME 2004 req->result = req->type == EIO_FUTIME
1464 ? futimes (req->int1, times) 2005 ? futimes (req->int1, times)
1465 : utimes (req->ptr1, times); 2006 : utimes (req->ptr1, times);
1466 } 2007 }
1467 break; 2008 break;
1472 case EIO_NOP: 2013 case EIO_NOP:
1473 req->result = 0; 2014 req->result = 0;
1474 break; 2015 break;
1475 2016
1476 case EIO_CUSTOM: 2017 case EIO_CUSTOM:
1477 ((void (*)(eio_req *))req->feed) (req); 2018 req->feed (req);
1478 break; 2019 break;
1479 2020
1480 default: 2021 default:
2022 errno = ENOSYS;
1481 req->result = -1; 2023 req->result = -1;
1482 break; 2024 break;
1483 } 2025 }
1484 2026
1485 req->errorno = errno; 2027 req->errorno = errno;
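
Not part of the patch: a minimal sketch of how the cancellation short-circuit added above can be observed from a completion callback. It assumes the usual libeio convention that a failed request carries the error code in req->errorno; the callback name and its return-value handling are illustrative only.

#include <stdio.h>
#include <string.h>
#include <errno.h>
#include "eio.h"

/* Illustrative completion callback: with this revision a request that was
   cancelled before a worker picked it up finishes with result == -1 and
   errorno == ECANCELED instead of being executed. */
static int
cancel_aware_cb (eio_req *req)
{
  if (req->result < 0 && req->errorno == ECANCELED)
    fprintf (stderr, "request %p cancelled before execution\n", (void *)req);
  else if (req->result < 0)
    fprintf (stderr, "request failed: %s\n", strerror (req->errorno));

  return 0; /* returning 0 lets eio_poll continue normally */
}
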
1515eio_req *eio_mtouch (void *addr, size_t length, int flags, int pri, eio_cb cb, void *data) 2057eio_req *eio_mtouch (void *addr, size_t length, int flags, int pri, eio_cb cb, void *data)
1516{ 2058{
1517 REQ (EIO_MTOUCH); req->ptr2 = addr; req->size = length; req->int1 = flags; SEND; 2059 REQ (EIO_MTOUCH); req->ptr2 = addr; req->size = length; req->int1 = flags; SEND;
1518} 2060}
1519 2061
2062eio_req *eio_mlock (void *addr, size_t length, int pri, eio_cb cb, void *data)
2063{
2064 REQ (EIO_MLOCK); req->ptr2 = addr; req->size = length; SEND;
2065}
2066
2067eio_req *eio_mlockall (int flags, int pri, eio_cb cb, void *data)
2068{
2069 REQ (EIO_MLOCKALL); req->int1 = flags; SEND;
2070}
2071
1520eio_req *eio_sync_file_range (int fd, off_t offset, size_t nbytes, unsigned int flags, int pri, eio_cb cb, void *data) 2072eio_req *eio_sync_file_range (int fd, off_t offset, size_t nbytes, unsigned int flags, int pri, eio_cb cb, void *data)
1521{ 2073{
1522 REQ (EIO_SYNC_FILE_RANGE); req->int1 = fd; req->offs = offset; req->size = nbytes; req->int2 = flags; SEND; 2074 REQ (EIO_SYNC_FILE_RANGE); req->int1 = fd; req->offs = offset; req->size = nbytes; req->int2 = flags; SEND;
1523} 2075}
1524 2076
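
Not from the diff: a hedged sketch of submitting the new EIO_MLOCK/EIO_MLOCKALL requests added above. EIO_MCL_CURRENT and EIO_MCL_FUTURE are assumed to mirror the mlockall() flags as defined in eio.h; verify the exact names in your copy.

#include <stddef.h>
#include "eio.h"

static int
mlock_done (eio_req *req)
{
  /* req->result is 0 on success, -1 with req->errorno set on failure */
  return 0;
}

static void
lock_region (void *buf, size_t len)
{
  /* lock one buffer, then ask for all current mappings to be locked */
  eio_mlock (buf, len, EIO_PRI_DEFAULT, mlock_done, 0);
  eio_mlockall (EIO_MCL_CURRENT, EIO_PRI_DEFAULT, mlock_done, 0);
}
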
1548} 2100}
1549 2101
1550eio_req *eio_fstat (int fd, int pri, eio_cb cb, void *data) 2102eio_req *eio_fstat (int fd, int pri, eio_cb cb, void *data)
1551{ 2103{
1552 REQ (EIO_FSTAT); req->int1 = fd; SEND; 2104 REQ (EIO_FSTAT); req->int1 = fd; SEND;
2105}
2106
2107eio_req *eio_fstatvfs (int fd, int pri, eio_cb cb, void *data)
2108{
2109 REQ (EIO_FSTATVFS); req->int1 = fd; SEND;
1553} 2110}
1554 2111
1555eio_req *eio_futime (int fd, double atime, double mtime, int pri, eio_cb cb, void *data) 2112eio_req *eio_futime (int fd, double atime, double mtime, int pri, eio_cb cb, void *data)
1556{ 2113{
1557 REQ (EIO_FUTIME); req->int1 = fd; req->nv1 = atime; req->nv2 = mtime; SEND; 2114 REQ (EIO_FUTIME); req->int1 = fd; req->nv1 = atime; req->nv2 = mtime; SEND;
1621eio_req *eio_readlink (const char *path, int pri, eio_cb cb, void *data) 2178eio_req *eio_readlink (const char *path, int pri, eio_cb cb, void *data)
1622{ 2179{
1623 return eio__1path (EIO_READLINK, path, pri, cb, data); 2180 return eio__1path (EIO_READLINK, path, pri, cb, data);
1624} 2181}
1625 2182
2183eio_req *eio_realpath (const char *path, int pri, eio_cb cb, void *data)
2184{
2185 return eio__1path (EIO_REALPATH, path, pri, cb, data);
2186}
2187
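
An illustrative, non-authoritative sketch of consuming the new EIO_REALPATH request: in released libeio the resolved path is typically delivered in req->ptr2 with its length in req->result and is not necessarily 0-terminated; confirm against the eio documentation shipped with your version.

#include <stdio.h>
#include "eio.h"

static int
realpath_done (eio_req *req)
{
  if (req->result >= 0)
    /* print length-delimited, since the buffer may not be 0-terminated */
    printf ("resolved: %.*s\n", (int)req->result, (char *)req->ptr2);

  return 0;
}

/* eio_realpath ("/etc/../etc/passwd", EIO_PRI_DEFAULT, realpath_done, 0); */
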
1626eio_req *eio_stat (const char *path, int pri, eio_cb cb, void *data) 2188eio_req *eio_stat (const char *path, int pri, eio_cb cb, void *data)
1627{ 2189{
1628 return eio__1path (EIO_STAT, path, pri, cb, data); 2190 return eio__1path (EIO_STAT, path, pri, cb, data);
1629} 2191}
1630 2192
1631eio_req *eio_lstat (const char *path, int pri, eio_cb cb, void *data) 2193eio_req *eio_lstat (const char *path, int pri, eio_cb cb, void *data)
1632{ 2194{
1633 return eio__1path (EIO_LSTAT, path, pri, cb, data); 2195 return eio__1path (EIO_LSTAT, path, pri, cb, data);
1634} 2196}
1635 2197
2198eio_req *eio_statvfs (const char *path, int pri, eio_cb cb, void *data)
2199{
2200 return eio__1path (EIO_STATVFS, path, pri, cb, data);
2201}
2202
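
Also not part of the patch: a sketch of reading back the EIO_STATVFS result, assuming the buffer is delivered in req->ptr2 as an EIO_STRUCT_STATVFS (struct statvfs on POSIX), in the same way the stat requests deliver theirs. The path and field names printed here are just examples.

#include <stdio.h>
#include <sys/statvfs.h>
#include "eio.h"

static int
statvfs_done (eio_req *req)
{
  if (req->result >= 0)
    {
      struct statvfs *s = (struct statvfs *)req->ptr2;

      printf ("bsize %lu, bfree %llu\n",
              (unsigned long)s->f_bsize,
              (unsigned long long)s->f_bfree);
    }

  return 0;
}

/* eio_statvfs ("/var", EIO_PRI_DEFAULT, statvfs_done, 0); */
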
1636eio_req *eio_unlink (const char *path, int pri, eio_cb cb, void *data) 2203eio_req *eio_unlink (const char *path, int pri, eio_cb cb, void *data)
1637{ 2204{
1638 return eio__1path (EIO_UNLINK, path, pri, cb, data); 2205 return eio__1path (EIO_UNLINK, path, pri, cb, data);
1639} 2206}
1640 2207
1648 REQ (EIO_READDIR); PATH; req->int1 = flags; SEND; 2215 REQ (EIO_READDIR); PATH; req->int1 = flags; SEND;
1649} 2216}
1650 2217
1651eio_req *eio_mknod (const char *path, mode_t mode, dev_t dev, int pri, eio_cb cb, void *data) 2218eio_req *eio_mknod (const char *path, mode_t mode, dev_t dev, int pri, eio_cb cb, void *data)
1652{ 2219{
1653 REQ (EIO_MKNOD); PATH; req->int2 = (long)mode; req->int3 = (long)dev; SEND; 2220 REQ (EIO_MKNOD); PATH; req->int2 = (long)mode; req->offs = (off_t)dev; SEND;
1654} 2221}
1655 2222
1656static eio_req * 2223static eio_req *
1657eio__2path (int type, const char *path, const char *new_path, int pri, eio_cb cb, void *data) 2224eio__2path (int type, const char *path, const char *new_path, int pri, eio_cb cb, void *data)
1658{ 2225{
1682eio_req *eio_rename (const char *path, const char *new_path, int pri, eio_cb cb, void *data) 2249eio_req *eio_rename (const char *path, const char *new_path, int pri, eio_cb cb, void *data)
1683{ 2250{
1684 return eio__2path (EIO_RENAME, path, new_path, pri, cb, data); 2251 return eio__2path (EIO_RENAME, path, new_path, pri, cb, data);
1685} 2252}
1686 2253
1687eio_req *eio_custom (eio_cb execute, int pri, eio_cb cb, void *data) 2254eio_req *eio_custom (void (*execute)(eio_req *), int pri, eio_cb cb, void *data)
1688{ 2255{
1689 REQ (EIO_CUSTOM); req->feed = (void (*)(eio_req *))execute; SEND; 2256 REQ (EIO_CUSTOM); req->feed = execute; SEND;
1690} 2257}
1691 2258
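
A sketch (not from the revision) of using eio_custom with the tightened signature above: the execute callback is now passed directly instead of being smuggled through an eio_cb cast. It runs in a worker thread and is expected to fill in req->result (and errno on failure) itself.

#include "eio.h"

/* runs in a libeio worker thread */
static void
my_execute (eio_req *req)
{
  /* do the blocking work here, then report success */
  req->result = 0;
}

/* runs in the submitting thread via eio_poll */
static int
my_finish (eio_req *req)
{
  return 0;
}

/* eio_custom (my_execute, EIO_PRI_DEFAULT, my_finish, 0); */
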
1692#endif 2259#endif
1693 2260
1694eio_req *eio_grp (eio_cb cb, void *data) 2261eio_req *eio_grp (eio_cb cb, void *data)
1703#undef SEND 2270#undef SEND
1704 2271
1705/*****************************************************************************/ 2272/*****************************************************************************/
1706/* grp functions */ 2273/* grp functions */
1707 2274
2275void
1708void eio_grp_feed (eio_req *grp, void (*feed)(eio_req *req), int limit) 2276eio_grp_feed (eio_req *grp, void (*feed)(eio_req *req), int limit)
1709{ 2277{
1710 grp->int2 = limit; 2278 grp->int2 = limit;
1711 grp->feed = feed; 2279 grp->feed = feed;
1712 2280
1713 grp_try_feed (grp); 2281 grp_try_feed (grp);
1714} 2282}
1715 2283
2284void
1716void eio_grp_limit (eio_req *grp, int limit) 2285eio_grp_limit (eio_req *grp, int limit)
1717{ 2286{
1718 grp->int2 = limit; 2287 grp->int2 = limit;
1719 2288
1720 grp_try_feed (grp); 2289 grp_try_feed (grp);
1721} 2290}
1722 2291
2292void
1723void eio_grp_add (eio_req *grp, eio_req *req) 2293eio_grp_add (eio_req *grp, eio_req *req)
1724{ 2294{
1725 assert (("cannot add requests to IO::AIO::GRP after the group finished", grp->int1 != 2)); 2295 assert (("cannot add requests to IO::AIO::GRP after the group finished", grp->int1 != 2));
1726 2296
1727 grp->flags |= EIO_FLAG_GROUPADD; 2297 grp->flags |= EIO_FLAG_GROUPADD;
1728 2298
1739} 2309}
1740 2310
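
A minimal, hedged sketch of the group API shown above: eio_grp creates a group request whose callback fires once every member added with eio_grp_add has finished. The paths and the 0 callbacks on the member requests are placeholders.

#include "eio.h"

static int
group_done (eio_req *grp)
{
  /* called once all requests added to the group have completed */
  return 0;
}

static void
stat_both (void)
{
  eio_req *grp = eio_grp (group_done, 0);

  eio_grp_add (grp, eio_stat ("/etc/passwd", EIO_PRI_DEFAULT, 0, 0));
  eio_grp_add (grp, eio_stat ("/etc/group",  EIO_PRI_DEFAULT, 0, 0));
}
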
1741/*****************************************************************************/ 2311/*****************************************************************************/
1742/* misc garbage */ 2312/* misc garbage */
1743 2313
2314ssize_t
1744ssize_t eio_sendfile_sync (int ofd, int ifd, off_t offset, size_t count) 2315eio_sendfile_sync (int ofd, int ifd, off_t offset, size_t count)
1745{ 2316{
1746 etp_worker wrk; 2317 etp_worker wrk;
2318 ssize_t ret;
1747 2319
1748 wrk.dbuf = 0; 2320 wrk.dbuf = 0;
1749 2321
1750 eio__sendfile (ofd, ifd, offset, count, &wrk); 2322 ret = eio__sendfile (ofd, ifd, offset, count, &wrk);
1751 2323
1752 if (wrk.dbuf) 2324 if (wrk.dbuf)
1753 free (wrk.dbuf); 2325 free (wrk.dbuf);
1754}
1755 2326
2327 return ret;
2328}
2329
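
Finally, a usage sketch (not part of the patch) reflecting the fix above: eio_sendfile_sync now returns the value from eio__sendfile, so the byte count or -1 can be checked by the caller. The function and variable names are illustrative.

#include <stdio.h>
#include <sys/types.h>
#include "eio.h"

static void
copy_range (int out_fd, int in_fd, size_t len)
{
  ssize_t n = eio_sendfile_sync (out_fd, in_fd, 0, len);

  if (n < 0)
    perror ("eio_sendfile_sync");
  else
    printf ("copied %zd bytes\n", n);
}
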
