/cvs/libeio/eio.c

Comparing libeio/eio.c (file contents):
Revision 1.40 by root, Sat Jun 13 14:58:33 2009 UTC vs.
Revision 1.72 by root, Fri Jun 10 12:45:20 2011 UTC

1/* 1/*
2 * libeio implementation 2 * libeio implementation
3 * 3 *
4 * Copyright (c) 2007,2008,2009 Marc Alexander Lehmann <libeio@schmorp.de> 4 * Copyright (c) 2007,2008,2009,2010,2011 Marc Alexander Lehmann <libeio@schmorp.de>
5 * All rights reserved. 5 * All rights reserved.
6 * 6 *
7 * Redistribution and use in source and binary forms, with or without modifica- 7 * Redistribution and use in source and binary forms, with or without modifica-
8 * tion, are permitted provided that the following conditions are met: 8 * tion, are permitted provided that the following conditions are met:
9 * 9 *
35 * and other provisions required by the GPL. If you do not delete the 35 * and other provisions required by the GPL. If you do not delete the
36 * provisions above, a recipient may use your version of this file under 36 * provisions above, a recipient may use your version of this file under
37 * either the BSD or the GPL. 37 * either the BSD or the GPL.
38 */ 38 */
39 39
40#ifndef _WIN32
41# include "config.h"
42#endif
43
40#include "eio.h" 44#include "eio.h"
45#include "ecb.h"
41 46
42#ifdef EIO_STACKSIZE 47#ifdef EIO_STACKSIZE
43# define XTHREAD_STACKSIZE EIO_STACKSIZE 48# define XTHREAD_STACKSIZE EIO_STACKSIZE
44#endif 49#endif
45#include "xthread.h" 50#include "xthread.h"
49#include <stdlib.h> 54#include <stdlib.h>
50#include <string.h> 55#include <string.h>
51#include <errno.h> 56#include <errno.h>
52#include <sys/types.h> 57#include <sys/types.h>
53#include <sys/stat.h> 58#include <sys/stat.h>
59#include <sys/statvfs.h>
54#include <limits.h> 60#include <limits.h>
55#include <fcntl.h> 61#include <fcntl.h>
56#include <assert.h> 62#include <assert.h>
57 63
64/* intptr_t comes from unistd.h, says POSIX/UNIX/tradition */
65/* intptr_t only comes form stdint.h, says idiot openbsd coder */
66#if HAVE_STDINT_H
67# include <stdint.h>
68#endif
69
58#ifndef EIO_FINISH 70#ifndef EIO_FINISH
59# define EIO_FINISH(req) ((req)->finish) && !EIO_CANCELLED (req) ? (req)->finish (req) : 0 71# define EIO_FINISH(req) ((req)->finish) && !EIO_CANCELLED (req) ? (req)->finish (req) : 0
60#endif 72#endif
61 73
62#ifndef EIO_DESTROY 74#ifndef EIO_DESTROY
70#ifdef _WIN32 82#ifdef _WIN32
71 83
72 /*doh*/ 84 /*doh*/
73#else 85#else
74 86
75# include "config.h"
76# include <sys/time.h> 87# include <sys/time.h>
77# include <sys/select.h> 88# include <sys/select.h>
78# include <sys/mman.h>
79# include <unistd.h> 89# include <unistd.h>
80# include <utime.h> 90# include <utime.h>
81# include <signal.h> 91# include <signal.h>
82# include <dirent.h> 92# include <dirent.h>
83 93
94#if _POSIX_MEMLOCK || _POSIX_MEMLOCK_RANGE || _POSIX_MAPPED_FILES
95# include <sys/mman.h>
96#endif
97
84/* POSIX_SOURCE is useless on bsd's, and XOPEN_SOURCE is unreliable there, too */ 98/* POSIX_SOURCE is useless on bsd's, and XOPEN_SOURCE is unreliable there, too */
85# if defined(__FreeBSD__) || defined(__NetBSD__) || defined(__OpenBSD__) 99# if __FreeBSD__ || defined __NetBSD__ || defined __OpenBSD__
86# define _DIRENT_HAVE_D_TYPE /* sigh */ 100# define _DIRENT_HAVE_D_TYPE /* sigh */
87# define D_INO(de) (de)->d_fileno 101# define D_INO(de) (de)->d_fileno
88# define D_NAMLEN(de) (de)->d_namlen 102# define D_NAMLEN(de) (de)->d_namlen
89# elif defined(__linux) || defined(d_ino) || _XOPEN_SOURCE >= 600 103# elif __linux || defined d_ino || _XOPEN_SOURCE >= 600
90# define D_INO(de) (de)->d_ino 104# define D_INO(de) (de)->d_ino
91# endif 105# endif
92 106
93#ifdef _D_EXACT_NAMLEN 107#ifdef _D_EXACT_NAMLEN
94# undef D_NAMLEN 108# undef D_NAMLEN
106#endif 120#endif
107 121
108#if HAVE_SENDFILE 122#if HAVE_SENDFILE
109# if __linux 123# if __linux
110# include <sys/sendfile.h> 124# include <sys/sendfile.h>
111# elif __freebsd 125# elif __FreeBSD__ || defined __APPLE__
112# include <sys/socket.h> 126# include <sys/socket.h>
113# include <sys/uio.h> 127# include <sys/uio.h>
114# elif __hpux 128# elif __hpux
115# include <sys/socket.h> 129# include <sys/socket.h>
116# elif __solaris /* not yet */ 130# elif __solaris
117# include <sys/sendfile.h> 131# include <sys/sendfile.h>
118# else 132# else
119# error sendfile support requested but not available 133# error sendfile support requested but not available
120# endif 134# endif
121#endif 135#endif
128#endif 142#endif
129#ifndef D_NAMLEN 143#ifndef D_NAMLEN
130# define D_NAMLEN(de) strlen ((de)->d_name) 144# define D_NAMLEN(de) strlen ((de)->d_name)
131#endif 145#endif
132 146
133/* number of seconds after which an idle threads exit */
134#define IDLE_TIMEOUT 10
135
136/* used for struct dirent, AIX doesn't provide it */ 147/* used for struct dirent, AIX doesn't provide it */
137#ifndef NAME_MAX 148#ifndef NAME_MAX
138# define NAME_MAX 4096 149# define NAME_MAX 4096
150#endif
151
152/* used for readlink etc. */
153#ifndef PATH_MAX
154# define PATH_MAX 4096
139#endif 155#endif
140 156
141/* buffer size for various temporary buffers */ 157/* buffer size for various temporary buffers */
142#define EIO_BUFSIZE 65536 158#define EIO_BUFSIZE 65536
143 159
149 errno = ENOMEM; \ 165 errno = ENOMEM; \
150 if (!eio_buf) \ 166 if (!eio_buf) \
151 return -1; 167 return -1;
152 168
153#define EIO_TICKS ((1000000 + 1023) >> 10) 169#define EIO_TICKS ((1000000 + 1023) >> 10)
154
155/*****************************************************************************/
156
157#if __GNUC__ >= 3
158# define expect(expr,value) __builtin_expect ((expr),(value))
159#else
160# define expect(expr,value) (expr)
161#endif
162
163#define expect_false(expr) expect ((expr) != 0, 0)
164#define expect_true(expr) expect ((expr) != 0, 1)
165
166/*****************************************************************************/
167 170
168#define ETP_PRI_MIN EIO_PRI_MIN 171#define ETP_PRI_MIN EIO_PRI_MIN
169#define ETP_PRI_MAX EIO_PRI_MAX 172#define ETP_PRI_MAX EIO_PRI_MAX
170 173
171struct etp_worker; 174struct etp_worker;
196 199
197/*****************************************************************************/ 200/*****************************************************************************/
198 201
199#define ETP_NUM_PRI (ETP_PRI_MAX - ETP_PRI_MIN + 1) 202#define ETP_NUM_PRI (ETP_PRI_MAX - ETP_PRI_MIN + 1)
200 203
201/* calculcate time difference in ~1/EIO_TICKS of a second */ 204/* calculate time difference in ~1/EIO_TICKS of a second */
205ECB_INLINE int
202static int tvdiff (struct timeval *tv1, struct timeval *tv2) 206tvdiff (struct timeval *tv1, struct timeval *tv2)
203{ 207{
204 return (tv2->tv_sec - tv1->tv_sec ) * EIO_TICKS 208 return (tv2->tv_sec - tv1->tv_sec ) * EIO_TICKS
205 + ((tv2->tv_usec - tv1->tv_usec) >> 10); 209 + ((tv2->tv_usec - tv1->tv_usec) >> 10);
206} 210}
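A quick worked example of the tick arithmetic (EIO_TICKS, defined above as ((1000000 + 1023) >> 10), evaluates to 977; the values below are hypothetical):

  /* tv1 = { 10, 250000 }  (10.25 s),  tv2 = { 12, 500000 }  (12.50 s):
   *   tvdiff = (12 - 10) * 977 + ((500000 - 250000) >> 10)
   *          = 1954 + 244
   *          = 2198 ticks  ~=  2198 / 977  ~=  2.25 seconds */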
207 211
214static unsigned int max_poll_reqs; /* reslock */ 218static unsigned int max_poll_reqs; /* reslock */
215 219
216static volatile unsigned int nreqs; /* reqlock */ 220static volatile unsigned int nreqs; /* reqlock */
217static volatile unsigned int nready; /* reqlock */ 221static volatile unsigned int nready; /* reqlock */
218static volatile unsigned int npending; /* reqlock */ 222static volatile unsigned int npending; /* reqlock */
219static volatile unsigned int max_idle = 4; 223static volatile unsigned int max_idle = 4; /* maximum number of threads that can idle indefinitely */
224static volatile unsigned int idle_timeout = 10; /* number of seconds after which an idle threads exit */
220 225
221static mutex_t wrklock = X_MUTEX_INIT; 226static xmutex_t wrklock;
222static mutex_t reslock = X_MUTEX_INIT; 227static xmutex_t reslock;
223static mutex_t reqlock = X_MUTEX_INIT; 228static xmutex_t reqlock;
224static cond_t reqwait = X_COND_INIT; 229static xcond_t reqwait;
225 230
226#if !HAVE_PREADWRITE 231#if !HAVE_PREADWRITE
227/* 232/*
228 * make our pread/pwrite emulation safe against themselves, but not against 233 * make our pread/pwrite emulation safe against themselves, but not against
229 * normal read/write by using a mutex. slows down execution a lot, 234 * normal read/write by using a mutex. slows down execution a lot,
230 * but that's your problem, not mine. 235 * but that's your problem, not mine.
231 */ 236 */
232static mutex_t preadwritelock = X_MUTEX_INIT; 237static xmutex_t preadwritelock = X_MUTEX_INIT;
233#endif 238#endif
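The emulated pread/pwrite bodies themselves lie outside this hunk; the usual shape of such an emulation (a sketch only, not the verbatim libeio code) serializes a seek/read/seek-back sequence under preadwritelock so two emulated calls cannot clobber each other's file offset:

  static ssize_t
  eio__pread_sketch (int fd, void *buf, size_t count, off_t offset)
  {
    ssize_t res;
    off_t ooffset;

    X_LOCK (preadwritelock);
    ooffset = lseek (fd, 0, SEEK_CUR);  /* remember the caller-visible offset */
    lseek (fd, offset, SEEK_SET);
    res = read (fd, buf, count);
    lseek (fd, ooffset, SEEK_SET);      /* restore it for plain read ()/write () users */
    X_UNLOCK (preadwritelock);

    return res;
  }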
234 239
235typedef struct etp_worker 240typedef struct etp_worker
236{ 241{
237 /* locked by wrklock */ 242 /* locked by wrklock */
238 struct etp_worker *prev, *next; 243 struct etp_worker *prev, *next;
239 244
240 thread_t tid; 245 xthread_t tid;
241 246
242 /* locked by reslock, reqlock or wrklock */ 247 /* locked by reslock, reqlock or wrklock */
243 ETP_REQ *req; /* currently processed request */ 248 ETP_REQ *req; /* currently processed request */
244 249
245 ETP_WORKER_COMMON 250 ETP_WORKER_COMMON
250#define ETP_WORKER_LOCK(wrk) X_LOCK (wrklock) 255#define ETP_WORKER_LOCK(wrk) X_LOCK (wrklock)
251#define ETP_WORKER_UNLOCK(wrk) X_UNLOCK (wrklock) 256#define ETP_WORKER_UNLOCK(wrk) X_UNLOCK (wrklock)
252 257
253/* worker threads management */ 258/* worker threads management */
254 259
260static void ecb_cold
255static void etp_worker_clear (etp_worker *wrk) 261etp_worker_clear (etp_worker *wrk)
256{ 262{
257 ETP_WORKER_CLEAR (wrk); 263 ETP_WORKER_CLEAR (wrk);
258} 264}
259 265
266static void ecb_cold
260static void etp_worker_free (etp_worker *wrk) 267etp_worker_free (etp_worker *wrk)
261{ 268{
262 wrk->next->prev = wrk->prev; 269 wrk->next->prev = wrk->prev;
263 wrk->prev->next = wrk->next; 270 wrk->prev->next = wrk->next;
264 271
265 free (wrk); 272 free (wrk);
266} 273}
267 274
268static unsigned int etp_nreqs (void) 275static unsigned int
276etp_nreqs (void)
269{ 277{
270 int retval; 278 int retval;
271 if (WORDACCESS_UNSAFE) X_LOCK (reqlock); 279 if (WORDACCESS_UNSAFE) X_LOCK (reqlock);
272 retval = nreqs; 280 retval = nreqs;
273 if (WORDACCESS_UNSAFE) X_UNLOCK (reqlock); 281 if (WORDACCESS_UNSAFE) X_UNLOCK (reqlock);
274 return retval; 282 return retval;
275} 283}
276 284
277static unsigned int etp_nready (void) 285static unsigned int
286etp_nready (void)
278{ 287{
279 unsigned int retval; 288 unsigned int retval;
280 289
281 if (WORDACCESS_UNSAFE) X_LOCK (reqlock); 290 if (WORDACCESS_UNSAFE) X_LOCK (reqlock);
282 retval = nready; 291 retval = nready;
283 if (WORDACCESS_UNSAFE) X_UNLOCK (reqlock); 292 if (WORDACCESS_UNSAFE) X_UNLOCK (reqlock);
284 293
285 return retval; 294 return retval;
286} 295}
287 296
288static unsigned int etp_npending (void) 297static unsigned int
298etp_npending (void)
289{ 299{
290 unsigned int retval; 300 unsigned int retval;
291 301
292 if (WORDACCESS_UNSAFE) X_LOCK (reqlock); 302 if (WORDACCESS_UNSAFE) X_LOCK (reqlock);
293 retval = npending; 303 retval = npending;
294 if (WORDACCESS_UNSAFE) X_UNLOCK (reqlock); 304 if (WORDACCESS_UNSAFE) X_UNLOCK (reqlock);
295 305
296 return retval; 306 return retval;
297} 307}
298 308
299static unsigned int etp_nthreads (void) 309static unsigned int
310etp_nthreads (void)
300{ 311{
301 unsigned int retval; 312 unsigned int retval;
302 313
303 if (WORDACCESS_UNSAFE) X_LOCK (reqlock); 314 if (WORDACCESS_UNSAFE) X_LOCK (reqlock);
304 retval = started; 315 retval = started;
318} etp_reqq; 329} etp_reqq;
319 330
320static etp_reqq req_queue; 331static etp_reqq req_queue;
321static etp_reqq res_queue; 332static etp_reqq res_queue;
322 333
334static int ecb_noinline
323static int reqq_push (etp_reqq *q, ETP_REQ *req) 335reqq_push (etp_reqq *q, ETP_REQ *req)
324{ 336{
325 int pri = req->pri; 337 int pri = req->pri;
326 req->next = 0; 338 req->next = 0;
327 339
328 if (q->qe[pri]) 340 if (q->qe[pri])
334 q->qe[pri] = q->qs[pri] = req; 346 q->qe[pri] = q->qs[pri] = req;
335 347
336 return q->size++; 348 return q->size++;
337} 349}
338 350
351static ETP_REQ * ecb_noinline
339static ETP_REQ *reqq_shift (etp_reqq *q) 352reqq_shift (etp_reqq *q)
340{ 353{
341 int pri; 354 int pri;
342 355
343 if (!q->size) 356 if (!q->size)
344 return 0; 357 return 0;
359 } 372 }
360 373
361 abort (); 374 abort ();
362} 375}
363 376
377static void ecb_cold
378etp_thread_init (void)
379{
380 X_MUTEX_CREATE (wrklock);
381 X_MUTEX_CREATE (reslock);
382 X_MUTEX_CREATE (reqlock);
383 X_COND_CREATE (reqwait);
384}
385
386static void ecb_cold
364static void etp_atfork_prepare (void) 387etp_atfork_prepare (void)
365{ 388{
366 X_LOCK (wrklock); 389 X_LOCK (wrklock);
367 X_LOCK (reqlock); 390 X_LOCK (reqlock);
368 X_LOCK (reslock); 391 X_LOCK (reslock);
369#if !HAVE_PREADWRITE 392#if !HAVE_PREADWRITE
370 X_LOCK (preadwritelock); 393 X_LOCK (preadwritelock);
371#endif 394#endif
372} 395}
373 396
397static void ecb_cold
374static void etp_atfork_parent (void) 398etp_atfork_parent (void)
375{ 399{
376#if !HAVE_PREADWRITE 400#if !HAVE_PREADWRITE
377 X_UNLOCK (preadwritelock); 401 X_UNLOCK (preadwritelock);
378#endif 402#endif
379 X_UNLOCK (reslock); 403 X_UNLOCK (reslock);
380 X_UNLOCK (reqlock); 404 X_UNLOCK (reqlock);
381 X_UNLOCK (wrklock); 405 X_UNLOCK (wrklock);
382} 406}
383 407
408static void ecb_cold
384static void etp_atfork_child (void) 409etp_atfork_child (void)
385{ 410{
386 ETP_REQ *prv; 411 ETP_REQ *prv;
387 412
388 while ((prv = reqq_shift (&req_queue))) 413 while ((prv = reqq_shift (&req_queue)))
389 ETP_DESTROY (prv); 414 ETP_DESTROY (prv);
406 idle = 0; 431 idle = 0;
407 nreqs = 0; 432 nreqs = 0;
408 nready = 0; 433 nready = 0;
409 npending = 0; 434 npending = 0;
410 435
411 etp_atfork_parent (); 436 etp_thread_init ();
412} 437}
413 438
414static void 439static void ecb_cold
415etp_once_init (void) 440etp_once_init (void)
416{ 441{
442 etp_thread_init ();
417 X_THREAD_ATFORK (etp_atfork_prepare, etp_atfork_parent, etp_atfork_child); 443 X_THREAD_ATFORK (etp_atfork_prepare, etp_atfork_parent, etp_atfork_child);
418} 444}
419 445
420static int 446static int ecb_cold
421etp_init (void (*want_poll)(void), void (*done_poll)(void)) 447etp_init (void (*want_poll)(void), void (*done_poll)(void))
422{ 448{
423 static pthread_once_t doinit = PTHREAD_ONCE_INIT; 449 static pthread_once_t doinit = PTHREAD_ONCE_INIT;
424 450
425 pthread_once (&doinit, etp_once_init); 451 pthread_once (&doinit, etp_once_init);
430 return 0; 456 return 0;
431} 457}
432 458
433X_THREAD_PROC (etp_proc); 459X_THREAD_PROC (etp_proc);
434 460
461static void ecb_cold
435static void etp_start_thread (void) 462etp_start_thread (void)
436{ 463{
437 etp_worker *wrk = calloc (1, sizeof (etp_worker)); 464 etp_worker *wrk = calloc (1, sizeof (etp_worker));
438 465
439 /*TODO*/ 466 /*TODO*/
440 assert (("unable to allocate worker thread data", wrk)); 467 assert (("unable to allocate worker thread data", wrk));
453 free (wrk); 480 free (wrk);
454 481
455 X_UNLOCK (wrklock); 482 X_UNLOCK (wrklock);
456} 483}
457 484
485static void
458static void etp_maybe_start_thread (void) 486etp_maybe_start_thread (void)
459{ 487{
460 if (expect_true (etp_nthreads () >= wanted)) 488 if (ecb_expect_true (etp_nthreads () >= wanted))
461 return; 489 return;
462 490
463 /* todo: maybe use idle here, but might be less exact */ 491 /* todo: maybe use idle here, but might be less exact */
464 if (expect_true (0 <= (int)etp_nthreads () + (int)etp_npending () - (int)etp_nreqs ())) 492 if (ecb_expect_true (0 <= (int)etp_nthreads () + (int)etp_npending () - (int)etp_nreqs ()))
465 return; 493 return;
466 494
467 etp_start_thread (); 495 etp_start_thread ();
468} 496}
469 497
498static void ecb_cold
470static void etp_end_thread (void) 499etp_end_thread (void)
471{ 500{
472 eio_req *req = calloc (1, sizeof (eio_req)); 501 eio_req *req = calloc (1, sizeof (eio_req));
473 502
474 req->type = -1; 503 req->type = -1;
475 req->pri = ETP_PRI_MAX - ETP_PRI_MIN; 504 req->pri = ETP_PRI_MAX - ETP_PRI_MIN;
482 X_LOCK (wrklock); 511 X_LOCK (wrklock);
483 --started; 512 --started;
484 X_UNLOCK (wrklock); 513 X_UNLOCK (wrklock);
485} 514}
486 515
487static int etp_poll (void) 516static int
517etp_poll (void)
488{ 518{
489 unsigned int maxreqs; 519 unsigned int maxreqs;
490 unsigned int maxtime; 520 unsigned int maxtime;
491 struct timeval tv_start, tv_now; 521 struct timeval tv_start, tv_now;
492 522
522 552
523 X_LOCK (reqlock); 553 X_LOCK (reqlock);
524 --nreqs; 554 --nreqs;
525 X_UNLOCK (reqlock); 555 X_UNLOCK (reqlock);
526 556
527 if (expect_false (req->type == EIO_GROUP && req->size)) 557 if (ecb_expect_false (req->type == EIO_GROUP && req->size))
528 { 558 {
529 req->int1 = 1; /* mark request as delayed */ 559 req->int1 = 1; /* mark request as delayed */
530 continue; 560 continue;
531 } 561 }
532 else 562 else
533 { 563 {
534 int res = ETP_FINISH (req); 564 int res = ETP_FINISH (req);
535 if (expect_false (res)) 565 if (ecb_expect_false (res))
536 return res; 566 return res;
537 } 567 }
538 568
539 if (expect_false (maxreqs && !--maxreqs)) 569 if (ecb_expect_false (maxreqs && !--maxreqs))
540 break; 570 break;
541 571
542 if (maxtime) 572 if (maxtime)
543 { 573 {
544 gettimeofday (&tv_now, 0); 574 gettimeofday (&tv_now, 0);
550 580
551 errno = EAGAIN; 581 errno = EAGAIN;
552 return -1; 582 return -1;
553} 583}
554 584
585static void
555static void etp_cancel (ETP_REQ *req) 586etp_cancel (ETP_REQ *req)
556{ 587{
557 X_LOCK (wrklock); 588 X_LOCK (wrklock);
558 req->flags |= EIO_FLAG_CANCELLED; 589 req->flags |= EIO_FLAG_CANCELLED;
559 X_UNLOCK (wrklock); 590 X_UNLOCK (wrklock);
560 591
561 eio_grp_cancel (req); 592 eio_grp_cancel (req);
562} 593}
563 594
595static void
564static void etp_submit (ETP_REQ *req) 596etp_submit (ETP_REQ *req)
565{ 597{
566 req->pri -= ETP_PRI_MIN; 598 req->pri -= ETP_PRI_MIN;
567 599
568 if (expect_false (req->pri < ETP_PRI_MIN - ETP_PRI_MIN)) req->pri = ETP_PRI_MIN - ETP_PRI_MIN; 600 if (ecb_expect_false (req->pri < ETP_PRI_MIN - ETP_PRI_MIN)) req->pri = ETP_PRI_MIN - ETP_PRI_MIN;
569 if (expect_false (req->pri > ETP_PRI_MAX - ETP_PRI_MIN)) req->pri = ETP_PRI_MAX - ETP_PRI_MIN; 601 if (ecb_expect_false (req->pri > ETP_PRI_MAX - ETP_PRI_MIN)) req->pri = ETP_PRI_MAX - ETP_PRI_MIN;
570 602
571 if (expect_false (req->type == EIO_GROUP)) 603 if (ecb_expect_false (req->type == EIO_GROUP))
572 { 604 {
573 /* I hope this is worth it :/ */ 605 /* I hope this is worth it :/ */
574 X_LOCK (reqlock); 606 X_LOCK (reqlock);
575 ++nreqs; 607 ++nreqs;
576 X_UNLOCK (reqlock); 608 X_UNLOCK (reqlock);
595 627
596 etp_maybe_start_thread (); 628 etp_maybe_start_thread ();
597 } 629 }
598} 630}
599 631
632static void ecb_cold
600static void etp_set_max_poll_time (double nseconds) 633etp_set_max_poll_time (double nseconds)
601{ 634{
602 if (WORDACCESS_UNSAFE) X_LOCK (reslock); 635 if (WORDACCESS_UNSAFE) X_LOCK (reslock);
603 max_poll_time = nseconds; 636 max_poll_time = nseconds * EIO_TICKS;
604 if (WORDACCESS_UNSAFE) X_UNLOCK (reslock); 637 if (WORDACCESS_UNSAFE) X_UNLOCK (reslock);
605} 638}
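The right-hand change stores the limit pre-scaled into tick units, so the poll loop can compare it directly against tvdiff () results instead of converting on every iteration. For example (again assuming EIO_TICKS == 977):

  eio_set_max_poll_time (0.01);  /* caller passes seconds */
  /* stored internally as 0.01 * 977 ~= 9.77 ticks, i.e. a ~10 ms budget that  */
  /* the poll loop can test with: tvdiff (&tv_start, &tv_now) >= max_poll_time */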
606 639
640static void ecb_cold
607static void etp_set_max_poll_reqs (unsigned int maxreqs) 641etp_set_max_poll_reqs (unsigned int maxreqs)
608{ 642{
609 if (WORDACCESS_UNSAFE) X_LOCK (reslock); 643 if (WORDACCESS_UNSAFE) X_LOCK (reslock);
610 max_poll_reqs = maxreqs; 644 max_poll_reqs = maxreqs;
611 if (WORDACCESS_UNSAFE) X_UNLOCK (reslock); 645 if (WORDACCESS_UNSAFE) X_UNLOCK (reslock);
612} 646}
613 647
648static void ecb_cold
614static void etp_set_max_idle (unsigned int nthreads) 649etp_set_max_idle (unsigned int nthreads)
615{ 650{
616 if (WORDACCESS_UNSAFE) X_LOCK (reqlock); 651 if (WORDACCESS_UNSAFE) X_LOCK (reqlock);
617 max_idle = nthreads <= 0 ? 1 : nthreads; 652 max_idle = nthreads;
618 if (WORDACCESS_UNSAFE) X_UNLOCK (reqlock); 653 if (WORDACCESS_UNSAFE) X_UNLOCK (reqlock);
619} 654}
620 655
656static void ecb_cold
657etp_set_idle_timeout (unsigned int seconds)
658{
659 if (WORDACCESS_UNSAFE) X_LOCK (reqlock);
660 idle_timeout = seconds;
661 if (WORDACCESS_UNSAFE) X_UNLOCK (reqlock);
662}
663
664static void ecb_cold
621static void etp_set_min_parallel (unsigned int nthreads) 665etp_set_min_parallel (unsigned int nthreads)
622{ 666{
623 if (wanted < nthreads) 667 if (wanted < nthreads)
624 wanted = nthreads; 668 wanted = nthreads;
625} 669}
626 670
671static void ecb_cold
627static void etp_set_max_parallel (unsigned int nthreads) 672etp_set_max_parallel (unsigned int nthreads)
628{ 673{
629 if (wanted > nthreads) 674 if (wanted > nthreads)
630 wanted = nthreads; 675 wanted = nthreads;
631 676
632 while (started > wanted) 677 while (started > wanted)
633 etp_end_thread (); 678 etp_end_thread ();
634} 679}
635 680
636/*****************************************************************************/ 681/*****************************************************************************/
637 682
683static void
638static void grp_try_feed (eio_req *grp) 684grp_try_feed (eio_req *grp)
639{ 685{
640 while (grp->size < grp->int2 && !EIO_CANCELLED (grp)) 686 while (grp->size < grp->int2 && !EIO_CANCELLED (grp))
641 { 687 {
642 grp->flags &= ~EIO_FLAG_GROUPADD; 688 grp->flags &= ~EIO_FLAG_GROUPADD;
643 689
650 break; 696 break;
651 } 697 }
652 } 698 }
653} 699}
654 700
701static int
655static int grp_dec (eio_req *grp) 702grp_dec (eio_req *grp)
656{ 703{
657 --grp->size; 704 --grp->size;
658 705
659 /* call feeder, if applicable */ 706 /* call feeder, if applicable */
660 grp_try_feed (grp); 707 grp_try_feed (grp);
664 return eio_finish (grp); 711 return eio_finish (grp);
665 else 712 else
666 return 0; 713 return 0;
667} 714}
668 715
716void
669void eio_destroy (eio_req *req) 717eio_destroy (eio_req *req)
670{ 718{
671 if ((req)->flags & EIO_FLAG_PTR1_FREE) free (req->ptr1); 719 if ((req)->flags & EIO_FLAG_PTR1_FREE) free (req->ptr1);
672 if ((req)->flags & EIO_FLAG_PTR2_FREE) free (req->ptr2); 720 if ((req)->flags & EIO_FLAG_PTR2_FREE) free (req->ptr2);
673 721
674 EIO_DESTROY (req); 722 EIO_DESTROY (req);
675} 723}
676 724
725static int
677static int eio_finish (eio_req *req) 726eio_finish (eio_req *req)
678{ 727{
679 int res = EIO_FINISH (req); 728 int res = EIO_FINISH (req);
680 729
681 if (req->grp) 730 if (req->grp)
682 { 731 {
699 eio_destroy (req); 748 eio_destroy (req);
700 749
701 return res; 750 return res;
702} 751}
703 752
753void
704void eio_grp_cancel (eio_req *grp) 754eio_grp_cancel (eio_req *grp)
705{ 755{
706 for (grp = grp->grp_first; grp; grp = grp->grp_next) 756 for (grp = grp->grp_first; grp; grp = grp->grp_next)
707 eio_cancel (grp); 757 eio_cancel (grp);
708} 758}
709 759
760void
710void eio_cancel (eio_req *req) 761eio_cancel (eio_req *req)
711{ 762{
712 etp_cancel (req); 763 etp_cancel (req);
713} 764}
714 765
766void
715void eio_submit (eio_req *req) 767eio_submit (eio_req *req)
716{ 768{
717 etp_submit (req); 769 etp_submit (req);
718} 770}
719 771
720unsigned int eio_nreqs (void) 772unsigned int
773eio_nreqs (void)
721{ 774{
722 return etp_nreqs (); 775 return etp_nreqs ();
723} 776}
724 777
725unsigned int eio_nready (void) 778unsigned int
779eio_nready (void)
726{ 780{
727 return etp_nready (); 781 return etp_nready ();
728} 782}
729 783
730unsigned int eio_npending (void) 784unsigned int
785eio_npending (void)
731{ 786{
732 return etp_npending (); 787 return etp_npending ();
733} 788}
734 789
735unsigned int eio_nthreads (void) 790unsigned int ecb_cold
791eio_nthreads (void)
736{ 792{
737 return etp_nthreads (); 793 return etp_nthreads ();
738} 794}
739 795
796void ecb_cold
740void eio_set_max_poll_time (double nseconds) 797eio_set_max_poll_time (double nseconds)
741{ 798{
742 etp_set_max_poll_time (nseconds); 799 etp_set_max_poll_time (nseconds);
743} 800}
744 801
802void ecb_cold
745void eio_set_max_poll_reqs (unsigned int maxreqs) 803eio_set_max_poll_reqs (unsigned int maxreqs)
746{ 804{
747 etp_set_max_poll_reqs (maxreqs); 805 etp_set_max_poll_reqs (maxreqs);
748} 806}
749 807
808void ecb_cold
750void eio_set_max_idle (unsigned int nthreads) 809eio_set_max_idle (unsigned int nthreads)
751{ 810{
752 etp_set_max_idle (nthreads); 811 etp_set_max_idle (nthreads);
753} 812}
754 813
814void ecb_cold
815eio_set_idle_timeout (unsigned int seconds)
816{
817 etp_set_idle_timeout (seconds);
818}
819
820void ecb_cold
755void eio_set_min_parallel (unsigned int nthreads) 821eio_set_min_parallel (unsigned int nthreads)
756{ 822{
757 etp_set_min_parallel (nthreads); 823 etp_set_min_parallel (nthreads);
758} 824}
759 825
826void ecb_cold
760void eio_set_max_parallel (unsigned int nthreads) 827eio_set_max_parallel (unsigned int nthreads)
761{ 828{
762 etp_set_max_parallel (nthreads); 829 etp_set_max_parallel (nthreads);
763} 830}
764 831
765int eio_poll (void) 832int eio_poll (void)
807 874
808 return res; 875 return res;
809} 876}
810#endif 877#endif
811 878
812#ifndef HAVE_FUTIMES 879#ifndef HAVE_UTIMES
813 880
814# undef utimes 881# undef utimes
815# undef futimes
816# define utimes(path,times) eio__utimes (path, times) 882# define utimes(path,times) eio__utimes (path, times)
817# define futimes(fd,times) eio__futimes (fd, times)
818 883
819static int 884static int
820eio__utimes (const char *filename, const struct timeval times[2]) 885eio__utimes (const char *filename, const struct timeval times[2])
821{ 886{
822 if (times) 887 if (times)
830 } 895 }
831 else 896 else
832 return utime (filename, 0); 897 return utime (filename, 0);
833} 898}
834 899
900#endif
901
902#ifndef HAVE_FUTIMES
903
904# undef futimes
905# define futimes(fd,times) eio__futimes (fd, times)
906
907static int
835static int eio__futimes (int fd, const struct timeval tv[2]) 908eio__futimes (int fd, const struct timeval tv[2])
836{ 909{
837 errno = ENOSYS; 910 errno = ENOSYS;
838 return -1; 911 return -1;
839} 912}
840 913
844# undef fdatasync 917# undef fdatasync
845# define fdatasync(fd) fsync (fd) 918# define fdatasync(fd) fsync (fd)
846#endif 919#endif
847 920
848/* sync_file_range always needs emulation */ 921/* sync_file_range always needs emulation */
849int 922static int
850eio__sync_file_range (int fd, off_t offset, size_t nbytes, unsigned int flags) 923eio__sync_file_range (int fd, off_t offset, size_t nbytes, unsigned int flags)
851{ 924{
852#if HAVE_SYNC_FILE_RANGE 925#if HAVE_SYNC_FILE_RANGE
853 int res; 926 int res;
854 927
867 if (!res || errno != ENOSYS) 940 if (!res || errno != ENOSYS)
868 return res; 941 return res;
869#endif 942#endif
870 943
871 /* even though we could play tricks with the flags, it's better to always 944 /* even though we could play tricks with the flags, it's better to always
872 * call fdatasync, as thta matches the expectation of it's users best */ 945 * call fdatasync, as that matches the expectation of its users best */
873 return fdatasync (fd); 946 return fdatasync (fd);
874} 947}
875 948
876#if !HAVE_READAHEAD 949#if !HAVE_READAHEAD
877# undef readahead 950# undef readahead
900 973
901/* sendfile always needs emulation */ 974/* sendfile always needs emulation */
902static ssize_t 975static ssize_t
903eio__sendfile (int ofd, int ifd, off_t offset, size_t count, etp_worker *self) 976eio__sendfile (int ofd, int ifd, off_t offset, size_t count, etp_worker *self)
904{ 977{
978 ssize_t written = 0;
905 ssize_t res; 979 ssize_t res;
906 980
907 if (!count) 981 if (!count)
908 return 0; 982 return 0;
909 983
984 for (;;)
985 {
910#if HAVE_SENDFILE 986#if HAVE_SENDFILE
911# if __linux 987# if __linux
988 off_t soffset = offset;
912 res = sendfile (ofd, ifd, &offset, count); 989 res = sendfile (ofd, ifd, &soffset, count);
913 990
914# elif __freebsd 991# elif __FreeBSD__
915 /* 992 /*
916 * Of course, the freebsd sendfile is a dire hack with no thoughts 993 * Of course, the freebsd sendfile is a dire hack with no thoughts
917 * wasted on making it similar to other I/O functions. 994 * wasted on making it similar to other I/O functions.
918 */ 995 */
919 {
920 off_t sbytes; 996 off_t sbytes;
921 res = sendfile (ifd, ofd, offset, count, 0, &sbytes, 0); 997 res = sendfile (ifd, ofd, offset, count, 0, &sbytes, 0);
922 998
923 if (res < 0 && sbytes) 999 #if 0 /* according to the manpage, this is correct, but broken behaviour */
924 /* maybe only on EAGAIN: as usual, the manpage leaves you guessing */ 1000 /* freebsd' sendfile will return 0 on success */
1001 /* freebsd 8 documents it as only setting *sbytes on EINTR and EAGAIN, but */
1002 /* not on e.g. EIO or EPIPE - sounds broken */
1003 if ((res < 0 && (errno == EAGAIN || errno == EINTR) && sbytes) || res == 0)
925 res = sbytes; 1004 res = sbytes;
926 } 1005 #endif
1006
1007 /* according to source inspection, this is correct, and useful behaviour */
1008 if (sbytes)
1009 res = sbytes;
1010
1011# elif defined (__APPLE__)
1012 off_t sbytes = count;
1013 res = sendfile (ifd, ofd, offset, &sbytes, 0, 0);
1014
1015 /* according to the manpage, sbytes is always valid */
1016 if (sbytes)
1017 res = sbytes;
927 1018
928# elif __hpux 1019# elif __hpux
929 res = sendfile (ofd, ifd, offset, count, 0, 0); 1020 res = sendfile (ofd, ifd, offset, count, 0, 0);
930 1021
931# elif __solaris 1022# elif __solaris
932 {
933 struct sendfilevec vec; 1023 struct sendfilevec vec;
934 size_t sbytes; 1024 size_t sbytes;
935 1025
936 vec.sfv_fd = ifd; 1026 vec.sfv_fd = ifd;
937 vec.sfv_flag = 0; 1027 vec.sfv_flag = 0;
938 vec.sfv_off = offset; 1028 vec.sfv_off = offset;
939 vec.sfv_len = count; 1029 vec.sfv_len = count;
940 1030
941 res = sendfilev (ofd, &vec, 1, &sbytes); 1031 res = sendfilev (ofd, &vec, 1, &sbytes);
942 1032
943 if (res < 0 && sbytes) 1033 if (res < 0 && sbytes)
944 res = sbytes; 1034 res = sbytes;
945 }
946 1035
947# endif 1036# endif
1037
1038#elif defined (_WIN32)
1039 /* does not work, just for documentation of what would need to be done */
1040 /* actually, cannot be done like this, as TransmitFile changes the file offset, */
1041 /* libeio guarantees that the file offset does not change, and windows */
1042 /* has no way to get an independent handle to the same file description */
1043 HANDLE h = TO_SOCKET (ifd);
1044 SetFilePointer (h, offset, 0, FILE_BEGIN);
1045 res = TransmitFile (TO_SOCKET (ofd), h, count, 0, 0, 0, 0);
1046
948#else 1047#else
949 res = -1; 1048 res = -1;
950 errno = ENOSYS; 1049 errno = ENOSYS;
951#endif 1050#endif
952 1051
1052 /* we assume sendfile can copy at least 128mb in one go */
1053 if (res <= 128 * 1024 * 1024)
1054 {
1055 if (res > 0)
1056 written += res;
1057
1058 if (written)
1059 return written;
1060
1061 break;
1062 }
1063 else
1064 {
1065 /* if we requested more, then probably the kernel was lazy */
1066 written += res;
1067 offset += res;
1068 count -= res;
1069
1070 if (!count)
1071 return written;
1072 }
1073 }
1074
953 if (res < 0 1075 if (res < 0
954 && (errno == ENOSYS || errno == EINVAL || errno == ENOTSOCK 1076 && (errno == ENOSYS || errno == EINVAL || errno == ENOTSOCK
1077 /* BSDs */
1078#ifdef ENOTSUP /* sigh, if the steenking pile called openbsd would only try to at least compile posix code... */
1079 || errno == ENOTSUP
1080#endif
1081 || errno == EOPNOTSUPP /* BSDs */
955#if __solaris 1082#if __solaris
956 || errno == EAFNOSUPPORT || errno == EPROTOTYPE 1083 || errno == EAFNOSUPPORT || errno == EPROTOTYPE
957#endif 1084#endif
958 ) 1085 )
959 ) 1086 )
993} 1120}
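When none of the kernel paths apply (the errno test above catches ENOSYS, non-socket descriptors and friends), the function, which the comment above notes always needs emulation, falls back to an ordinary copy loop. A generic sketch of such a fallback, with a hypothetical helper name and a small bounce buffer, not the verbatim libeio loop:

  static ssize_t
  sendfile_copy_sketch (int ofd, int ifd, off_t offset, size_t count)
  {
    char buf [8192];          /* bounce buffer for the userspace copy */
    ssize_t total = 0;

    while (count)
      {
        size_t  len = count > sizeof (buf) ? sizeof (buf) : count;
        ssize_t cnt = pread (ifd, buf, len, offset);

        if (cnt <= 0)
          return total ? total : cnt;    /* report progress made, else the error/EOF */

        cnt = write (ofd, buf, cnt);

        if (cnt <= 0)
          return total ? total : cnt;

        offset += cnt;                   /* only advance by what was actually written */
        count  -= cnt;
        total  += cnt;
      }

    return total;
  }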
994 1121
995static signed char 1122static signed char
996eio_dent_cmp (const eio_dirent *a, const eio_dirent *b) 1123eio_dent_cmp (const eio_dirent *a, const eio_dirent *b)
997{ 1124{
998 return b->score - a->score ? b->score - a->score /* works because our signed char is always 0..100 */ 1125 return a->score - b->score ? a->score - b->score /* works because our signed char is always 0..100 */
999 : a->inode < b->inode ? -1 : a->inode > b->inode ? 1 : 0; 1126 : a->inode < b->inode ? -1
1127 : a->inode > b->inode ? 1
1128 : 0;
1000} 1129}
1001 1130
1002#define EIO_DENT_CMP(i,op,j) eio_dent_cmp (&i, &j) op 0 1131#define EIO_DENT_CMP(i,op,j) eio_dent_cmp (&i, &j) op 0
1003 1132
1004#define EIO_QSORT_CUTOFF 30 /* quite high, but performs well on many filesystems */ 1133#define EIO_SORT_CUTOFF 30 /* quite high, but performs well on many filesystems */
1005#define EIO_QSORT_SKIP 60 /* when to skip qsort completely */ 1134#define EIO_SORT_FAST 60 /* when to only use insertion sort */
1006 1135
1007static void 1136static void
1008eio_dent_sort (eio_dirent *dents, int size) 1137eio_dent_radix_sort (eio_dirent *dents, int size, signed char score_bits, ino_t inode_bits)
1009{ 1138{
1139 unsigned char bits [9 + sizeof (ino_t) * 8];
1140 unsigned char *bit = bits;
1141
1142 assert (CHAR_BIT == 8);
1143 assert (sizeof (eio_dirent) * 8 < 256);
1144 assert (offsetof (eio_dirent, inode)); /* we use bit #0 as sentinel */
1145 assert (offsetof (eio_dirent, score)); /* we use bit #0 as sentinel */
1146
1147 if (size <= EIO_SORT_FAST)
1148 return;
1149
1150 /* first prepare an array of bits to test in our radix sort */
1151 /* try to take endianness into account, as well as differences in ino_t sizes */
1152 /* inode_bits must contain all inodes ORed together */
1153 /* which is used to skip bits that are 0 everywhere, which is very common */
1154 {
1155 ino_t endianness;
1010 int i, j; 1156 int i, j;
1011 1157
1158 /* we store the byte offset of byte n into byte n of "endianness" */
1159 for (i = 0; i < sizeof (ino_t); ++i)
1160 ((unsigned char *)&endianness)[i] = i;
1161
1162 *bit++ = 0;
1163
1164 for (i = 0; i < sizeof (ino_t); ++i)
1165 {
1166 /* shifting off the byte offsets out of "endianness" */
1167 int offs = (offsetof (eio_dirent, inode) + (endianness & 0xff)) * 8;
1168 endianness >>= 8;
1169
1170 for (j = 0; j < 8; ++j)
1171 if (inode_bits & (((ino_t)1) << (i * 8 + j)))
1172 *bit++ = offs + j;
1173 }
1174
1175 for (j = 0; j < 8; ++j)
1176 if (score_bits & (1 << j))
1177 *bit++ = offsetof (eio_dirent, score) * 8 + j;
1178 }
1179
1180 /* now actually do the sorting (a variant of MSD radix sort) */
1181 {
1182 eio_dirent *base_stk [9 + sizeof (ino_t) * 8], *base;
1183 eio_dirent *end_stk [9 + sizeof (ino_t) * 8], *end;
1184 unsigned char *bit_stk [9 + sizeof (ino_t) * 8];
1185 int stk_idx = 0;
1186
1187 base_stk [stk_idx] = dents;
1188 end_stk [stk_idx] = dents + size;
1189 bit_stk [stk_idx] = bit - 1;
1190
1191 do
1192 {
1193 base = base_stk [stk_idx];
1194 end = end_stk [stk_idx];
1195 bit = bit_stk [stk_idx];
1196
1197 for (;;)
1198 {
1199 unsigned char O = *bit >> 3;
1200 unsigned char M = 1 << (*bit & 7);
1201
1202 eio_dirent *a = base;
1203 eio_dirent *b = end;
1204
1205 if (b - a < EIO_SORT_CUTOFF)
1206 break;
1207
1208 /* now bit-partition the array on the bit */
1209 /* this ugly asymmetric loop seems to perform much better than typical */
1210 /* partition algos found in the literature */
1211 do
1212 if (!(((unsigned char *)a)[O] & M))
1213 ++a;
1214 else if (!(((unsigned char *)--b)[O] & M))
1215 {
1216 eio_dirent tmp = *a; *a = *b; *b = tmp;
1217 ++a;
1218 }
1219 while (b > a);
1220
1221 /* next bit, or stop, if no bits left in this path */
1222 if (!*--bit)
1223 break;
1224
1225 base_stk [stk_idx] = a;
1226 end_stk [stk_idx] = end;
1227 bit_stk [stk_idx] = bit;
1228 ++stk_idx;
1229
1230 end = a;
1231 }
1232 }
1233 while (stk_idx--);
1234 }
1235}
1236
1237static void
1238eio_dent_insertion_sort (eio_dirent *dents, int size)
1239{
1240 /* first move the smallest element to the front, to act as a sentinel */
1241 {
1242 int i;
1243 eio_dirent *min = dents;
1244
1245 /* the radix pre-pass ensures that the minimum element is in the first EIO_SORT_CUTOFF + 1 elements */
1246 for (i = size > EIO_SORT_FAST ? EIO_SORT_CUTOFF + 1 : size; --i; )
1247 if (EIO_DENT_CMP (dents [i], <, *min))
1248 min = &dents [i];
1249
1250 /* swap elements 0 and j (minimum) */
1251 {
1252 eio_dirent tmp = *dents; *dents = *min; *min = tmp;
1253 }
1254 }
1255
1256 /* then do standard insertion sort, assuming that all elements are >= dents [0] */
1257 {
1258 eio_dirent *i, *j;
1259
1260 for (i = dents + 1; i < dents + size; ++i)
1261 {
1262 eio_dirent value = *i;
1263
1264 for (j = i - 1; EIO_DENT_CMP (*j, >, value); --j)
1265 j [1] = j [0];
1266
1267 j [1] = value;
1268 }
1269 }
1270}
1271
1272static void
1273eio_dent_sort (eio_dirent *dents, int size, signed char score_bits, ino_t inode_bits)
1274{
1012 if (size <= 1) 1275 if (size <= 1)
1013 return; /* our insertion sort relies on size > 0 */ 1276 return; /* our insertion sort relies on size > 0 */
1014 1277
1015 if (size > EIO_QSORT_SKIP) /* skip quicksort for small directories */ 1278 /* first we use a radix sort, but only for dirs >= EIO_SORT_FAST */
1016 { 1279 /* and stop sorting when the partitions are <= EIO_SORT_CUTOFF */
1017 /* first, use quicksort */ 1280 eio_dent_radix_sort (dents, size, score_bits, inode_bits);
1018 /* should be good for 2**31 entries */
1019 struct rng { int l, r; } rng [32];
1020 1281
1021 i = 0;
1022 rng[0].l = 0;
1023 rng[0].r = size;
1024
1025 while (expect_true (i >= 0))
1026 {
1027 int L = rng [i].l;
1028 int R = rng [i].r - 1;
1029
1030 if (expect_false (L + EIO_QSORT_CUTOFF < R))
1031 {
1032 eio_dirent piv = dents [L];
1033
1034 while (L < R)
1035 {
1036 while (EIO_DENT_CMP (dents [R], >=, piv) && L < R)
1037 --R;
1038
1039 if (L < R)
1040 dents [L++] = dents [R];
1041
1042 while (EIO_DENT_CMP (dents [L], <=, piv) && L < R)
1043 ++L;
1044
1045 if (L < R)
1046 dents [R--] = dents [L];
1047 }
1048
1049 dents [L] = piv;
1050
1051 ++i;
1052 rng [i].l = L + 1;
1053 rng [i].r = rng [i - 1].r;
1054 rng [i - 1].r = L;
1055
1056 if (rng [i].r - rng [i].l > rng [i - 1].r - rng [i - 1].l)
1057 {
1058 struct rng t;
1059
1060 t = rng [i]; rng [i] = rng [i - 1]; rng [i - 1] = t;
1061 }
1062 }
1063 else
1064 --i;
1065 }
1066 }
1067
1068 /* use an insertion sort after qsort, or for small arrays */ 1282 /* use an insertion sort at the end, or for small arrays, */
1069 /* first move the smallest element to the front, to act as a sentinel */ 1283 /* as insertion sort is more efficient for small partitions */
1070 { 1284 eio_dent_insertion_sort (dents, size);
1071 int min = 0;
1072
1073 for (i = size > EIO_QSORT_SKIP ? EIO_QSORT_CUTOFF + 1 : size; --i; )
1074 if (EIO_DENT_CMP (dents [i], <, dents [min]))
1075 min = i;
1076
1077 /* swap elements 0 and j (minimum) */
1078 {
1079 eio_dirent tmp = dents [0]; dents [0] = dents [min]; dents [min] = tmp;
1080 }
1081 }
1082
1083 /* then do standard insertion sort */
1084 for (i = 1; i < size; ++i)
1085 {
1086 eio_dirent value = dents [i];
1087
1088 for (j = i - 1; EIO_DENT_CMP (dents [j], >, value); --j)
1089 {
1090 assert (j >= 0);
1091 dents [j + 1] = dents [j];
1092 }
1093
1094 dents [j + 1] = value;
1095 }
1096} 1285}
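The same two-phase idea, detached from the eio_dirent record layout, can be sketched on plain unsigned integers: partition recursively on single key bits (MSD radix) until the ranges drop below a cutoff, then let one insertion-sort pass finish the nearly-sorted array. Hypothetical helper names; CUTOFF plays the role of EIO_SORT_CUTOFF:

  #define CUTOFF 30

  static void
  bit_partition (unsigned *base, unsigned *end, int bit)
  {
    while (bit >= 0 && end - base > CUTOFF)
      {
        unsigned mask = 1u << bit--;
        unsigned *a = base;
        unsigned *b = end;

        /* the same asymmetric partition loop as above:
         * zeros end up in [base, a), ones in [a, end) */
        do
          if (!(*a & mask))
            ++a;
          else if (!(*--b & mask))
            {
              unsigned tmp = *a; *a = *b; *b = tmp;
              ++a;
            }
        while (b > a);

        bit_partition (a, end, bit);  /* refine the "ones" half...             */
        end = a;                      /* ...and keep refining the "zeros" half */
      }
  }

  static void
  insertion_sort (unsigned *base, int size)
  {
    int i, j;

    for (i = 1; i < size; ++i)
      {
        unsigned v = base [i];

        for (j = i - 1; j >= 0 && base [j] > v; --j)
          base [j + 1] = base [j];

        base [j + 1] = v;
      }
  }

  /* usage: bit_partition (keys, keys + n, 31); insertion_sort (keys, n); */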
1097 1286
1098/* read a full directory */ 1287/* read a full directory */
1099static void 1288static void
1100eio__scandir (eio_req *req, etp_worker *self) 1289eio__scandir (eio_req *req, etp_worker *self)
1106 int namesoffs = 0; 1295 int namesoffs = 0;
1107 int flags = req->int1; 1296 int flags = req->int1;
1108 eio_dirent *dents = 0; 1297 eio_dirent *dents = 0;
1109 int dentalloc = 128; 1298 int dentalloc = 128;
1110 int dentoffs = 0; 1299 int dentoffs = 0;
1300 ino_t inode_bits = 0;
1111 1301
1112 req->result = -1; 1302 req->result = -1;
1113 1303
1114 if (!(flags & EIO_READDIR_DENTS)) 1304 if (!(flags & EIO_READDIR_DENTS))
1115 flags &= ~(EIO_READDIR_DIRS_FIRST | EIO_READDIR_STAT_ORDER); 1305 flags &= ~(EIO_READDIR_DIRS_FIRST | EIO_READDIR_STAT_ORDER);
1116 1306
1117 X_LOCK (wrklock); 1307 X_LOCK (wrklock);
1118 /* the corresponding closedir is in ETP_WORKER_CLEAR */ 1308 /* the corresponding closedir is in ETP_WORKER_CLEAR */
1119 self->dirp = dirp = opendir (req->ptr1); 1309 self->dirp = dirp = opendir (req->ptr1);
1310
1120 req->flags |= EIO_FLAG_PTR1_FREE | EIO_FLAG_PTR2_FREE; 1311 req->flags |= EIO_FLAG_PTR1_FREE | EIO_FLAG_PTR2_FREE;
1121 req->ptr1 = names = malloc (namesalloc);
1122 req->ptr2 = dents = flags ? malloc (dentalloc * sizeof (eio_dirent)) : 0; 1312 req->ptr1 = dents = flags ? malloc (dentalloc * sizeof (eio_dirent)) : 0;
1313 req->ptr2 = names = malloc (namesalloc);
1123 X_UNLOCK (wrklock); 1314 X_UNLOCK (wrklock);
1124 1315
1125 if (dirp && names && (!flags || dents)) 1316 if (dirp && names && (!flags || dents))
1126 for (;;) 1317 for (;;)
1127 { 1318 {
1135 1326
1136 /* sort etc. */ 1327 /* sort etc. */
1137 req->int1 = flags; 1328 req->int1 = flags;
1138 req->result = dentoffs; 1329 req->result = dentoffs;
1139 1330
1140 if (dents)
1141 {
1142 eio_dirent *ent = dents + dentoffs;
1143
1144 while (ent > dents)
1145 (--ent)->name = names + (size_t)ent->name;
1146 }
1147
1148 if (flags & EIO_READDIR_STAT_ORDER 1331 if (flags & EIO_READDIR_STAT_ORDER)
1149 || !(~flags & (EIO_READDIR_DIRS_FIRST | EIO_READDIR_FOUND_UNKNOWN))) 1332 eio_dent_sort (dents, dentoffs, flags & EIO_READDIR_DIRS_FIRST ? 7 : 0, inode_bits);
1150 eio_dent_sort (dents, dentoffs); /* score depends of DIRS_FIRST */
1151 else if (flags & EIO_READDIR_DIRS_FIRST) 1333 else if (flags & EIO_READDIR_DIRS_FIRST)
1334 if (flags & EIO_READDIR_FOUND_UNKNOWN)
1335 eio_dent_sort (dents, dentoffs, 7, inode_bits); /* sort by score and inode */
1336 else
1152 { 1337 {
1153 /* in this case, all is known, and we just put dirs first and sort them */ 1338 /* in this case, all is known, and we just put dirs first and sort them */
1154 eio_dirent *ent = dents + dentoffs; 1339 eio_dirent *oth = dents + dentoffs;
1155 eio_dirent *dir = dents; 1340 eio_dirent *dir = dents;
1156 1341
1157 /* now move dirs to the front, and non-dirs to the back */ 1342 /* now partition dirs to the front, and non-dirs to the back */
1158 /* by walking from both sides and swapping if necessary */ 1343 /* by walking from both sides and swapping if necessary */
1159 while (ent > dir) 1344 while (oth > dir)
1160 { 1345 {
1161 if (dir->type == DT_DIR) 1346 if (dir->type == EIO_DT_DIR)
1162 ++dir; 1347 ++dir;
1163 else
1164 {
1165 --ent;
1166
1167 if (ent->type == DT_DIR) 1348 else if ((--oth)->type == EIO_DT_DIR)
1168 { 1349 {
1169 eio_dirent tmp = *dir; 1350 eio_dirent tmp = *dir; *dir = *oth; *oth = tmp;
1170 *dir = *ent;
1171 *ent = tmp;
1172 1351
1173 ++dir; 1352 ++dir;
1174 } 1353 }
1175 } 1354 }
1355
1356 /* now sort the dirs only (dirs all have the same score) */
1357 eio_dent_sort (dents, dir - dents, 0, inode_bits);
1176 } 1358 }
1177
1178 /* now sort the dirs only */
1179 eio_dent_sort (dents, dir - dents);
1180 }
1181
1182 /* only provide the names array unless DENTS is specified */
1183 if (!(flags & EIO_READDIR_DENTS))
1184 {
1185 X_LOCK (wrklock);
1186 assert (!dents);
1187 req->ptr1 = 0;
1188 req->ptr2 = names;
1189 X_UNLOCK (wrklock);
1190 }
1191 1359
1192 break; 1360 break;
1193 } 1361 }
1194 1362
1195 /* now add the entry to our list(s) */ 1363 /* now add the entry to our list(s) */
1198 /* skip . and .. entries */ 1366 /* skip . and .. entries */
1199 if (name [0] != '.' || (name [1] && (name [1] != '.' || name [2]))) 1367 if (name [0] != '.' || (name [1] && (name [1] != '.' || name [2])))
1200 { 1368 {
1201 int len = D_NAMLEN (entp) + 1; 1369 int len = D_NAMLEN (entp) + 1;
1202 1370
1203 while (expect_false (namesoffs + len > namesalloc)) 1371 while (ecb_expect_false (namesoffs + len > namesalloc))
1204 { 1372 {
1205 namesalloc *= 2; 1373 namesalloc *= 2;
1206 X_LOCK (wrklock); 1374 X_LOCK (wrklock);
1207 req->ptr1 = names = realloc (names, namesalloc); 1375 req->ptr2 = names = realloc (names, namesalloc);
1208 X_UNLOCK (wrklock); 1376 X_UNLOCK (wrklock);
1209 1377
1210 if (!names) 1378 if (!names)
1211 break; 1379 break;
1212 } 1380 }
1215 1383
1216 if (dents) 1384 if (dents)
1217 { 1385 {
1218 struct eio_dirent *ent; 1386 struct eio_dirent *ent;
1219 1387
1220 if (expect_false (dentoffs == dentalloc)) 1388 if (ecb_expect_false (dentoffs == dentalloc))
1221 { 1389 {
1222 dentalloc *= 2; 1390 dentalloc *= 2;
1223 X_LOCK (wrklock); 1391 X_LOCK (wrklock);
1224 req->ptr2 = dents = realloc (dents, dentalloc * sizeof (eio_dirent)); 1392 req->ptr1 = dents = realloc (dents, dentalloc * sizeof (eio_dirent));
1225 X_UNLOCK (wrklock); 1393 X_UNLOCK (wrklock);
1226 1394
1227 if (!dents) 1395 if (!dents)
1228 break; 1396 break;
1229 } 1397 }
1230 1398
1231 ent = dents + dentoffs; 1399 ent = dents + dentoffs;
1232 1400
1233 ent->name = (char *)(size_t)namesoffs; /* rather dirtily we store the offset in the pointer */ 1401 ent->nameofs = namesoffs; /* rather dirtily we store the offset in the pointer */
1234 ent->namelen = len - 1; 1402 ent->namelen = len - 1;
1235 ent->inode = D_INO (entp); 1403 ent->inode = D_INO (entp);
1404
1405 inode_bits |= ent->inode;
1236 1406
1237 switch (D_TYPE (entp)) 1407 switch (D_TYPE (entp))
1238 { 1408 {
1239 default: 1409 default:
1240 ent->type = EIO_DT_UNKNOWN; 1410 ent->type = EIO_DT_UNKNOWN;
1283 #ifdef DT_WHT 1453 #ifdef DT_WHT
1284 case DT_WHT: ent->type = EIO_DT_WHT; break; 1454 case DT_WHT: ent->type = EIO_DT_WHT; break;
1285 #endif 1455 #endif
1286 } 1456 }
1287 1457
1288 ent->score = 0; 1458 ent->score = 7;
1289 1459
1290 if (flags & EIO_READDIR_DIRS_FIRST) 1460 if (flags & EIO_READDIR_DIRS_FIRST)
1291 { 1461 {
1292 if (ent->type == EIO_DT_UNKNOWN) 1462 if (ent->type == EIO_DT_UNKNOWN)
1293 { 1463 {
1294 if (*name == '.') /* leading dots are likely directories, and, in any case, rare */ 1464 if (*name == '.') /* leading dots are likely directories, and, in any case, rare */
1295 ent->score = 98; 1465 ent->score = 1;
1296 else if (!strchr (name, '.')) /* absense of dots indicate likely dirs */ 1466 else if (!strchr (name, '.')) /* absense of dots indicate likely dirs */
1297 ent->score = len <= 2 ? len + 6 : len <= 4 ? 5 : len <= 7 ? 4 : 1; /* shorter == more likely dir, but avoid too many classes */ 1467 ent->score = len <= 2 ? 4 - len : len <= 4 ? 4 : len <= 7 ? 5 : 6; /* shorter == more likely dir, but avoid too many classes */
1298 } 1468 }
1299 else if (ent->type == EIO_DT_DIR) 1469 else if (ent->type == EIO_DT_DIR)
1300 ent->score = 100; 1470 ent->score = 0;
1301 } 1471 }
1302 } 1472 }
1303 1473
1304 namesoffs += len; 1474 namesoffs += len;
1305 ++dentoffs; 1475 ++dentoffs;
1311 break; 1481 break;
1312 } 1482 }
1313 } 1483 }
1314} 1484}
1315 1485
1316#if !(_POSIX_MAPPED_FILES && _POSIX_SYNCHRONIZED_IO)
1317# undef msync
1318# define msync(a,b,c) ((errno = ENOSYS), -1)
1319#endif
1320
1321int
1322eio__mtouch (void *mem, size_t len, int flags)
1323{
1324 intptr_t addr = (intptr_t)mem;
1325 intptr_t end = addr + len;
1326#ifdef PAGESIZE 1486#ifdef PAGESIZE
1327 const intptr_t page = PAGESIZE; 1487# define eio_pagesize() PAGESIZE
1328#else 1488#else
1489static intptr_t
1490eio_pagesize (void)
1491{
1329 static intptr_t page; 1492 static intptr_t page;
1330 1493
1331 if (!page) 1494 if (!page)
1332 page = sysconf (_SC_PAGESIZE); 1495 page = sysconf (_SC_PAGESIZE);
1496
1497 return page;
1498}
1499#endif
1500
1501static void
1502eio_page_align (void **addr, size_t *length)
1503{
1504 intptr_t mask = eio_pagesize () - 1;
1505
1506 /* round down addr */
1507 intptr_t adj = mask & (intptr_t)*addr;
1508
1509 *addr = (void *)((intptr_t)*addr - adj);
1510 *length += adj;
1511
1512 /* round up length */
1513 *length = (*length + mask) & ~mask;
1514}
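A worked example of the rounding, assuming eio_pagesize () returns 4096 (so mask == 0xfff):

  /*   in : *addr = (void *)0x12345, *length = 100
   *   adj = 0x12345 & 0xfff = 0x345  (837 bytes into the page)
   *   out: *addr = (void *)0x12000, *length = 100 + 837 = 937,
   *        then rounded up: (937 + 4095) & ~4095 = 4096
   * i.e. the aligned range [0x12000, 0x13000) fully covers the original
   * request, which is what the mlock ()/msync () wrappers below need */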
1515
1516#if !_POSIX_MEMLOCK
1517# define eio__mlockall(a) ((errno = ENOSYS), -1)
1518#else
1519
1520static int
1521eio__mlockall (int flags)
1522{
1523 #if __GLIBC__ == 2 && __GLIBC_MINOR__ <= 7
1524 extern int mallopt (int, int);
1525 mallopt (-6, 238); /* http://bugs.debian.org/cgi-bin/bugreport.cgi?bug=473812 */
1333#endif 1526 #endif
1334 1527
1335 addr &= ~(page - 1); /* assume page size is always a power of two */ 1528 if (EIO_MCL_CURRENT != MCL_CURRENT
1529 || EIO_MCL_FUTURE != MCL_FUTURE)
1530 {
1531 flags = 0
1532 | (flags & EIO_MCL_CURRENT ? MCL_CURRENT : 0)
1533 | (flags & EIO_MCL_FUTURE ? MCL_FUTURE : 0);
1534 }
1336 1535
1536 return mlockall (flags);
1537}
1538#endif
1539
1540#if !_POSIX_MEMLOCK_RANGE
1541# define eio__mlock(a,b) ((errno = ENOSYS), -1)
1542#else
1543
1544static int
1545eio__mlock (void *addr, size_t length)
1546{
1547 eio_page_align (&addr, &length);
1548
1549 return mlock (addr, length);
1550}
1551
1552#endif
1553
1554#if !(_POSIX_MAPPED_FILES && _POSIX_SYNCHRONIZED_IO)
1555# define eio__msync(a,b,c) ((errno = ENOSYS), -1)
1556#else
1557
1558static int
1559eio__msync (void *mem, size_t len, int flags)
1560{
1561 eio_page_align (&mem, &len);
1562
1563 if (EIO_MS_ASYNC != MS_SYNC
1564 || EIO_MS_INVALIDATE != MS_INVALIDATE
1565 || EIO_MS_SYNC != MS_SYNC)
1566 {
1567 flags = 0
1568 | (flags & EIO_MS_ASYNC ? MS_ASYNC : 0)
1569 | (flags & EIO_MS_INVALIDATE ? MS_INVALIDATE : 0)
1570 | (flags & EIO_MS_SYNC ? MS_SYNC : 0);
1571 }
1572
1573 return msync (mem, len, flags);
1574}
1575
1576#endif
1577
1578static int
1579eio__mtouch (eio_req *req)
1580{
1581 void *mem = req->ptr2;
1582 size_t len = req->size;
1583 int flags = req->int1;
1584
1585 eio_page_align (&mem, &len);
1586
1587 {
1588 intptr_t addr = (intptr_t)mem;
1589 intptr_t end = addr + len;
1590 intptr_t page = eio_pagesize ();
1591
1337 if (addr < end) 1592 if (addr < end)
1338 if (flags) /* modify */ 1593 if (flags & EIO_MT_MODIFY) /* modify */
1339 do { *((volatile sig_atomic_t *)addr) |= 0; } while ((addr += page) < len); 1594 do { *((volatile sig_atomic_t *)addr) |= 0; } while ((addr += page) < len && !EIO_CANCELLED (req));
1340 else 1595 else
1341 do { *((volatile sig_atomic_t *)addr) ; } while ((addr += page) < len); 1596 do { *((volatile sig_atomic_t *)addr) ; } while ((addr += page) < len && !EIO_CANCELLED (req));
1597 }
1342 1598
1343 return 0; 1599 return 0;
1344} 1600}
1345 1601
1346/*****************************************************************************/ 1602/*****************************************************************************/
1380 if (req) 1636 if (req)
1381 break; 1637 break;
1382 1638
1383 ++idle; 1639 ++idle;
1384 1640
1385 ts.tv_sec = time (0) + IDLE_TIMEOUT; 1641 ts.tv_sec = time (0) + idle_timeout;
1386 if (X_COND_TIMEDWAIT (reqwait, reqlock, ts) == ETIMEDOUT) 1642 if (X_COND_TIMEDWAIT (reqwait, reqlock, ts) == ETIMEDOUT)
1387 { 1643 {
1388 if (idle > max_idle) 1644 if (idle > max_idle)
1389 { 1645 {
1390 --idle; 1646 --idle;
1433 return 0; 1689 return 0;
1434} 1690}
1435 1691
1436/*****************************************************************************/ 1692/*****************************************************************************/
1437 1693
1694int ecb_cold
1438int eio_init (void (*want_poll)(void), void (*done_poll)(void)) 1695eio_init (void (*want_poll)(void), void (*done_poll)(void))
1439{ 1696{
1440 return etp_init (want_poll, done_poll); 1697 return etp_init (want_poll, done_poll);
1441} 1698}
1442 1699
1700ECB_INLINE void
1443static void eio_api_destroy (eio_req *req) 1701eio_api_destroy (eio_req *req)
1444{ 1702{
1445 free (req); 1703 free (req);
1446} 1704}
1447 1705
1448#define REQ(rtype) \ 1706#define REQ(rtype) \
1467 { \ 1725 { \
1468 eio_api_destroy (req); \ 1726 eio_api_destroy (req); \
1469 return 0; \ 1727 return 0; \
1470 } 1728 }
1471 1729
1730static void
1472static void eio_execute (etp_worker *self, eio_req *req) 1731eio_execute (etp_worker *self, eio_req *req)
1473{ 1732{
1474 errno = 0;
1475
1476 switch (req->type) 1733 switch (req->type)
1477 { 1734 {
1478 case EIO_READ: ALLOC (req->size); 1735 case EIO_READ: ALLOC (req->size);
1479 req->result = req->offs >= 0 1736 req->result = req->offs >= 0
1480 ? pread (req->int1, req->ptr2, req->size, req->offs) 1737 ? pread (req->int1, req->ptr2, req->size, req->offs)
1491 case EIO_LSTAT: ALLOC (sizeof (EIO_STRUCT_STAT)); 1748 case EIO_LSTAT: ALLOC (sizeof (EIO_STRUCT_STAT));
1492 req->result = lstat (req->ptr1, (EIO_STRUCT_STAT *)req->ptr2); break; 1749 req->result = lstat (req->ptr1, (EIO_STRUCT_STAT *)req->ptr2); break;
1493 case EIO_FSTAT: ALLOC (sizeof (EIO_STRUCT_STAT)); 1750 case EIO_FSTAT: ALLOC (sizeof (EIO_STRUCT_STAT));
1494 req->result = fstat (req->int1, (EIO_STRUCT_STAT *)req->ptr2); break; 1751 req->result = fstat (req->int1, (EIO_STRUCT_STAT *)req->ptr2); break;
1495 1752
1753 case EIO_STATVFS: ALLOC (sizeof (EIO_STRUCT_STATVFS));
1754 req->result = statvfs (req->ptr1, (EIO_STRUCT_STATVFS *)req->ptr2); break;
1755 case EIO_FSTATVFS: ALLOC (sizeof (EIO_STRUCT_STATVFS));
1756 req->result = fstatvfs (req->int1, (EIO_STRUCT_STATVFS *)req->ptr2); break;
1757
1496 case EIO_CHOWN: req->result = chown (req->ptr1, req->int2, req->int3); break; 1758 case EIO_CHOWN: req->result = chown (req->ptr1, req->int2, req->int3); break;
1497 case EIO_FCHOWN: req->result = fchown (req->int1, req->int2, req->int3); break; 1759 case EIO_FCHOWN: req->result = fchown (req->int1, req->int2, req->int3); break;
1498 case EIO_CHMOD: req->result = chmod (req->ptr1, (mode_t)req->int2); break; 1760 case EIO_CHMOD: req->result = chmod (req->ptr1, (mode_t)req->int2); break;
1499 case EIO_FCHMOD: req->result = fchmod (req->int1, (mode_t)req->int2); break; 1761 case EIO_FCHMOD: req->result = fchmod (req->int1, (mode_t)req->int2); break;
1500 case EIO_TRUNCATE: req->result = truncate (req->ptr1, req->offs); break; 1762 case EIO_TRUNCATE: req->result = truncate (req->ptr1, req->offs); break;
1507 case EIO_RMDIR: req->result = rmdir (req->ptr1); break; 1769 case EIO_RMDIR: req->result = rmdir (req->ptr1); break;
1508 case EIO_MKDIR: req->result = mkdir (req->ptr1, (mode_t)req->int2); break; 1770 case EIO_MKDIR: req->result = mkdir (req->ptr1, (mode_t)req->int2); break;
1509 case EIO_RENAME: req->result = rename (req->ptr1, req->ptr2); break; 1771 case EIO_RENAME: req->result = rename (req->ptr1, req->ptr2); break;
1510 case EIO_LINK: req->result = link (req->ptr1, req->ptr2); break; 1772 case EIO_LINK: req->result = link (req->ptr1, req->ptr2); break;
1511 case EIO_SYMLINK: req->result = symlink (req->ptr1, req->ptr2); break; 1773 case EIO_SYMLINK: req->result = symlink (req->ptr1, req->ptr2); break;
1512 case EIO_MKNOD: req->result = mknod (req->ptr1, (mode_t)req->int2, (dev_t)req->int3); break; 1774 case EIO_MKNOD: req->result = mknod (req->ptr1, (mode_t)req->int2, (dev_t)req->offs); break;
1513 1775
1514 case EIO_READLINK: ALLOC (NAME_MAX); 1776 case EIO_READLINK: ALLOC (PATH_MAX);
1515 req->result = readlink (req->ptr1, req->ptr2, NAME_MAX); break; 1777 req->result = readlink (req->ptr1, req->ptr2, PATH_MAX); break;
1516 1778
1517 case EIO_SYNC: req->result = 0; sync (); break; 1779 case EIO_SYNC: req->result = 0; sync (); break;
1518 case EIO_FSYNC: req->result = fsync (req->int1); break; 1780 case EIO_FSYNC: req->result = fsync (req->int1); break;
1519 case EIO_FDATASYNC: req->result = fdatasync (req->int1); break; 1781 case EIO_FDATASYNC: req->result = fdatasync (req->int1); break;
1520 case EIO_MSYNC: req->result = msync (req->ptr2, req->size, req->int1); break; 1782 case EIO_MSYNC: req->result = eio__msync (req->ptr2, req->size, req->int1); break;
1783 case EIO_MTOUCH: req->result = eio__mtouch (req); break;
1521 case EIO_MTOUCH: req->result = eio__mtouch (req->ptr2, req->size, req->int1); break; 1784 case EIO_MLOCK: req->result = eio__mlock (req->ptr2, req->size); break;
1785 case EIO_MLOCKALL: req->result = eio__mlockall (req->int1); break;
1522 case EIO_SYNC_FILE_RANGE: req->result = eio__sync_file_range (req->int1, req->offs, req->size, req->int2); break; 1786 case EIO_SYNC_FILE_RANGE: req->result = eio__sync_file_range (req->int1, req->offs, req->size, req->int2); break;
1523 1787
1524 case EIO_READDIR: eio__scandir (req, self); break; 1788 case EIO_READDIR: eio__scandir (req, self); break;
1525 1789
1526 case EIO_BUSY: 1790 case EIO_BUSY:
1527#ifdef _WIN32 1791#ifdef _WIN32
1528 Sleep (req->nv1 * 1000.); 1792 Sleep (req->nv1 * 1e3);
1529#else 1793#else
1530 { 1794 {
1531 struct timeval tv; 1795 struct timeval tv;
1532 1796
1533 tv.tv_sec = req->nv1; 1797 tv.tv_sec = req->nv1;
1534 tv.tv_usec = (req->nv1 - tv.tv_sec) * 1000000.; 1798 tv.tv_usec = (req->nv1 - tv.tv_sec) * 1e6;
1535 1799
1536 req->result = select (0, 0, 0, 0, &tv); 1800 req->result = select (0, 0, 0, 0, &tv);
1537 } 1801 }
1538#endif 1802#endif
1539 break; 1803 break;
1554 times = tv; 1818 times = tv;
1555 } 1819 }
1556 else 1820 else
1557 times = 0; 1821 times = 0;
1558 1822
1559
1560 req->result = req->type == EIO_FUTIME 1823 req->result = req->type == EIO_FUTIME
1561 ? futimes (req->int1, times) 1824 ? futimes (req->int1, times)
1562 : utimes (req->ptr1, times); 1825 : utimes (req->ptr1, times);
1563 } 1826 }
1564 break; 1827 break;
1569 case EIO_NOP: 1832 case EIO_NOP:
1570 req->result = 0; 1833 req->result = 0;
1571 break; 1834 break;
1572 1835
1573 case EIO_CUSTOM: 1836 case EIO_CUSTOM:
1574 ((void (*)(eio_req *))req->feed) (req); 1837 req->feed (req);
1575 break; 1838 break;
1576 1839
1577 default: 1840 default:
1841 errno = ENOSYS;
1578 req->result = -1; 1842 req->result = -1;
1579 break; 1843 break;
1580 } 1844 }
1581 1845
1582 req->errorno = errno; 1846 req->errorno = errno;
1612eio_req *eio_mtouch (void *addr, size_t length, int flags, int pri, eio_cb cb, void *data) 1876eio_req *eio_mtouch (void *addr, size_t length, int flags, int pri, eio_cb cb, void *data)
1613{ 1877{
1614 REQ (EIO_MTOUCH); req->ptr2 = addr; req->size = length; req->int1 = flags; SEND; 1878 REQ (EIO_MTOUCH); req->ptr2 = addr; req->size = length; req->int1 = flags; SEND;
1615} 1879}
1616 1880
1881eio_req *eio_mlock (void *addr, size_t length, int pri, eio_cb cb, void *data)
1882{
1883 REQ (EIO_MLOCK); req->ptr2 = addr; req->size = length; SEND;
1884}
1885
1886eio_req *eio_mlockall (int flags, int pri, eio_cb cb, void *data)
1887{
1888 REQ (EIO_MLOCKALL); req->int1 = flags; SEND;
1889}
1890
1617eio_req *eio_sync_file_range (int fd, off_t offset, size_t nbytes, unsigned int flags, int pri, eio_cb cb, void *data) 1891eio_req *eio_sync_file_range (int fd, off_t offset, size_t nbytes, unsigned int flags, int pri, eio_cb cb, void *data)
1618{ 1892{
1619 REQ (EIO_SYNC_FILE_RANGE); req->int1 = fd; req->offs = offset; req->size = nbytes; req->int2 = flags; SEND; 1893 REQ (EIO_SYNC_FILE_RANGE); req->int1 = fd; req->offs = offset; req->size = nbytes; req->int2 = flags; SEND;
1620} 1894}
1621 1895
1645} 1919}
1646 1920
1647eio_req *eio_fstat (int fd, int pri, eio_cb cb, void *data) 1921eio_req *eio_fstat (int fd, int pri, eio_cb cb, void *data)
1648{ 1922{
1649 REQ (EIO_FSTAT); req->int1 = fd; SEND; 1923 REQ (EIO_FSTAT); req->int1 = fd; SEND;
1924}
1925
1926eio_req *eio_fstatvfs (int fd, int pri, eio_cb cb, void *data)
1927{
1928 REQ (EIO_FSTATVFS); req->int1 = fd; SEND;
1650} 1929}
1651 1930
1652eio_req *eio_futime (int fd, double atime, double mtime, int pri, eio_cb cb, void *data) 1931eio_req *eio_futime (int fd, double atime, double mtime, int pri, eio_cb cb, void *data)
1653{ 1932{
1654 REQ (EIO_FUTIME); req->int1 = fd; req->nv1 = atime; req->nv2 = mtime; SEND; 1933 REQ (EIO_FUTIME); req->int1 = fd; req->nv1 = atime; req->nv2 = mtime; SEND;
1728eio_req *eio_lstat (const char *path, int pri, eio_cb cb, void *data) 2007eio_req *eio_lstat (const char *path, int pri, eio_cb cb, void *data)
1729{ 2008{
1730 return eio__1path (EIO_LSTAT, path, pri, cb, data); 2009 return eio__1path (EIO_LSTAT, path, pri, cb, data);
1731} 2010}
1732 2011
2012eio_req *eio_statvfs (const char *path, int pri, eio_cb cb, void *data)
2013{
2014 return eio__1path (EIO_STATVFS, path, pri, cb, data);
2015}
2016
1733eio_req *eio_unlink (const char *path, int pri, eio_cb cb, void *data) 2017eio_req *eio_unlink (const char *path, int pri, eio_cb cb, void *data)
1734{ 2018{
1735 return eio__1path (EIO_UNLINK, path, pri, cb, data); 2019 return eio__1path (EIO_UNLINK, path, pri, cb, data);
1736} 2020}
1737 2021
1745 REQ (EIO_READDIR); PATH; req->int1 = flags; SEND; 2029 REQ (EIO_READDIR); PATH; req->int1 = flags; SEND;
1746} 2030}
1747 2031
1748eio_req *eio_mknod (const char *path, mode_t mode, dev_t dev, int pri, eio_cb cb, void *data) 2032eio_req *eio_mknod (const char *path, mode_t mode, dev_t dev, int pri, eio_cb cb, void *data)
1749{ 2033{
1750 REQ (EIO_MKNOD); PATH; req->int2 = (long)mode; req->int3 = (long)dev; SEND; 2034 REQ (EIO_MKNOD); PATH; req->int2 = (long)mode; req->offs = (off_t)dev; SEND;
1751} 2035}
1752 2036
1753static eio_req * 2037static eio_req *
1754eio__2path (int type, const char *path, const char *new_path, int pri, eio_cb cb, void *data) 2038eio__2path (int type, const char *path, const char *new_path, int pri, eio_cb cb, void *data)
1755{ 2039{
1779eio_req *eio_rename (const char *path, const char *new_path, int pri, eio_cb cb, void *data) 2063eio_req *eio_rename (const char *path, const char *new_path, int pri, eio_cb cb, void *data)
1780{ 2064{
1781 return eio__2path (EIO_RENAME, path, new_path, pri, cb, data); 2065 return eio__2path (EIO_RENAME, path, new_path, pri, cb, data);
1782} 2066}
1783 2067
1784eio_req *eio_custom (eio_cb execute, int pri, eio_cb cb, void *data) 2068eio_req *eio_custom (void (*execute)(eio_req *), int pri, eio_cb cb, void *data)
1785{ 2069{
1786 REQ (EIO_CUSTOM); req->feed = (void (*)(eio_req *))execute; SEND; 2070 REQ (EIO_CUSTOM); req->feed = execute; SEND;
1787} 2071}
1788 2072
1789#endif 2073#endif
1790 2074
1791eio_req *eio_grp (eio_cb cb, void *data) 2075eio_req *eio_grp (eio_cb cb, void *data)
1800#undef SEND 2084#undef SEND
1801 2085
1802/*****************************************************************************/ 2086/*****************************************************************************/
1803/* grp functions */ 2087/* grp functions */
1804 2088
2089void
1805void eio_grp_feed (eio_req *grp, void (*feed)(eio_req *req), int limit) 2090eio_grp_feed (eio_req *grp, void (*feed)(eio_req *req), int limit)
1806{ 2091{
1807 grp->int2 = limit; 2092 grp->int2 = limit;
1808 grp->feed = feed; 2093 grp->feed = feed;
1809 2094
1810 grp_try_feed (grp); 2095 grp_try_feed (grp);
1811} 2096}
1812 2097
2098void
1813void eio_grp_limit (eio_req *grp, int limit) 2099eio_grp_limit (eio_req *grp, int limit)
1814{ 2100{
1815 grp->int2 = limit; 2101 grp->int2 = limit;
1816 2102
1817 grp_try_feed (grp); 2103 grp_try_feed (grp);
1818} 2104}
1819 2105
2106void
1820void eio_grp_add (eio_req *grp, eio_req *req) 2107eio_grp_add (eio_req *grp, eio_req *req)
1821{ 2108{
1822 assert (("cannot add requests to IO::AIO::GRP after the group finished", grp->int1 != 2)); 2109 assert (("cannot add requests to IO::AIO::GRP after the group finished", grp->int1 != 2));
1823 2110
1824 grp->flags |= EIO_FLAG_GROUPADD; 2111 grp->flags |= EIO_FLAG_GROUPADD;
1825 2112
1836} 2123}
1837 2124
1838/*****************************************************************************/ 2125/*****************************************************************************/
1839/* misc garbage */ 2126/* misc garbage */
1840 2127
2128ssize_t
1841ssize_t eio_sendfile_sync (int ofd, int ifd, off_t offset, size_t count) 2129eio_sendfile_sync (int ofd, int ifd, off_t offset, size_t count)
1842{ 2130{
1843 etp_worker wrk; 2131 etp_worker wrk;
1844 ssize_t ret; 2132 ssize_t ret;
1845 2133
1846 wrk.dbuf = 0; 2134 wrk.dbuf = 0;
