/cvs/IO-AIO/AIO.xs

Comparing IO-AIO/AIO.xs (file contents):
Revision 1.70 by root, Tue Oct 24 15:15:56 2006 UTC vs.
Revision 1.80 by root, Fri Oct 27 19:17:23 2006 UTC

1/* solaris */ 1/* solaris */
2#define _POSIX_PTHREAD_SEMANTICS 1 2#define _POSIX_PTHREAD_SEMANTICS 1
3 3
4#if __linux 4#if __linux && !defined(_GNU_SOURCE)
5# define _GNU_SOURCE 5# define _GNU_SOURCE
6#endif 6#endif
7 7
8/* just in case */
8#define _REENTRANT 1 9#define _REENTRANT 1
9 10
10#include <errno.h> 11#include <errno.h>
11 12
12#include "EXTERN.h" 13#include "EXTERN.h"
47/* used for struct dirent, AIX doesn't provide it */ 48/* used for struct dirent, AIX doesn't provide it */
48#ifndef NAME_MAX 49#ifndef NAME_MAX
49# define NAME_MAX 4096 50# define NAME_MAX 4096
50#endif 51#endif
51 52
53#ifndef PTHREAD_STACK_MIN
54/* care for broken platforms, e.g. windows */
55# define PTHREAD_STACK_MIN 16384
56#endif
57
52#if __ia64 58#if __ia64
53# define STACKSIZE 65536 59# define STACKSIZE 65536
54#elif __i386 || __x86_64 /* 16k is unreasonably high :( */ 60#elif __i386 || __x86_64 /* 16k is unreasonably high :( */
55# define STACKSIZE PTHREAD_STACK_MIN 61# define STACKSIZE PTHREAD_STACK_MIN
56#else 62#else
57# define STACKSIZE 16384 63# define STACKSIZE 16384
58#endif 64#endif
59 65
66/* whether word reads are potentially non-atomic.
67 * this is conservative; most arches this runs
68 * on likely have atomic word reads/writes.
69 */
70#ifndef WORDREAD_UNSAFE
71# if __i386 || __x86_64
72# define WORDREAD_UNSAFE 0
73# else
74# define WORDREAD_UNSAFE 1
75# endif
76#endif
77
60/* buffer size for various temporary buffers */ 78/* buffer size for various temporary buffers */
61#define AIO_BUFSIZE 65536 79#define AIO_BUFSIZE 65536
62 80
63#define dBUF \ 81#define dBUF \
82 char *aio_buf; \
83 LOCK (wrklock); \
64 char *aio_buf = malloc (AIO_BUFSIZE); \ 84 self->dbuf = aio_buf = malloc (AIO_BUFSIZE); \
85 UNLOCK (wrklock); \
65 if (!aio_buf) \ 86 if (!aio_buf) \
66 return -1; 87 return -1;
67
68#define fBUF free (aio_buf)
69 88
70enum { 89enum {
71 REQ_QUIT, 90 REQ_QUIT,
72 REQ_OPEN, REQ_CLOSE, 91 REQ_OPEN, REQ_CLOSE,
73 REQ_READ, REQ_WRITE, REQ_READAHEAD, 92 REQ_READ, REQ_WRITE, REQ_READAHEAD,
125 NUM_PRI = PRI_MAX + PRI_BIAS + 1, 144 NUM_PRI = PRI_MAX + PRI_BIAS + 1,
126}; 145};
127 146
128static int next_pri = DEFAULT_PRI + PRI_BIAS; 147static int next_pri = DEFAULT_PRI + PRI_BIAS;
129 148
130static int started, wanted; 149static unsigned int started, wanted;
131static volatile int nreqs;
132static int max_outstanding = 1<<30;
133static int respipe [2];
134 150
135#if __linux && defined (PTHREAD_ADAPTIVE_MUTEX_INITIALIZER_NP) 151#if __linux && defined (PTHREAD_ADAPTIVE_MUTEX_INITIALIZER_NP)
136# define AIO_MUTEX_INIT PTHREAD_ADAPTIVE_MUTEX_INITIALIZER_NP 152# define AIO_MUTEX_INIT PTHREAD_ADAPTIVE_MUTEX_INITIALIZER_NP
137#else 153#else
138# define AIO_MUTEX_INIT PTHREAD_MUTEX_INITIALIZER 154# define AIO_MUTEX_INIT PTHREAD_MUTEX_INITIALIZER
139#endif 155#endif
140 156
157#define LOCK(mutex) pthread_mutex_lock (&(mutex))
158#define UNLOCK(mutex) pthread_mutex_unlock (&(mutex))
159
160/* worker threads management */
161static pthread_mutex_t wrklock = AIO_MUTEX_INIT;
162
163typedef struct worker {
164 /* locked by wrklock */
165 struct worker *prev, *next;
166
167 pthread_t tid;
168
169 /* locked by reslock, reqlock or wrklock */
170 aio_req req; /* currently processed request */
171 void *dbuf;
172 DIR *dirp;
173} worker;
174
175static worker wrk_first = { &wrk_first, &wrk_first, 0 };
176
177static void worker_clear (worker *wrk)
178{
179 if (wrk->dirp)
180 {
181 closedir (wrk->dirp);
182 wrk->dirp = 0;
183 }
184
185 if (wrk->dbuf)
186 {
187 free (wrk->dbuf);
188 wrk->dbuf = 0;
189 }
190}
191
192static void worker_free (worker *wrk)
193{
194 wrk->next->prev = wrk->prev;
195 wrk->prev->next = wrk->next;
196
197 free (wrk);
198}
199
200static volatile unsigned int nreqs, nready, npending;
201static volatile unsigned int max_outstanding = 0xffffffff;
202static int respipe [2];
203
141static pthread_mutex_t reslock = AIO_MUTEX_INIT; 204static pthread_mutex_t reslock = AIO_MUTEX_INIT;
142static pthread_mutex_t reqlock = AIO_MUTEX_INIT; 205static pthread_mutex_t reqlock = AIO_MUTEX_INIT;
143static pthread_cond_t reqwait = PTHREAD_COND_INITIALIZER; 206static pthread_cond_t reqwait = PTHREAD_COND_INITIALIZER;
207
208#if WORDREAD_UNSAFE
209
210static unsigned int get_nready ()
211{
212 unsigned int retval;
213
214 LOCK (reqlock);
215 retval = nready;
216 UNLOCK (reqlock);
217
218 return retval;
219}
220
221static unsigned int get_npending ()
222{
223 unsigned int retval;
224
225 LOCK (reslock);
226 retval = npending;
227 UNLOCK (reslock);
228
229 return retval;
230}
231
232#else
233
234# define get_nready() nready
235# define get_npending() npending
236
237#endif
144 238
145/* 239/*
146 * a somewhat faster data structure might be nice, but 240 * a somewhat faster data structure might be nice, but
147 * with 8 priorities this actually needs <20 insns 241 * with 8 priorities this actually needs <20 insns
148 * per shift, the most expensive operation. 242 * per shift, the most expensive operation.
194 } 288 }
195 289
196 abort (); 290 abort ();
197} 291}
198 292
293static int poll_cb (int max);
199static void req_invoke (aio_req req); 294static void req_invoke (aio_req req);
200static void req_free (aio_req req); 295static void req_free (aio_req req);
296static void req_cancel (aio_req req);
201 297
202/* must be called at most once */ 298/* must be called at most once */
203static SV *req_sv (aio_req req, const char *klass) 299static SV *req_sv (aio_req req, const char *klass)
204{ 300{
205 if (!req->self) 301 if (!req->self)
267 req_invoke (grp); 363 req_invoke (grp);
268 req_free (grp); 364 req_free (grp);
269 } 365 }
270} 366}
271 367
272static void poll_wait ()
273{
274 fd_set rfd;
275
276 while (nreqs)
277 {
278 int size;
279#if !(__i386 || __x86_64) /* safe without semaphore on these archs */
280 pthread_mutex_lock (&reslock);
281#endif
282 size = res_queue.size;
283#if !(__i386 || __x86_64) /* safe without semaphore on these archs */
284 pthread_mutex_unlock (&reslock);
285#endif
286
287 if (size)
288 return;
289
290 FD_ZERO(&rfd);
291 FD_SET(respipe [0], &rfd);
292
293 select (respipe [0] + 1, &rfd, 0, 0, 0);
294 }
295}
296
297static void req_invoke (aio_req req) 368static void req_invoke (aio_req req)
298{ 369{
299 dSP; 370 dSP;
300 371
301 if (!(req->flags & FLAG_CANCELLED) && SvOK (req->callback)) 372 if (!(req->flags & FLAG_CANCELLED) && SvOK (req->callback))
302 { 373 {
303 errno = req->errorno;
304
305 ENTER; 374 ENTER;
306 SAVETMPS; 375 SAVETMPS;
307 PUSHMARK (SP); 376 PUSHMARK (SP);
308 EXTEND (SP, 1); 377 EXTEND (SP, 1);
309 378
313 { 382 {
314 SV *rv = &PL_sv_undef; 383 SV *rv = &PL_sv_undef;
315 384
316 if (req->result >= 0) 385 if (req->result >= 0)
317 { 386 {
387 int i;
318 char *buf = req->data2ptr; 388 char *buf = req->data2ptr;
319 AV *av = newAV (); 389 AV *av = newAV ();
320 390
321 while (req->result) 391 av_extend (av, req->result - 1);
392
393 for (i = 0; i < req->result; ++i)
322 { 394 {
323 SV *sv = newSVpv (buf, 0); 395 SV *sv = newSVpv (buf, 0);
324 396
325 av_push (av, sv); 397 av_store (av, i, sv);
326 buf += SvCUR (sv) + 1; 398 buf += SvCUR (sv) + 1;
327 req->result--;
328 } 399 }
329 400
330 rv = sv_2mortal (newRV_noinc ((SV *)av)); 401 rv = sv_2mortal (newRV_noinc ((SV *)av));
331 } 402 }
332 403
372 default: 443 default:
373 PUSHs (sv_2mortal (newSViv (req->result))); 444 PUSHs (sv_2mortal (newSViv (req->result)));
374 break; 445 break;
375 } 446 }
376 447
448 errno = req->errorno;
377 449
378 PUTBACK; 450 PUTBACK;
379 call_sv (req->callback, G_VOID | G_EVAL); 451 call_sv (req->callback, G_VOID | G_EVAL);
380 SPAGAIN; 452 SPAGAIN;
381 453
416 SvREFCNT_dec (req->fh); 488 SvREFCNT_dec (req->fh);
417 SvREFCNT_dec (req->fh2); 489 SvREFCNT_dec (req->fh2);
418 SvREFCNT_dec (req->callback); 490 SvREFCNT_dec (req->callback);
419 Safefree (req->statdata); 491 Safefree (req->statdata);
420 492
421 if (req->type == REQ_READDIR && req->result >= 0) 493 if (req->type == REQ_READDIR)
422 free (req->data2ptr); 494 free (req->data2ptr);
423 495
424 Safefree (req); 496 Safefree (req);
425} 497}
426 498
499static void req_cancel_subs (aio_req grp)
500{
501 aio_req sub;
502
503 if (grp->type != REQ_GROUP)
504 return;
505
506 SvREFCNT_dec (grp->fh2);
507 grp->fh2 = 0;
508
509 for (sub = grp->grp_first; sub; sub = sub->grp_next)
510 req_cancel (sub);
511}
512
427static void req_cancel (aio_req req) 513static void req_cancel (aio_req req)
428{ 514{
429 req->flags |= FLAG_CANCELLED; 515 req->flags |= FLAG_CANCELLED;
430 516
431 if (req->type == REQ_GROUP) 517 req_cancel_subs (req);
432 { 518}
433 aio_req sub;
434 519
435 for (sub = req->grp_first; sub; sub = sub->grp_next) 520static void *aio_proc(void *arg);
436 req_cancel (sub); 521
522static void start_thread (void)
523{
524 sigset_t fullsigset, oldsigset;
525 pthread_attr_t attr;
526
527 worker *wrk = calloc (1, sizeof (worker));
528
529 if (!wrk)
530 croak ("unable to allocate worker thread data");
531
532 pthread_attr_init (&attr);
533 pthread_attr_setstacksize (&attr, STACKSIZE);
534 pthread_attr_setdetachstate (&attr, PTHREAD_CREATE_DETACHED);
535#ifdef PTHREAD_SCOPE_PROCESS
536 pthread_attr_setscope (&attr, PTHREAD_SCOPE_PROCESS);
537#endif
538
539 sigfillset (&fullsigset);
540
541 LOCK (wrklock);
542 sigprocmask (SIG_SETMASK, &fullsigset, &oldsigset);
543
544 if (pthread_create (&wrk->tid, &attr, aio_proc, (void *)wrk) == 0)
437 } 545 {
438} 546 wrk->prev = &wrk_first;
547 wrk->next = wrk_first.next;
548 wrk_first.next->prev = wrk;
549 wrk_first.next = wrk;
550 ++started;
551 }
552 else
553 free (wrk);
439 554
555 sigprocmask (SIG_SETMASK, &oldsigset, 0);
556 UNLOCK (wrklock);
557}
558
559static void maybe_start_thread ()
560{
561#if 0
562 static struct timeval last;
563 struct timeval diff, now;
564#endif
565
566 if (started >= wanted)
567 return;
568
569 if (nready <= nreqs - get_nready () - get_npending ())
570 return;
571
572#if 0
573 gettimeofday (&now, 0);
574
575 diff.tv_sec = now.tv_sec - last.tv_sec;
576 diff.tv_usec = now.tv_usec - last.tv_usec;
577
578 if (diff.tv_usec < 0)
579 {
580 --diff.tv_sec;
581 diff.tv_usec += 1000000;
582 }
583
584 if (!diff.tv_sec && diff.tv_usec < 10000)
585 return;
586
587 last = now;
588#endif
589
590 start_thread ();
591}
592
593static void req_send (aio_req req)
594{
595 ++nreqs;
596
597 LOCK (reqlock);
598 ++nready;
599 reqq_push (&req_queue, req);
600 pthread_cond_signal (&reqwait);
601 UNLOCK (reqlock);
602
603 maybe_start_thread ();
604}
605
606static void end_thread (void)
607{
608 aio_req req;
609
610 Newz (0, req, 1, aio_cb);
611
612 req->type = REQ_QUIT;
613 req->pri = PRI_MAX + PRI_BIAS;
614
615 req_send (req);
616
617 LOCK (wrklock);
618 --started;
619 UNLOCK (wrklock);
620}
621
622static void min_parallel (int nthreads)
623{
624 if (wanted < nthreads)
625 wanted = nthreads;
626}
627
628static void max_parallel (int nthreads)
629{
630 if (wanted > nthreads)
631 wanted = nthreads;
632
633 while (started > wanted)
634 end_thread ();
635}
636
637static void poll_wait ()
638{
639 fd_set rfd;
640
641 while (nreqs)
642 {
643 int size;
644 if (WORDREAD_UNSAFE) LOCK (reslock);
645 size = res_queue.size;
646 if (WORDREAD_UNSAFE) UNLOCK (reslock);
647
648 if (size)
649 return;
650
651 maybe_start_thread ();
652
653 FD_ZERO(&rfd);
654 FD_SET(respipe [0], &rfd);
655
656 select (respipe [0] + 1, &rfd, 0, 0, 0);
657 }
658}
659
440static int poll_cb () 660static int poll_cb (int max)
441{ 661{
442 dSP; 662 dSP;
443 int count = 0; 663 int count = 0;
444 int do_croak = 0; 664 int do_croak = 0;
445 aio_req req; 665 aio_req req;
446 666
447 for (;;) 667 for (;;)
448 { 668 {
449 pthread_mutex_lock (&reslock); 669 while (max <= 0 || count < max)
450 req = reqq_shift (&res_queue);
451
452 if (req)
453 { 670 {
671 maybe_start_thread ();
672
673 LOCK (reslock);
674 req = reqq_shift (&res_queue);
675
454 if (!res_queue.size) 676 if (req)
455 { 677 {
678 --npending;
679
680 if (!res_queue.size)
681 {
456 /* read any signals sent by the worker threads */ 682 /* read any signals sent by the worker threads */
457 char buf [32]; 683 char buf [32];
458 while (read (respipe [0], buf, 32) == 32) 684 while (read (respipe [0], buf, 32) == 32)
685 ;
459 ; 686 }
460 } 687 }
688
689 UNLOCK (reslock);
690
691 if (!req)
692 break;
693
694 --nreqs;
695
696 if (req->type == REQ_GROUP && req->length)
697 {
698 req->fd = 1; /* mark request as delayed */
699 continue;
700 }
701 else
702 {
703 if (req->type == REQ_READ)
704 SvCUR_set (req->data, req->dataoffset + (req->result > 0 ? req->result : 0));
705
706 if (req->data2ptr && (req->type == REQ_READ || req->type == REQ_WRITE))
707 SvREADONLY_off (req->data);
708
709 if (req->statdata)
710 {
711 PL_laststype = req->type == REQ_LSTAT ? OP_LSTAT : OP_STAT;
712 PL_laststatval = req->result;
713 PL_statcache = *(req->statdata);
714 }
715
716 req_invoke (req);
717
718 count++;
719 }
720
721 req_free (req);
461 } 722 }
462 723
463 pthread_mutex_unlock (&reslock); 724 if (nreqs <= max_outstanding)
464
465 if (!req)
466 break; 725 break;
467 726
468 --nreqs; 727 poll_wait ();
469 728
470 if (req->type == REQ_QUIT) 729 max = 0;
471 started--;
472 else if (req->type == REQ_GROUP && req->length)
473 {
474 req->fd = 1; /* mark request as delayed */
475 continue;
476 }
477 else
478 {
479 if (req->type == REQ_READ)
480 SvCUR_set (req->data, req->dataoffset + (req->result > 0 ? req->result : 0));
481
482 if (req->data2ptr && (req->type == REQ_READ || req->type == REQ_WRITE))
483 SvREADONLY_off (req->data);
484
485 if (req->statdata)
486 {
487 PL_laststype = req->type == REQ_LSTAT ? OP_LSTAT : OP_STAT;
488 PL_laststatval = req->result;
489 PL_statcache = *(req->statdata);
490 }
491
492 req_invoke (req);
493
494 count++;
495 }
496
497 req_free (req);
498 } 730 }
499 731
500 return count; 732 return count;
501}
502
503static void *aio_proc(void *arg);
504
505static void start_thread (void)
506{
507 sigset_t fullsigset, oldsigset;
508 pthread_t tid;
509 pthread_attr_t attr;
510
511 pthread_attr_init (&attr);
512 pthread_attr_setstacksize (&attr, STACKSIZE);
513 pthread_attr_setdetachstate (&attr, PTHREAD_CREATE_DETACHED);
514
515 sigfillset (&fullsigset);
516 sigprocmask (SIG_SETMASK, &fullsigset, &oldsigset);
517
518 if (pthread_create (&tid, &attr, aio_proc, 0) == 0)
519 started++;
520
521 sigprocmask (SIG_SETMASK, &oldsigset, 0);
522}
523
524static void req_send (aio_req req)
525{
526 while (started < wanted && nreqs >= started)
527 start_thread ();
528
529 ++nreqs;
530
531 pthread_mutex_lock (&reqlock);
532 reqq_push (&req_queue, req);
533 pthread_cond_signal (&reqwait);
534 pthread_mutex_unlock (&reqlock);
535
536 if (nreqs > max_outstanding)
537 for (;;)
538 {
539 poll_cb ();
540
541 if (nreqs <= max_outstanding)
542 break;
543
544 poll_wait ();
545 }
546}
547
548static void end_thread (void)
549{
550 aio_req req;
551
552 Newz (0, req, 1, aio_cb);
553
554 req->type = REQ_QUIT;
555 req->pri = PRI_MAX + PRI_BIAS;
556
557 req_send (req);
558}
559
560static void min_parallel (int nthreads)
561{
562 if (wanted < nthreads)
563 wanted = nthreads;
564}
565
566static void max_parallel (int nthreads)
567{
568 int cur = started;
569
570 if (wanted > nthreads)
571 wanted = nthreads;
572
573 while (cur > wanted)
574 {
575 end_thread ();
576 cur--;
577 }
578
579 while (started > wanted)
580 {
581 poll_wait ();
582 poll_cb ();
583 }
584} 733}
585 734
586static void create_pipe () 735static void create_pipe ()
587{ 736{
588 if (pipe (respipe)) 737 if (pipe (respipe))
612static ssize_t pread (int fd, void *buf, size_t count, off_t offset) 761static ssize_t pread (int fd, void *buf, size_t count, off_t offset)
613{ 762{
614 ssize_t res; 763 ssize_t res;
615 off_t ooffset; 764 off_t ooffset;
616 765
617 pthread_mutex_lock (&preadwritelock); 766 LOCK (preadwritelock);
618 ooffset = lseek (fd, 0, SEEK_CUR); 767 ooffset = lseek (fd, 0, SEEK_CUR);
619 lseek (fd, offset, SEEK_SET); 768 lseek (fd, offset, SEEK_SET);
620 res = read (fd, buf, count); 769 res = read (fd, buf, count);
621 lseek (fd, ooffset, SEEK_SET); 770 lseek (fd, ooffset, SEEK_SET);
622 pthread_mutex_unlock (&preadwritelock); 771 UNLOCK (preadwritelock);
623 772
624 return res; 773 return res;
625} 774}
626 775
627static ssize_t pwrite (int fd, void *buf, size_t count, off_t offset) 776static ssize_t pwrite (int fd, void *buf, size_t count, off_t offset)
628{ 777{
629 ssize_t res; 778 ssize_t res;
630 off_t ooffset; 779 off_t ooffset;
631 780
632 pthread_mutex_lock (&preadwritelock); 781 LOCK (preadwritelock);
633 ooffset = lseek (fd, 0, SEEK_CUR); 782 ooffset = lseek (fd, 0, SEEK_CUR);
634 lseek (fd, offset, SEEK_SET); 783 lseek (fd, offset, SEEK_SET);
635 res = write (fd, buf, count); 784 res = write (fd, buf, count);
636 lseek (fd, offset, SEEK_SET); 785 lseek (fd, offset, SEEK_SET);
637 pthread_mutex_unlock (&preadwritelock); 786 UNLOCK (preadwritelock);
638 787
639 return res; 788 return res;
640} 789}
641#endif 790#endif
642 791
643#if !HAVE_FDATASYNC 792#if !HAVE_FDATASYNC
644# define fdatasync fsync 793# define fdatasync fsync
645#endif 794#endif
646 795
647#if !HAVE_READAHEAD 796#if !HAVE_READAHEAD
648# define readahead aio_readahead 797# define readahead(fd,offset,count) aio_readahead (fd, offset, count, self)
649 798
650static ssize_t readahead (int fd, off_t offset, size_t count) 799static ssize_t aio_readahead (int fd, off_t offset, size_t count, worker *self)
651{ 800{
652 dBUF; 801 dBUF;
653 802
654 while (count > 0) 803 while (count > 0)
655 { 804 {
658 pread (fd, aio_buf, len, offset); 807 pread (fd, aio_buf, len, offset);
659 offset += len; 808 offset += len;
660 count -= len; 809 count -= len;
661 } 810 }
662 811
663 fBUF;
664
665 errno = 0; 812 errno = 0;
666} 813}
814
667#endif 815#endif
668 816
669#if !HAVE_READDIR_R 817#if !HAVE_READDIR_R
670# define readdir_r aio_readdir_r 818# define readdir_r aio_readdir_r
671 819
674static int readdir_r (DIR *dirp, struct dirent *ent, struct dirent **res) 822static int readdir_r (DIR *dirp, struct dirent *ent, struct dirent **res)
675{ 823{
676 struct dirent *e; 824 struct dirent *e;
677 int errorno; 825 int errorno;
678 826
679 pthread_mutex_lock (&readdirlock); 827 LOCK (readdirlock);
680 828
681 e = readdir (dirp); 829 e = readdir (dirp);
682 errorno = errno; 830 errorno = errno;
683 831
684 if (e) 832 if (e)
687 strcpy (ent->d_name, e->d_name); 835 strcpy (ent->d_name, e->d_name);
688 } 836 }
689 else 837 else
690 *res = 0; 838 *res = 0;
691 839
692 pthread_mutex_unlock (&readdirlock); 840 UNLOCK (readdirlock);
693 841
694 errno = errorno; 842 errno = errorno;
695 return e ? 0 : -1; 843 return e ? 0 : -1;
696} 844}
697#endif 845#endif
698 846
699/* sendfile always needs emulation */ 847/* sendfile always needs emulation */
700static ssize_t sendfile_ (int ofd, int ifd, off_t offset, size_t count) 848static ssize_t sendfile_ (int ofd, int ifd, off_t offset, size_t count, worker *self)
701{ 849{
702 ssize_t res; 850 ssize_t res;
703 851
704 if (!count) 852 if (!count)
705 return 0; 853 return 0;
716 { 864 {
717 off_t sbytes; 865 off_t sbytes;
718 res = sendfile (ifd, ofd, offset, count, 0, &sbytes, 0); 866 res = sendfile (ifd, ofd, offset, count, 0, &sbytes, 0);
719 867
720 if (res < 0 && sbytes) 868 if (res < 0 && sbytes)
721 /* maybe only on EAGAIN only: as usual, the manpage leaves you guessing */ 869 /* maybe only on EAGAIN: as usual, the manpage leaves you guessing */
722 res = sbytes; 870 res = sbytes;
723 } 871 }
724 872
725# elif __hpux 873# elif __hpux
726 res = sendfile (ofd, ifd, offset, count, 0, 0); 874 res = sendfile (ofd, ifd, offset, count, 0, 0);
782 930
783 offset += cnt; 931 offset += cnt;
784 res += cnt; 932 res += cnt;
785 count -= cnt; 933 count -= cnt;
786 } 934 }
787
788 fBUF;
789 } 935 }
790 936
791 return res; 937 return res;
792} 938}
793 939
794/* read a full directory */ 940/* read a full directory */
795static int scandir_ (const char *path, void **namesp) 941static void scandir_ (aio_req req, worker *self)
796{ 942{
797 DIR *dirp; 943 DIR *dirp;
798 union 944 union
799 { 945 {
800 struct dirent d; 946 struct dirent d;
805 int memlen = 4096; 951 int memlen = 4096;
806 int memofs = 0; 952 int memofs = 0;
807 int res = 0; 953 int res = 0;
808 int errorno; 954 int errorno;
809 955
810 dirp = opendir (path); 956 LOCK (wrklock);
811 if (!dirp) 957 self->dirp = dirp = opendir (req->dataptr);
812 return -1;
813
814 u = malloc (sizeof (*u)); 958 self->dbuf = u = malloc (sizeof (*u));
815 names = malloc (memlen); 959 req->data2ptr = names = malloc (memlen);
960 UNLOCK (wrklock);
816 961
817 if (u && names) 962 if (dirp && u && names)
818 for (;;) 963 for (;;)
819 { 964 {
820 errno = 0; 965 errno = 0;
821 readdir_r (dirp, &u->d, &entp); 966 readdir_r (dirp, &u->d, &entp);
822 967
832 res++; 977 res++;
833 978
834 while (memofs + len > memlen) 979 while (memofs + len > memlen)
835 { 980 {
836 memlen *= 2; 981 memlen *= 2;
982 LOCK (wrklock);
837 names = realloc (names, memlen); 983 req->data2ptr = names = realloc (names, memlen);
984 UNLOCK (wrklock);
985
838 if (!names) 986 if (!names)
839 break; 987 break;
840 } 988 }
841 989
842 memcpy (names + memofs, name, len); 990 memcpy (names + memofs, name, len);
843 memofs += len; 991 memofs += len;
844 } 992 }
845 } 993 }
846 994
847 errorno = errno;
848 free (u);
849 closedir (dirp);
850
851 if (errorno) 995 if (errno)
852 {
853 free (names);
854 errno = errorno;
855 res = -1; 996 res = -1;
856 } 997
857 998 req->result = res;
858 *namesp = (void *)names;
859 return res;
860} 999}
861 1000
862/*****************************************************************************/ 1001/*****************************************************************************/
863 1002
864static void *aio_proc (void *thr_arg) 1003static void *aio_proc (void *thr_arg)
865{ 1004{
866 aio_req req; 1005 aio_req req;
867 int type; 1006 worker *self = (worker *)thr_arg;
868 1007
869 do 1008 for (;;)
870 { 1009 {
871 pthread_mutex_lock (&reqlock); 1010 LOCK (reqlock);
872 1011
873 for (;;) 1012 for (;;)
874 { 1013 {
875 req = reqq_shift (&req_queue); 1014 self->req = req = reqq_shift (&req_queue);
876 1015
877 if (req) 1016 if (req)
878 break; 1017 break;
879 1018
880 pthread_cond_wait (&reqwait, &reqlock); 1019 pthread_cond_wait (&reqwait, &reqlock);
881 } 1020 }
882 1021
883 pthread_mutex_unlock (&reqlock); 1022 --nready;
1023
1024 UNLOCK (reqlock);
884 1025
885 errno = 0; /* strictly unnecessary */ 1026 errno = 0; /* strictly unnecessary */
886 type = req->type; /* remember type for QUIT check */
887 1027
888 if (!(req->flags & FLAG_CANCELLED)) 1028 if (!(req->flags & FLAG_CANCELLED))
889 switch (type) 1029 switch (req->type)
890 { 1030 {
891 case REQ_READ: req->result = pread (req->fd, req->dataptr, req->length, req->offset); break; 1031 case REQ_READ: req->result = pread (req->fd, req->dataptr, req->length, req->offset); break;
892 case REQ_WRITE: req->result = pwrite (req->fd, req->dataptr, req->length, req->offset); break; 1032 case REQ_WRITE: req->result = pwrite (req->fd, req->dataptr, req->length, req->offset); break;
893 1033
894 case REQ_READAHEAD: req->result = readahead (req->fd, req->offset, req->length); break; 1034 case REQ_READAHEAD: req->result = readahead (req->fd, req->offset, req->length); break;
895 case REQ_SENDFILE: req->result = sendfile_ (req->fd, req->fd2, req->offset, req->length); break; 1035 case REQ_SENDFILE: req->result = sendfile_ (req->fd, req->fd2, req->offset, req->length, self); break;
896 1036
897 case REQ_STAT: req->result = stat (req->dataptr, req->statdata); break; 1037 case REQ_STAT: req->result = stat (req->dataptr, req->statdata); break;
898 case REQ_LSTAT: req->result = lstat (req->dataptr, req->statdata); break; 1038 case REQ_LSTAT: req->result = lstat (req->dataptr, req->statdata); break;
899 case REQ_FSTAT: req->result = fstat (req->fd , req->statdata); break; 1039 case REQ_FSTAT: req->result = fstat (req->fd , req->statdata); break;
900 1040
906 case REQ_LINK: req->result = link (req->data2ptr, req->dataptr); break; 1046 case REQ_LINK: req->result = link (req->data2ptr, req->dataptr); break;
907 case REQ_SYMLINK: req->result = symlink (req->data2ptr, req->dataptr); break; 1047 case REQ_SYMLINK: req->result = symlink (req->data2ptr, req->dataptr); break;
908 1048
909 case REQ_FDATASYNC: req->result = fdatasync (req->fd); break; 1049 case REQ_FDATASYNC: req->result = fdatasync (req->fd); break;
910 case REQ_FSYNC: req->result = fsync (req->fd); break; 1050 case REQ_FSYNC: req->result = fsync (req->fd); break;
911 case REQ_READDIR: req->result = scandir_ (req->dataptr, &req->data2ptr); break; 1051 case REQ_READDIR: scandir_ (req, self); break;
912 1052
913 case REQ_BUSY: 1053 case REQ_BUSY:
914 { 1054 {
915 struct timeval tv; 1055 struct timeval tv;
916 1056
920 req->result = select (0, 0, 0, 0, &tv); 1060 req->result = select (0, 0, 0, 0, &tv);
921 } 1061 }
922 1062
923 case REQ_GROUP: 1063 case REQ_GROUP:
924 case REQ_NOP: 1064 case REQ_NOP:
1065 break;
1066
925 case REQ_QUIT: 1067 case REQ_QUIT:
1068 LOCK (wrklock);
1069 worker_free (self);
1070 --started;
1071 UNLOCK (wrklock);
926 break; 1072 return 0;
927 1073
928 default: 1074 default:
929 req->result = ENOSYS; 1075 req->result = ENOSYS;
930 break; 1076 break;
931 } 1077 }
932 1078
933 req->errorno = errno; 1079 req->errorno = errno;
934 1080
935 pthread_mutex_lock (&reslock); 1081 LOCK (reslock);
1082
1083 ++npending;
936 1084
937 if (!reqq_push (&res_queue, req)) 1085 if (!reqq_push (&res_queue, req))
938 /* write a dummy byte to the pipe so fh becomes ready */ 1086 /* write a dummy byte to the pipe so fh becomes ready */
939 write (respipe [1], &respipe, 1); 1087 write (respipe [1], &respipe, 1);
940 1088
941 pthread_mutex_unlock (&reslock); 1089 self->req = 0;
942 } 1090 worker_clear (self);
943 while (type != REQ_QUIT);
944 1091
945 return 0; 1092 UNLOCK (reslock);
1093 }
946} 1094}
947 1095
948/*****************************************************************************/ 1096/*****************************************************************************/
949 1097
950static void atfork_prepare (void) 1098static void atfork_prepare (void)
951{ 1099{
952 pthread_mutex_lock (&reqlock); 1100 LOCK (wrklock);
953 pthread_mutex_lock (&reslock); 1101 LOCK (reqlock);
1102 LOCK (reslock);
954#if !HAVE_PREADWRITE 1103#if !HAVE_PREADWRITE
955 pthread_mutex_lock (&preadwritelock); 1104 LOCK (preadwritelock);
956#endif 1105#endif
957#if !HAVE_READDIR_R 1106#if !HAVE_READDIR_R
958 pthread_mutex_lock (&readdirlock); 1107 LOCK (readdirlock);
959#endif 1108#endif
960} 1109}
961 1110
962static void atfork_parent (void) 1111static void atfork_parent (void)
963{ 1112{
964#if !HAVE_READDIR_R 1113#if !HAVE_READDIR_R
965 pthread_mutex_unlock (&readdirlock); 1114 UNLOCK (readdirlock);
966#endif 1115#endif
967#if !HAVE_PREADWRITE 1116#if !HAVE_PREADWRITE
968 pthread_mutex_unlock (&preadwritelock); 1117 UNLOCK (preadwritelock);
969#endif 1118#endif
970 pthread_mutex_unlock (&reslock); 1119 UNLOCK (reslock);
971 pthread_mutex_unlock (&reqlock); 1120 UNLOCK (reqlock);
1121 UNLOCK (wrklock);
972} 1122}
973 1123
974static void atfork_child (void) 1124static void atfork_child (void)
975{ 1125{
976 aio_req prv; 1126 aio_req prv;
977
978 started = 0;
979 1127
980 while (prv = reqq_shift (&req_queue)) 1128 while (prv = reqq_shift (&req_queue))
981 req_free (prv); 1129 req_free (prv);
982 1130
983 while (prv = reqq_shift (&res_queue)) 1131 while (prv = reqq_shift (&res_queue))
984 req_free (prv); 1132 req_free (prv);
985 1133
1134 while (wrk_first.next != &wrk_first)
1135 {
1136 worker *wrk = wrk_first.next;
1137
1138 if (wrk->req)
1139 req_free (wrk->req);
1140
1141 worker_clear (wrk);
1142 worker_free (wrk);
1143 }
1144
1145 started = 0;
1146 nreqs = 0;
1147
986 close (respipe [0]); 1148 close (respipe [0]);
987 close (respipe [1]); 1149 close (respipe [1]);
988 create_pipe (); 1150 create_pipe ();
989 1151
990 atfork_parent (); 1152 atfork_parent ();
1022 newCONSTSUB (stash, "O_RDONLY", newSViv (O_RDONLY)); 1184 newCONSTSUB (stash, "O_RDONLY", newSViv (O_RDONLY));
1023 newCONSTSUB (stash, "O_WRONLY", newSViv (O_WRONLY)); 1185 newCONSTSUB (stash, "O_WRONLY", newSViv (O_WRONLY));
1024 1186
1025 create_pipe (); 1187 create_pipe ();
1026 pthread_atfork (atfork_prepare, atfork_parent, atfork_child); 1188 pthread_atfork (atfork_prepare, atfork_parent, atfork_child);
1027}
1028 1189
1190 start_thread ();
1191}
1192
1029void 1193void
1030min_parallel (nthreads) 1194min_parallel (int nthreads)
1031 int nthreads
1032 PROTOTYPE: $ 1195 PROTOTYPE: $
1033 1196
1034void 1197void
1035max_parallel (nthreads) 1198max_parallel (int nthreads)
1036 int nthreads
1037 PROTOTYPE: $ 1199 PROTOTYPE: $
1038 1200
1039int 1201int
1040max_outstanding (nreqs) 1202max_outstanding (int maxreqs)
1041 int nreqs 1203 PROTOTYPE: $
1042 PROTOTYPE: $
1043 CODE: 1204 CODE:
1044 RETVAL = max_outstanding; 1205 RETVAL = max_outstanding;
1045 max_outstanding = nreqs; 1206 max_outstanding = maxreqs;
1207 OUTPUT:
1208 RETVAL
1046 1209
1047void 1210void
1048aio_open (pathname,flags,mode,callback=&PL_sv_undef) 1211aio_open (pathname,flags,mode,callback=&PL_sv_undef)
1049 SV * pathname 1212 SV * pathname
1050 int flags 1213 int flags
1305 req->type = REQ_NOP; 1468 req->type = REQ_NOP;
1306 1469
1307 REQ_SEND; 1470 REQ_SEND;
1308} 1471}
1309 1472
1310void 1473int
1311aioreq_pri (int pri = DEFAULT_PRI) 1474aioreq_pri (int pri = 0)
1312 CODE: 1475 PROTOTYPE: ;$
1476 CODE:
1477 RETVAL = next_pri - PRI_BIAS;
1478 if (items > 0)
1479 {
1313 if (pri < PRI_MIN) pri = PRI_MIN; 1480 if (pri < PRI_MIN) pri = PRI_MIN;
1314 if (pri > PRI_MAX) pri = PRI_MAX; 1481 if (pri > PRI_MAX) pri = PRI_MAX;
1315 next_pri = pri + PRI_BIAS; 1482 next_pri = pri + PRI_BIAS;
1483 }
1484 OUTPUT:
1485 RETVAL
1316 1486
1317void 1487void
1318aioreq_nice (int nice = 0) 1488aioreq_nice (int nice = 0)
1319 CODE: 1489 CODE:
1320 nice = next_pri - nice; 1490 nice = next_pri - nice;
1321 if (nice < PRI_MIN) nice = PRI_MIN; 1491 if (nice < PRI_MIN) nice = PRI_MIN;
1322 if (nice > PRI_MAX) nice = PRI_MAX; 1492 if (nice > PRI_MAX) nice = PRI_MAX;
1323 next_pri = nice + PRI_BIAS; 1493 next_pri = nice + PRI_BIAS;
1324 1494
1325void 1495void
1326flush () 1496flush ()
1327 PROTOTYPE: 1497 PROTOTYPE:
1328 CODE: 1498 CODE:
1329 while (nreqs) 1499 while (nreqs)
1330 { 1500 {
1331 poll_wait (); 1501 poll_wait ();
1332 poll_cb (); 1502 poll_cb (0);
1333 } 1503 }
1334 1504
1335void 1505void
1336poll() 1506poll()
1337 PROTOTYPE: 1507 PROTOTYPE:
1338 CODE: 1508 CODE:
1339 if (nreqs) 1509 if (nreqs)
1340 { 1510 {
1341 poll_wait (); 1511 poll_wait ();
1342 poll_cb (); 1512 poll_cb (0);
1343 } 1513 }
1344 1514
1345int 1515int
1346poll_fileno() 1516poll_fileno()
1347 PROTOTYPE: 1517 PROTOTYPE:
1352 1522
1353int 1523int
1354poll_cb(...) 1524poll_cb(...)
1355 PROTOTYPE: 1525 PROTOTYPE:
1356 CODE: 1526 CODE:
1357 RETVAL = poll_cb (); 1527 RETVAL = poll_cb (0);
1528 OUTPUT:
1529 RETVAL
1530
1531int
1532poll_some(int max = 0)
1533 PROTOTYPE: $
1534 CODE:
1535 RETVAL = poll_cb (max);
1358 OUTPUT: 1536 OUTPUT:
1359 RETVAL 1537 RETVAL
1360 1538
1361void 1539void
1362poll_wait() 1540poll_wait()
1371 CODE: 1549 CODE:
1372 RETVAL = nreqs; 1550 RETVAL = nreqs;
1373 OUTPUT: 1551 OUTPUT:
1374 RETVAL 1552 RETVAL
1375 1553
1554int
1555nready()
1556 PROTOTYPE:
1557 CODE:
1558 RETVAL = get_nready ();
1559 OUTPUT:
1560 RETVAL
1561
1562int
1563npending()
1564 PROTOTYPE:
1565 CODE:
1566 RETVAL = get_npending ();
1567 OUTPUT:
1568 RETVAL
1569
1376PROTOTYPES: DISABLE 1570PROTOTYPES: DISABLE
1377 1571
1378MODULE = IO::AIO PACKAGE = IO::AIO::REQ 1572MODULE = IO::AIO PACKAGE = IO::AIO::REQ
1379 1573
1380void 1574void
1422 } 1616 }
1423 } 1617 }
1424} 1618}
1425 1619
1426void 1620void
1621cancel_subs (aio_req_ornot req)
1622 CODE:
1623 req_cancel_subs (req);
1624
1625void
1427result (aio_req grp, ...) 1626result (aio_req grp, ...)
1428 CODE: 1627 CODE:
1429{ 1628{
1430 int i; 1629 int i;
1630 AV *av;
1631
1632 grp->errorno = errno;
1633
1431 AV *av = newAV (); 1634 av = newAV ();
1432 1635
1433 for (i = 1; i < items; ++i ) 1636 for (i = 1; i < items; ++i )
1434 av_push (av, newSVsv (ST (i))); 1637 av_push (av, newSVsv (ST (i)));
1435 1638
1436 SvREFCNT_dec (grp->data); 1639 SvREFCNT_dec (grp->data);
1437 grp->data = (SV *)av; 1640 grp->data = (SV *)av;
1438} 1641}
1642
1643void
1644errno (aio_req grp, int errorno = errno)
1645 CODE:
1646 grp->errorno = errorno;
1439 1647
1440void 1648void
1441limit (aio_req grp, int limit) 1649limit (aio_req grp, int limit)
1442 CODE: 1650 CODE:
1443 grp->fd2 = limit; 1651 grp->fd2 = limit;
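
The central bookkeeping change in this revision is that the shared counters (nready, npending) are only ever read through get_nready ()/get_npending (), which take the owning mutex when WORDREAD_UNSAFE is set and collapse to a plain read on i386/x86_64. Below is a minimal, standalone sketch of that accessor pattern, assuming a POSIX-threads build; the main () driver and anything not visible in the diff above are illustrative only, not part of AIO.xs.

/* sketch: lock-guarded counter reads on targets without atomic word reads */
#include <pthread.h>
#include <stdio.h>

#ifndef WORDREAD_UNSAFE
# if defined (__i386__) || defined (__x86_64__)
#  define WORDREAD_UNSAFE 0   /* plain word reads are atomic here */
# else
#  define WORDREAD_UNSAFE 1   /* be conservative everywhere else */
# endif
#endif

static pthread_mutex_t reqlock = PTHREAD_MUTEX_INITIALIZER;
static volatile unsigned int nready;   /* writers always hold reqlock */

#if WORDREAD_UNSAFE
static unsigned int get_nready (void)
{
  unsigned int retval;

  pthread_mutex_lock (&reqlock);       /* same lock the writers use */
  retval = nready;
  pthread_mutex_unlock (&reqlock);

  return retval;
}
#else
# define get_nready() nready           /* unsynchronised read is fine */
#endif

int main (void)
{
  pthread_mutex_lock (&reqlock);
  ++nready;                            /* how req_send bumps the counter */
  pthread_mutex_unlock (&reqlock);

  printf ("ready requests: %u\n", get_nready ());
  return 0;
}

Built with cc -pthread this prints 1 on any target; the only difference between architectures is whether the read in get_nready () pays for a lock round trip.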

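Both versions of poll_wait block in select () on respipe [0] until a worker announces a completed request by writing a dummy byte after pushing onto res_queue, and poll_cb drains those bytes once the queue runs empty. Here is a reduced, self-contained sketch of that self-pipe wakeup, with the result queue collapsed to a bare counter; the worker/main pairing and everything else not visible in the diff above are illustrative only.

/* sketch: self-pipe wakeup between a worker thread and the polling caller */
#include <pthread.h>
#include <stdio.h>
#include <sys/select.h>
#include <unistd.h>

static int respipe [2];
static pthread_mutex_t reslock = PTHREAD_MUTEX_INITIALIZER;
static unsigned int npending;          /* stands in for res_queue.size */

static void *worker (void *arg)
{
  (void)arg;

  pthread_mutex_lock (&reslock);
  ++npending;                          /* "push" one finished request */
  write (respipe [1], "x", 1);         /* wake anyone sleeping in select () */
  pthread_mutex_unlock (&reslock);

  return 0;
}

int main (void)
{
  pthread_t tid;
  fd_set rfd;
  char buf [32];

  if (pipe (respipe))
    return 1;

  pthread_create (&tid, 0, worker, 0);

  /* poll_wait equivalent: sleep until a result is announced */
  FD_ZERO (&rfd);
  FD_SET (respipe [0], &rfd);
  select (respipe [0] + 1, &rfd, 0, 0, 0);

  /* poll_cb equivalent: consume the result, drain the pipe when the queue empties */
  pthread_mutex_lock (&reslock);
  printf ("pending results: %u\n", npending);
  --npending;
  if (!npending)
    while (read (respipe [0], buf, sizeof buf) == sizeof buf)
      ;                                /* same 32-byte drain loop as poll_cb */
  pthread_mutex_unlock (&reslock);

  pthread_join (tid, 0);
  return 0;
}

In the module itself the wakeup byte is written only when reqq_push returns 0, which keeps the pipe from accumulating one byte per completed request.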