/cvs/IO-AIO/AIO.xs
Revision: 1.67
Committed: Tue Oct 24 03:17:39 2006 UTC by root
Branch: MAIN
Changes since 1.66: +145 -140 lines
Log Message:
*** empty log message ***

File Contents

1 #if __linux
2 # define _GNU_SOURCE
3 #endif
4
5 #define _REENTRANT 1
6
7 #include <errno.h>
8
9 #include "EXTERN.h"
10 #include "perl.h"
11 #include "XSUB.h"
12
13 #include "autoconf/config.h"
14
15 #include <pthread.h>
16
17 #include <stddef.h>
18 #include <errno.h>
19 #include <sys/time.h>
20 #include <sys/select.h>
21 #include <sys/types.h>
22 #include <sys/stat.h>
23 #include <limits.h>
24 #include <unistd.h>
25 #include <fcntl.h>
26 #include <signal.h>
27 #include <sched.h>
28
29 #if HAVE_SENDFILE
30 # if __linux
31 # include <sys/sendfile.h>
32 # elif __freebsd
33 # include <sys/socket.h>
34 # include <sys/uio.h>
35 # elif __hpux
36 # include <sys/socket.h>
37 # elif __solaris /* not yet */
38 # include <sys/sendfile.h>
39 # else
40 # error sendfile support requested but not available
41 # endif
42 #endif
43
45 /* used to size struct dirent buffers; AIX doesn't provide NAME_MAX */
45 #ifndef NAME_MAX
46 # define NAME_MAX 4096
47 #endif
48
49 #if __ia64
50 # define STACKSIZE 65536
51 #elif __i386 || __x86_64 /* 16k is unreasonably high :( */
52 # define STACKSIZE PTHREAD_STACK_MIN
53 #else
54 # define STACKSIZE 16384
55 #endif
56
57 /* buffer size for various temporary buffers */
58 #define AIO_BUFSIZE 65536
59
60 #define dBUF \
61 char *aio_buf = malloc (AIO_BUFSIZE); \
62 if (!aio_buf) \
63 return -1;
64
65 #define fBUF free (aio_buf)
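/* dBUF declares and allocates a temporary per-call buffer (aio_buf) and makes the
 * enclosing function fail with -1 when the allocation fails; fBUF releases it again.
 * Both are used by the readahead and sendfile emulations below. */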
66
67 enum {
68 REQ_QUIT,
69 REQ_OPEN, REQ_CLOSE,
70 REQ_READ, REQ_WRITE, REQ_READAHEAD,
71 REQ_SENDFILE,
72 REQ_STAT, REQ_LSTAT, REQ_FSTAT,
73 REQ_FSYNC, REQ_FDATASYNC,
74 REQ_UNLINK, REQ_RMDIR, REQ_RENAME,
75 REQ_READDIR,
76 REQ_LINK, REQ_SYMLINK,
77 REQ_GROUP, REQ_NOP,
78 REQ_SLEEP,
79 };
80
81 #define AIO_REQ_KLASS "IO::AIO::REQ"
82 #define AIO_GRP_KLASS "IO::AIO::GRP"
83
84 typedef struct aio_cb
85 {
86 struct aio_cb *volatile next;
87
88 SV *data, *callback;
89 SV *fh, *fh2;
90 void *dataptr, *data2ptr;
91 Stat_t *statdata;
92 off_t offset;
93 size_t length;
94 ssize_t result;
95
96 STRLEN dataoffset;
97 int type;
98 int fd, fd2;
99 int errorno;
100 mode_t mode; /* open */
101
102 unsigned char flags;
103 unsigned char pri;
104
105 SV *self; /* the perl counterpart of this request, if any */
106 struct aio_cb *grp, *grp_prev, *grp_next, *grp_first;
107 } aio_cb;
108
109 enum {
110 FLAG_CANCELLED = 0x01,
111 };
112
113 typedef aio_cb *aio_req;
114 typedef aio_cb *aio_req_ornot;
115
116 enum {
117 PRI_MIN = -4,
118 PRI_MAX = 4,
119
120 DEFAULT_PRI = 0,
121 PRI_BIAS = -PRI_MIN,
122 NUM_PRI = PRI_MAX + PRI_BIAS + 1,
123 };
124
125 static int next_pri = DEFAULT_PRI + PRI_BIAS;
126
127 static int started, wanted;
128 static volatile int nreqs;
129 static int max_outstanding = 1<<30;
130 static int respipe [2];
131
132 #if __linux && defined (PTHREAD_ADAPTIVE_MUTEX_INITIALIZER_NP)
133 # define AIO_MUTEX_INIT PTHREAD_ADAPTIVE_MUTEX_INITIALIZER_NP
134 #else
135 # define AIO_MUTEX_INIT PTHREAD_MUTEX_INITIALIZER
136 #endif
137
138 static pthread_mutex_t reslock = AIO_MUTEX_INIT;
139 static pthread_mutex_t reqlock = AIO_MUTEX_INIT;
140 static pthread_cond_t reqwait = PTHREAD_COND_INITIALIZER;
141
142 /*
143 * a somewhat faster data structure might be nice, but
144 * with 8 priorities this actually needs <20 insns
145 * per shift, the most expensive operation.
146 */
147 typedef struct {
148 aio_req qs[NUM_PRI], qe[NUM_PRI]; /* qstart, qend */
149 int size;
150 } reqq;
151
152 static reqq req_queue;
153 static reqq res_queue;
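/* req_queue holds requests waiting for a worker thread; it is protected by reqlock and
 * workers are woken via reqwait. res_queue holds finished requests waiting for poll_cb
 * in the Perl thread and is protected by reslock. Within a queue, each priority level
 * has its own FIFO: reqq_push appends at qe[pri], reqq_shift scans from the highest
 * priority downwards. */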
154
155 int reqq_push (reqq *q, aio_req req)
156 {
157 int pri = req->pri;
158 req->next = 0;
159
160 if (q->qe[pri])
161 {
162 q->qe[pri]->next = req;
163 q->qe[pri] = req;
164 }
165 else
166 q->qe[pri] = q->qs[pri] = req;
167
168 return q->size++;
169 }
170
171 aio_req reqq_shift (reqq *q)
172 {
173 int pri;
174
175 if (!q->size)
176 return 0;
177
178 --q->size;
179
180 for (pri = NUM_PRI; pri--; )
181 {
182 aio_req req = q->qs[pri];
183
184 if (req)
185 {
186 if (!(q->qs[pri] = req->next))
187 q->qe[pri] = 0;
188
189 return req;
190 }
191 }
192
193 abort ();
194 }
195
196 static void req_invoke (aio_req req);
197 static void req_free (aio_req req);
198
199 /* must be called at most once */
200 static SV *req_sv (aio_req req, const char *klass)
201 {
202 if (!req->self)
203 {
204 req->self = (SV *)newHV ();
205 sv_magic (req->self, 0, PERL_MAGIC_ext, (char *)req, 0);
206 }
207
208 return sv_2mortal (sv_bless (newRV_inc (req->self), gv_stashpv (klass, 1)));
209 }
210
211 static aio_req SvAIO_REQ (SV *sv)
212 {
213 MAGIC *mg;
214
215 if (!sv_derived_from (sv, AIO_REQ_KLASS) || !SvROK (sv))
216 croak ("object of class " AIO_REQ_KLASS " expected");
217
218 mg = mg_find (SvRV (sv), PERL_MAGIC_ext);
219
220 return mg ? (aio_req)mg->mg_ptr : 0;
221 }
222
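/* Group requests (REQ_GROUP) reuse several aio_cb fields: length counts the outstanding
 * member requests, fd2 holds the feeder limit set by limit()/feed(), fh2 holds the
 * feeder callback, and fd doubles as the group state (1 = group request processed but
 * members still pending, 2 = finished). aio_grp_feed calls the feeder until the limit
 * is reached or the feeder stops adding requests; aio_grp_dec runs as members finish
 * and invokes the group callback once the last member is done and the group request
 * itself has been processed. */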
223 static void aio_grp_feed (aio_req grp)
224 {
225 while (grp->length < grp->fd2 && !(grp->flags & FLAG_CANCELLED))
226 {
227 int old_len = grp->length;
228
229 if (grp->fh2 && SvOK (grp->fh2))
230 {
231 dSP;
232
233 ENTER;
234 SAVETMPS;
235 PUSHMARK (SP);
236 XPUSHs (req_sv (grp, AIO_GRP_KLASS));
237 PUTBACK;
238 call_sv (grp->fh2, G_VOID | G_EVAL | G_KEEPERR);
239 SPAGAIN;
240 FREETMPS;
241 LEAVE;
242 }
243
244 /* stop if no progress has been made */
245 if (old_len == grp->length)
246 {
247 SvREFCNT_dec (grp->fh2);
248 grp->fh2 = 0;
249 break;
250 }
251 }
252 }
253
254 static void aio_grp_dec (aio_req grp)
255 {
256 --grp->length;
257
258 /* call feeder, if applicable */
259 aio_grp_feed (grp);
260
261 /* finish, if done */
262 if (!grp->length && grp->fd)
263 {
264 req_invoke (grp);
265 req_free (grp);
266 }
267 }
268
269 static void poll_wait ()
270 {
271 fd_set rfd;
272
273 while (nreqs)
274 {
275 int size;
276 #if !(__i386 || __x86_64) /* safe without a semaphore on these archs */
277 pthread_mutex_lock (&reslock);
278 #endif
279 size = res_queue.size;
280 #if !(__i386 || __x86_64) /* safe without a semaphore on these archs */
281 pthread_mutex_unlock (&reslock);
282 #endif
283
284 if (size)
285 return;
286
287 FD_ZERO(&rfd);
288 FD_SET(respipe [0], &rfd);
289
290 select (respipe [0] + 1, &rfd, 0, 0, 0);
291 }
292 }
293
294 static void req_invoke (aio_req req)
295 {
296 dSP;
297
298 if (!(req->flags & FLAG_CANCELLED) && SvOK (req->callback))
299 {
300 errno = req->errorno;
301
302 ENTER;
303 SAVETMPS;
304 PUSHMARK (SP);
305 EXTEND (SP, 1);
306
307 switch (req->type)
308 {
309 case REQ_READDIR:
310 {
311 SV *rv = &PL_sv_undef;
312
313 if (req->result >= 0)
314 {
315 char *buf = req->data2ptr;
316 AV *av = newAV ();
317
318 while (req->result)
319 {
320 SV *sv = newSVpv (buf, 0);
321
322 av_push (av, sv);
323 buf += SvCUR (sv) + 1;
324 req->result--;
325 }
326
327 rv = sv_2mortal (newRV_noinc ((SV *)av));
328 }
329
330 PUSHs (rv);
331 }
332 break;
333
334 case REQ_OPEN:
335 {
336 /* convert fd to fh */
337 SV *fh;
338
339 PUSHs (sv_2mortal (newSViv (req->result)));
340 PUTBACK;
341 call_pv ("IO::AIO::_fd2fh", G_SCALAR | G_EVAL);
342 SPAGAIN;
343
344 fh = SvREFCNT_inc (POPs);
345
346 PUSHMARK (SP);
347 XPUSHs (sv_2mortal (fh));
348 }
349 break;
350
351 case REQ_GROUP:
352 req->fd = 2; /* mark group as finished */
353
354 if (req->data)
355 {
356 int i;
357 AV *av = (AV *)req->data;
358
359 EXTEND (SP, AvFILL (av) + 1);
360 for (i = 0; i <= AvFILL (av); ++i)
361 PUSHs (*av_fetch (av, i, 0));
362 }
363 break;
364
365 case REQ_NOP:
366 case REQ_SLEEP:
367 break;
368
369 default:
370 PUSHs (sv_2mortal (newSViv (req->result)));
371 break;
372 }
373
374
375 PUTBACK;
376 call_sv (req->callback, G_VOID | G_EVAL);
377 SPAGAIN;
378
379 FREETMPS;
380 LEAVE;
381 }
382
383 if (req->grp)
384 {
385 aio_req grp = req->grp;
386
387 /* unlink request */
388 if (req->grp_next) req->grp_next->grp_prev = req->grp_prev;
389 if (req->grp_prev) req->grp_prev->grp_next = req->grp_next;
390
391 if (grp->grp_first == req)
392 grp->grp_first = req->grp_next;
393
394 aio_grp_dec (grp);
395 }
396
397 if (SvTRUE (ERRSV))
398 {
399 req_free (req);
400 croak (0);
401 }
402 }
403
404 static void req_free (aio_req req)
405 {
406 if (req->self)
407 {
408 sv_unmagic (req->self, PERL_MAGIC_ext);
409 SvREFCNT_dec (req->self);
410 }
411
412 SvREFCNT_dec (req->data);
413 SvREFCNT_dec (req->fh);
414 SvREFCNT_dec (req->fh2);
415 SvREFCNT_dec (req->callback);
416 Safefree (req->statdata);
417
418 if (req->type == REQ_READDIR && req->result >= 0)
419 free (req->data2ptr);
420
421 Safefree (req);
422 }
423
424 static void req_cancel (aio_req req)
425 {
426 req->flags |= FLAG_CANCELLED;
427
428 if (req->type == REQ_GROUP)
429 {
430 aio_req sub;
431
432 for (sub = req->grp_first; sub; sub = sub->grp_next)
433 req_cancel (sub);
434 }
435 }
436
437 static int poll_cb ()
438 {
439 dSP;
440 int count = 0;
441 int do_croak = 0;
442 aio_req req;
443
444 for (;;)
445 {
446 pthread_mutex_lock (&reslock);
447 req = reqq_shift (&res_queue);
448
449 if (req)
450 {
451 if (!res_queue.size)
452 {
453 /* read any signals sent by the worker threads */
454 char buf [32];
455 while (read (respipe [0], buf, 32) == 32)
456 ;
457 }
458 }
459
460 pthread_mutex_unlock (&reslock);
461
462 if (!req)
463 break;
464
465 --nreqs;
466
467 if (req->type == REQ_QUIT)
468 started--;
469 else if (req->type == REQ_GROUP && req->length)
470 {
471 req->fd = 1; /* mark request as delayed */
472 continue;
473 }
474 else
475 {
476 if (req->type == REQ_READ)
477 SvCUR_set (req->data, req->dataoffset + (req->result > 0 ? req->result : 0));
478
479 if (req->data2ptr && (req->type == REQ_READ || req->type == REQ_WRITE))
480 SvREADONLY_off (req->data);
481
482 if (req->statdata)
483 {
484 PL_laststype = req->type == REQ_LSTAT ? OP_LSTAT : OP_STAT;
485 PL_laststatval = req->result;
486 PL_statcache = *(req->statdata);
487 }
488
489 req_invoke (req);
490
491 count++;
492 }
493
494 req_free (req);
495 }
496
497 return count;
498 }
499
500 static void *aio_proc(void *arg);
501
502 static void start_thread (void)
503 {
504 sigset_t fullsigset, oldsigset;
505 pthread_t tid;
506 pthread_attr_t attr;
507
508 pthread_attr_init (&attr);
509 pthread_attr_setstacksize (&attr, STACKSIZE);
510 pthread_attr_setdetachstate (&attr, PTHREAD_CREATE_DETACHED);
511
512 sigfillset (&fullsigset);
513 sigprocmask (SIG_SETMASK, &fullsigset, &oldsigset);
514
515 if (pthread_create (&tid, &attr, aio_proc, 0) == 0)
516 started++;
517
518 sigprocmask (SIG_SETMASK, &oldsigset, 0);
519 }
520
521 static void req_send (aio_req req)
522 {
523 while (started < wanted && nreqs >= started)
524 start_thread ();
525
526 ++nreqs;
527
528 pthread_mutex_lock (&reqlock);
529 reqq_push (&req_queue, req);
530 pthread_cond_signal (&reqwait);
531 pthread_mutex_unlock (&reqlock);
532
533 if (nreqs > max_outstanding)
534 for (;;)
535 {
536 poll_cb ();
537
538 if (nreqs <= max_outstanding)
539 break;
540
541 poll_wait ();
542 }
543 }
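/* In short: req_send lazily starts worker threads (up to the count requested via
 * min_parallel) whenever all started threads appear busy, queues the request, and, if
 * more than max_outstanding requests are in flight, blocks the caller and processes
 * results until the count drops below the limit again. */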
544
545 static void end_thread (void)
546 {
547 aio_req req;
548
549 Newz (0, req, 1, aio_cb);
550
551 req->type = REQ_QUIT;
552 req->pri = PRI_MAX + PRI_BIAS;
553
554 req_send (req);
555 }
556
557 static void min_parallel (int nthreads)
558 {
559 if (wanted < nthreads)
560 wanted = nthreads;
561 }
562
563 static void max_parallel (int nthreads)
564 {
565 int cur = started;
566
567 if (wanted > nthreads)
568 wanted = nthreads;
569
570 while (cur > wanted)
571 {
572 end_thread ();
573 cur--;
574 }
575
576 while (started > wanted)
577 {
578 poll_wait ();
579 poll_cb ();
580 }
581 }
582
583 static void create_pipe ()
584 {
585 if (pipe (respipe))
586 croak ("unable to initialize result pipe");
587
588 if (fcntl (respipe [0], F_SETFL, O_NONBLOCK))
589 croak ("cannot set result pipe to nonblocking mode");
590
591 if (fcntl (respipe [1], F_SETFL, O_NONBLOCK))
592 croak ("cannot set result pipe to nonblocking mode");
593 }
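/* The result pipe is the wake-up mechanism for the Perl side: a worker writes one dummy
 * byte whenever it pushes a result onto an empty res_queue (see aio_proc), poll_wait
 * selects on respipe [0] until results are available, and poll_cb drains the pipe once
 * the result queue has been emptied. poll_fileno exposes respipe [0] so external event
 * loops can watch it. */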
594
595 /*****************************************************************************/
596 /* work around various missing functions */
597
598 #if !HAVE_PREADWRITE
599 # define pread aio_pread
600 # define pwrite aio_pwrite
601
602 /*
603 * make our pread/pwrite safe against themselves, but not against
604 * normal read/write by using a mutex. slows down execution a lot,
605 * but that's your problem, not mine.
606 */
607 static pthread_mutex_t preadwritelock = PTHREAD_MUTEX_INITIALIZER;
608
609 static ssize_t pread (int fd, void *buf, size_t count, off_t offset)
610 {
611 ssize_t res;
612 off_t ooffset;
613
614 pthread_mutex_lock (&preadwritelock);
615 ooffset = lseek (fd, 0, SEEK_CUR);
616 lseek (fd, offset, SEEK_SET);
617 res = read (fd, buf, count);
618 lseek (fd, ooffset, SEEK_SET);
619 pthread_mutex_unlock (&preadwritelock);
620
621 return res;
622 }
623
624 static ssize_t pwrite (int fd, void *buf, size_t count, off_t offset)
625 {
626 ssize_t res;
627 off_t ooffset;
628
629 pthread_mutex_lock (&preadwritelock);
630 ooffset = lseek (fd, 0, SEEK_CUR);
631 lseek (fd, offset, SEEK_SET);
632 res = write (fd, buf, count);
633 lseek (fd, ooffset, SEEK_SET);
634 pthread_mutex_unlock (&preadwritelock);
635
636 return res;
637 }
638 #endif
639
640 #if !HAVE_FDATASYNC
641 # define fdatasync fsync
642 #endif
643
644 #if !HAVE_READAHEAD
645 # define readahead aio_readahead
646
647 static ssize_t readahead (int fd, off_t offset, size_t count)
648 {
649 dBUF;
650
651 while (count > 0)
652 {
653 size_t len = count < AIO_BUFSIZE ? count : AIO_BUFSIZE;
654
655 pread (fd, aio_buf, len, offset);
656 offset += len;
657 count -= len;
658 }
659
660 fBUF;
661
662 errno = 0; return 0; /* readahead(2) returns 0 on success */
663 }
664 #endif
665
666 #if !HAVE_READDIR_R
667 # define readdir_r aio_readdir_r
668
669 static pthread_mutex_t readdirlock = PTHREAD_MUTEX_INITIALIZER;
670
671 static int readdir_r (DIR *dirp, struct dirent *ent, struct dirent **res)
672 {
673 struct dirent *e;
674 int errorno;
675
676 pthread_mutex_lock (&readdirlock);
677
678 e = readdir (dirp);
679 errorno = errno;
680
681 if (e)
682 {
683 *res = ent;
684 strcpy (ent->d_name, e->d_name);
685 }
686 else
687 *res = 0;
688
689 pthread_mutex_unlock (&readdirlock);
690
691 errno = errorno;
692 return e ? 0 : -1;
693 }
694 #endif
695
696 /* sendfile always needs an emulation fallback, even where a native implementation exists */
697 static ssize_t sendfile_ (int ofd, int ifd, off_t offset, size_t count)
698 {
699 ssize_t res;
700
701 if (!count)
702 return 0;
703
704 #if HAVE_SENDFILE
705 # if __linux
706 res = sendfile (ofd, ifd, &offset, count);
707
708 # elif __freebsd
709 /*
710 * Of course, the freebsd sendfile is a dire hack with no thoughts
711 * wasted on making it similar to other I/O functions.
712 */
713 {
714 off_t sbytes;
715 res = sendfile (ifd, ofd, offset, count, 0, &sbytes, 0);
716
717 if (res < 0 && sbytes)
718 /* maybe only on EAGAIN: as usual, the manpage leaves you guessing */
719 res = sbytes;
720 }
721
722 # elif __hpux
723 res = sendfile (ofd, ifd, offset, count, 0, 0);
724
725 # elif __solaris
726 {
727 struct sendfilevec vec;
728 size_t sbytes;
729
730 vec.sfv_fd = ifd;
731 vec.sfv_flag = 0;
732 vec.sfv_off = offset;
733 vec.sfv_len = count;
734
735 res = sendfilev (ofd, &vec, 1, &sbytes);
736
737 if (res < 0 && sbytes)
738 res = sbytes;
739 }
740
741 # endif
742 #else
743 res = -1;
744 errno = ENOSYS;
745 #endif
746
747 if (res < 0
748 && (errno == ENOSYS || errno == EINVAL || errno == ENOTSOCK
749 #if __solaris
750 || errno == EAFNOSUPPORT || errno == EPROTOTYPE
751 #endif
752 )
753 )
754 {
755 /* emulate sendfile. this is a major pain in the ass */
756 dBUF;
757
758 res = 0;
759
760 while (count)
761 {
762 ssize_t cnt;
763
764 cnt = pread (ifd, aio_buf, count > AIO_BUFSIZE ? AIO_BUFSIZE : count, offset);
765
766 if (cnt <= 0)
767 {
768 if (cnt && !res) res = -1;
769 break;
770 }
771
772 cnt = write (ofd, aio_buf, cnt);
773
774 if (cnt <= 0)
775 {
776 if (cnt && !res) res = -1;
777 break;
778 }
779
780 offset += cnt;
781 res += cnt;
782 count -= cnt;
783 }
784
785 fBUF;
786 }
787
788 return res;
789 }
790
791 /* read a full directory */
792 static int scandir_ (const char *path, void **namesp)
793 {
794 DIR *dirp;
795 union
796 {
797 struct dirent d;
798 char b [offsetof (struct dirent, d_name) + NAME_MAX + 1];
799 } *u;
800 struct dirent *entp;
801 char *name, *names;
802 int memlen = 4096;
803 int memofs = 0;
804 int res = 0;
805 int errorno;
806
807 dirp = opendir (path);
808 if (!dirp)
809 return -1;
810
811 u = malloc (sizeof (*u));
812 names = malloc (memlen);
813
814 if (u && names)
815 for (;;)
816 {
817 errno = 0;
818 readdir_r (dirp, &u->d, &entp);
819
820 if (!entp)
821 break;
822
823 name = entp->d_name;
824
825 if (name [0] != '.' || (name [1] && (name [1] != '.' || name [2])))
826 {
827 int len = strlen (name) + 1;
828
829 res++;
830
831 while (memofs + len > memlen)
832 {
833 memlen *= 2;
834 names = realloc (names, memlen);
835 if (!names)
836 break;
837 }
838
839 memcpy (names + memofs, name, len);
840 memofs += len;
841 }
842 }
843
844 errorno = errno;
845 free (u);
846 closedir (dirp);
847
848 if (errorno)
849 {
850 free (names);
851 errno = errorno;
852 res = -1;
853 }
854
855 *namesp = (void *)names;
856 return res;
857 }
858
859 /*****************************************************************************/
860
861 static void *aio_proc (void *thr_arg)
862 {
863 aio_req req;
864 int type;
865
866 do
867 {
868 pthread_mutex_lock (&reqlock);
869
870 for (;;)
871 {
872 req = reqq_shift (&req_queue);
873
874 if (req)
875 break;
876
877 pthread_cond_wait (&reqwait, &reqlock);
878 }
879
880 pthread_mutex_unlock (&reqlock);
881
882 errno = 0; /* strictly unnecessary */
883 type = req->type; /* remember type for QUIT check */
884
885 if (!(req->flags & FLAG_CANCELLED))
886 switch (type)
887 {
888 case REQ_READ: req->result = pread (req->fd, req->dataptr, req->length, req->offset); break;
889 case REQ_WRITE: req->result = pwrite (req->fd, req->dataptr, req->length, req->offset); break;
890
891 case REQ_READAHEAD: req->result = readahead (req->fd, req->offset, req->length); break;
892 case REQ_SENDFILE: req->result = sendfile_ (req->fd, req->fd2, req->offset, req->length); break;
893
894 case REQ_STAT: req->result = stat (req->dataptr, req->statdata); break;
895 case REQ_LSTAT: req->result = lstat (req->dataptr, req->statdata); break;
896 case REQ_FSTAT: req->result = fstat (req->fd , req->statdata); break;
897
898 case REQ_OPEN: req->result = open (req->dataptr, req->fd, req->mode); break;
899 case REQ_CLOSE: req->result = close (req->fd); break;
900 case REQ_UNLINK: req->result = unlink (req->dataptr); break;
901 case REQ_RMDIR: req->result = rmdir (req->dataptr); break;
902 case REQ_RENAME: req->result = rename (req->data2ptr, req->dataptr); break;
903 case REQ_LINK: req->result = link (req->data2ptr, req->dataptr); break;
904 case REQ_SYMLINK: req->result = symlink (req->data2ptr, req->dataptr); break;
905
906 case REQ_FDATASYNC: req->result = fdatasync (req->fd); break;
907 case REQ_FSYNC: req->result = fsync (req->fd); break;
908 case REQ_READDIR: req->result = scandir_ (req->dataptr, &req->data2ptr); break;
909
910 case REQ_SLEEP:
911 {
912 struct timeval tv;
913
914 tv.tv_sec = req->fd;
915 tv.tv_usec = req->fd2;
916
917 req->result = select (0, 0, 0, 0, &tv);
918 }
919
920 case REQ_GROUP:
921 case REQ_NOP:
922 case REQ_QUIT:
923 break;
924
925 default:
926 req->result = ENOSYS;
927 break;
928 }
929
930 req->errorno = errno;
931
932 pthread_mutex_lock (&reslock);
933
934 if (!reqq_push (&res_queue, req))
935 /* write a dummy byte to the pipe so fh becomes ready */
936 write (respipe [1], &respipe, 1);
937
938 pthread_mutex_unlock (&reslock);
939 }
940 while (type != REQ_QUIT);
941
942 return 0;
943 }
944
945 /*****************************************************************************/
946
947 static void atfork_prepare (void)
948 {
949 pthread_mutex_lock (&reqlock);
950 pthread_mutex_lock (&reslock);
951 #if !HAVE_PREADWRITE
952 pthread_mutex_lock (&preadwritelock);
953 #endif
954 #if !HAVE_READDIR_R
955 pthread_mutex_lock (&readdirlock);
956 #endif
957 }
958
959 static void atfork_parent (void)
960 {
961 #if !HAVE_READDIR_R
962 pthread_mutex_unlock (&readdirlock);
963 #endif
964 #if !HAVE_PREADWRITE
965 pthread_mutex_unlock (&preadwritelock);
966 #endif
967 pthread_mutex_unlock (&reslock);
968 pthread_mutex_unlock (&reqlock);
969 }
970
971 static void atfork_child (void)
972 {
973 aio_req prv;
974
975 started = 0;
976
977 while ((prv = reqq_shift (&req_queue)))
978 req_free (prv);
979
980 while ((prv = reqq_shift (&res_queue)))
981 req_free (prv);
982
983 close (respipe [0]);
984 close (respipe [1]);
985 create_pipe ();
986
987 atfork_parent ();
988 }
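/* The atfork handlers lock all module mutexes around fork; the child additionally
 * discards any queued or finished requests (worker threads do not survive fork, so
 * started is reset to 0) and recreates the result pipe before unlocking. */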
989
990 #define dREQ \
991 aio_req req; \
992 int req_pri = next_pri; \
993 next_pri = DEFAULT_PRI + PRI_BIAS; \
994 \
995 if (SvOK (callback) && !SvROK (callback)) \
996 croak ("callback must be undef or of reference type"); \
997 \
998 Newz (0, req, 1, aio_cb); \
999 if (!req) \
1000 croak ("out of memory during aio_req allocation"); \
1001 \
1002 req->callback = newSVsv (callback); \
1003 req->pri = req_pri
1004
1005 #define REQ_SEND \
1006 req_send (req); \
1007 \
1008 if (GIMME_V != G_VOID) \
1009 XPUSHs (req_sv (req, AIO_REQ_KLASS));
1010
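/* Convention for the XS request constructors below: dREQ allocates a zeroed aio_cb,
 * records the callback and the priority set by the last aioreq_pri call (resetting it
 * to the default), the body then fills in the type-specific fields, and REQ_SEND queues
 * the request and, unless called in void context, pushes an IO::AIO::REQ object for it
 * onto the Perl stack. */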
1011 MODULE = IO::AIO PACKAGE = IO::AIO
1012
1013 PROTOTYPES: ENABLE
1014
1015 BOOT:
1016 {
1017 HV *stash = gv_stashpv ("IO::AIO", 1);
1018 newCONSTSUB (stash, "EXDEV", newSViv (EXDEV));
1019 newCONSTSUB (stash, "O_RDONLY", newSViv (O_RDONLY));
1020 newCONSTSUB (stash, "O_WRONLY", newSViv (O_WRONLY));
1021
1022 create_pipe ();
1023 pthread_atfork (atfork_prepare, atfork_parent, atfork_child);
1024 }
1025
1026 void
1027 min_parallel (nthreads)
1028 int nthreads
1029 PROTOTYPE: $
1030
1031 void
1032 max_parallel (nthreads)
1033 int nthreads
1034 PROTOTYPE: $
1035
1036 int
1037 max_outstanding (nreqs)
1038 int nreqs
1039 PROTOTYPE: $
1040 CODE:
1041 RETVAL = max_outstanding;
1042 max_outstanding = nreqs;
1043
1044 void
1045 aio_open (pathname,flags,mode,callback=&PL_sv_undef)
1046 SV * pathname
1047 int flags
1048 int mode
1049 SV * callback
1050 PROTOTYPE: $$$;$
1051 PPCODE:
1052 {
1053 dREQ;
1054
1055 req->type = REQ_OPEN;
1056 req->data = newSVsv (pathname);
1057 req->dataptr = SvPVbyte_nolen (req->data);
1058 req->fd = flags;
1059 req->mode = mode;
1060
1061 REQ_SEND;
1062 }
1063
1064 void
1065 aio_close (fh,callback=&PL_sv_undef)
1066 SV * fh
1067 SV * callback
1068 PROTOTYPE: $;$
1069 ALIAS:
1070 aio_close = REQ_CLOSE
1071 aio_fsync = REQ_FSYNC
1072 aio_fdatasync = REQ_FDATASYNC
1073 PPCODE:
1074 {
1075 dREQ;
1076
1077 req->type = ix;
1078 req->fh = newSVsv (fh);
1079 req->fd = PerlIO_fileno (IoIFP (sv_2io (fh)));
1080
1081 REQ_SEND;
1082 }
1083
1084 void
1085 aio_read (fh,offset,length,data,dataoffset,callback=&PL_sv_undef)
1086 SV * fh
1087 UV offset
1088 UV length
1089 SV * data
1090 UV dataoffset
1091 SV * callback
1092 ALIAS:
1093 aio_read = REQ_READ
1094 aio_write = REQ_WRITE
1095 PROTOTYPE: $$$$$;$
1096 PPCODE:
1097 {
1098 aio_req req;
1099 STRLEN svlen;
1100 char *svptr = SvPVbyte (data, svlen);
1101
1102 SvUPGRADE (data, SVt_PV);
1103 SvPOK_on (data);
1104
1105 if (dataoffset < 0)
1106 dataoffset += svlen;
1107
1108 if (dataoffset < 0 || dataoffset > svlen)
1109 croak ("data offset outside of string");
1110
1111 if (ix == REQ_WRITE)
1112 {
1113 /* write: check length and adjust. */
1114 if (length < 0 || length + dataoffset > svlen)
1115 length = svlen - dataoffset;
1116 }
1117 else
1118 {
1119 /* read: grow scalar as necessary */
1120 svptr = SvGROW (data, length + dataoffset);
1121 }
1122
1123 if (length < 0)
1124 croak ("length must not be negative");
1125
1126 {
1127 dREQ;
1128
1129 req->type = ix;
1130 req->fh = newSVsv (fh);
1131 req->fd = PerlIO_fileno (ix == REQ_READ ? IoIFP (sv_2io (fh))
1132 : IoOFP (sv_2io (fh)));
1133 req->offset = offset;
1134 req->length = length;
1135 req->data = SvREFCNT_inc (data);
1136 req->dataptr = (char *)svptr + dataoffset;
1137
1138 if (!SvREADONLY (data))
1139 {
1140 SvREADONLY_on (data);
1141 req->data2ptr = (void *)data;
1142 }
1143
1144 REQ_SEND;
1145 }
1146 }
1147
1148 void
1149 aio_sendfile (out_fh,in_fh,in_offset,length,callback=&PL_sv_undef)
1150 SV * out_fh
1151 SV * in_fh
1152 UV in_offset
1153 UV length
1154 SV * callback
1155 PROTOTYPE: $$$$;$
1156 PPCODE:
1157 {
1158 dREQ;
1159
1160 req->type = REQ_SENDFILE;
1161 req->fh = newSVsv (out_fh);
1162 req->fd = PerlIO_fileno (IoIFP (sv_2io (out_fh)));
1163 req->fh2 = newSVsv (in_fh);
1164 req->fd2 = PerlIO_fileno (IoIFP (sv_2io (in_fh)));
1165 req->offset = in_offset;
1166 req->length = length;
1167
1168 REQ_SEND;
1169 }
1170
1171 void
1172 aio_readahead (fh,offset,length,callback=&PL_sv_undef)
1173 SV * fh
1174 UV offset
1175 IV length
1176 SV * callback
1177 PROTOTYPE: $$$;$
1178 PPCODE:
1179 {
1180 dREQ;
1181
1182 req->type = REQ_READAHEAD;
1183 req->fh = newSVsv (fh);
1184 req->fd = PerlIO_fileno (IoIFP (sv_2io (fh)));
1185 req->offset = offset;
1186 req->length = length;
1187
1188 REQ_SEND;
1189 }
1190
1191 void
1192 aio_stat (fh_or_path,callback=&PL_sv_undef)
1193 SV * fh_or_path
1194 SV * callback
1195 ALIAS:
1196 aio_stat = REQ_STAT
1197 aio_lstat = REQ_LSTAT
1198 PPCODE:
1199 {
1200 dREQ;
1201
1202 New (0, req->statdata, 1, Stat_t);
1203 if (!req->statdata)
1204 {
1205 req_free (req);
1206 croak ("out of memory during aio_req->statdata allocation");
1207 }
1208
1209 if (SvPOK (fh_or_path))
1210 {
1211 req->type = ix;
1212 req->data = newSVsv (fh_or_path);
1213 req->dataptr = SvPVbyte_nolen (req->data);
1214 }
1215 else
1216 {
1217 req->type = REQ_FSTAT;
1218 req->fh = newSVsv (fh_or_path);
1219 req->fd = PerlIO_fileno (IoIFP (sv_2io (fh_or_path)));
1220 }
1221
1222 REQ_SEND;
1223 }
1224
1225 void
1226 aio_unlink (pathname,callback=&PL_sv_undef)
1227 SV * pathname
1228 SV * callback
1229 ALIAS:
1230 aio_unlink = REQ_UNLINK
1231 aio_rmdir = REQ_RMDIR
1232 aio_readdir = REQ_READDIR
1233 PPCODE:
1234 {
1235 dREQ;
1236
1237 req->type = ix;
1238 req->data = newSVsv (pathname);
1239 req->dataptr = SvPVbyte_nolen (req->data);
1240
1241 REQ_SEND;
1242 }
1243
1244 void
1245 aio_link (oldpath,newpath,callback=&PL_sv_undef)
1246 SV * oldpath
1247 SV * newpath
1248 SV * callback
1249 ALIAS:
1250 aio_link = REQ_LINK
1251 aio_symlink = REQ_SYMLINK
1252 aio_rename = REQ_RENAME
1253 PPCODE:
1254 {
1255 dREQ;
1256
1257 req->type = ix;
1258 req->fh = newSVsv (oldpath);
1259 req->data2ptr = SvPVbyte_nolen (req->fh);
1260 req->data = newSVsv (newpath);
1261 req->dataptr = SvPVbyte_nolen (req->data);
1262
1263 REQ_SEND;
1264 }
1265
1266 void
1267 aio_sleep (delay,callback=&PL_sv_undef)
1268 double delay
1269 SV * callback
1270 PPCODE:
1271 {
1272 dREQ;
1273
1274 req->type = REQ_SLEEP;
1275 req->fd = delay < 0. ? 0 : delay;
1276 req->fd2 = delay < 0. ? 0 : 1000. * (delay - req->fd);
1277
1278 REQ_SEND;
1279 }
1280
1281 void
1282 aio_group (callback=&PL_sv_undef)
1283 SV * callback
1284 PROTOTYPE: ;$
1285 PPCODE:
1286 {
1287 dREQ;
1288
1289 req->type = REQ_GROUP;
1290 req_send (req);
1291
1292 XPUSHs (req_sv (req, AIO_GRP_KLASS));
1293 }
1294
1295 void
1296 aio_nop (callback=&PL_sv_undef)
1297 SV * callback
1298 PPCODE:
1299 {
1300 dREQ;
1301
1302 req->type = REQ_NOP;
1303
1304 REQ_SEND;
1305 }
1306
1307 void
1308 aioreq_pri (int pri = DEFAULT_PRI)
1309 CODE:
1310 if (pri < PRI_MIN) pri = PRI_MIN;
1311 if (pri > PRI_MAX) pri = PRI_MAX;
1312 next_pri = pri + PRI_BIAS;
1313
1314 void
1315 flush ()
1316 PROTOTYPE:
1317 CODE:
1318 while (nreqs)
1319 {
1320 poll_wait ();
1321 poll_cb ();
1322 }
1323
1324 void
1325 poll()
1326 PROTOTYPE:
1327 CODE:
1328 if (nreqs)
1329 {
1330 poll_wait ();
1331 poll_cb ();
1332 }
1333
1334 int
1335 poll_fileno()
1336 PROTOTYPE:
1337 CODE:
1338 RETVAL = respipe [0];
1339 OUTPUT:
1340 RETVAL
1341
1342 int
1343 poll_cb(...)
1344 PROTOTYPE:
1345 CODE:
1346 RETVAL = poll_cb ();
1347 OUTPUT:
1348 RETVAL
1349
1350 void
1351 poll_wait()
1352 PROTOTYPE:
1353 CODE:
1354 if (nreqs)
1355 poll_wait ();
1356
1357 int
1358 nreqs()
1359 PROTOTYPE:
1360 CODE:
1361 RETVAL = nreqs;
1362 OUTPUT:
1363 RETVAL
1364
1365 PROTOTYPES: DISABLE
1366
1367 MODULE = IO::AIO PACKAGE = IO::AIO::REQ
1368
1369 void
1370 cancel (aio_req_ornot req)
1371 CODE:
1372 req_cancel (req);
1373
1374 void
1375 cb (aio_req_ornot req, SV *callback=&PL_sv_undef)
1376 CODE:
1377 SvREFCNT_dec (req->callback);
1378 req->callback = newSVsv (callback);
1379
1380 MODULE = IO::AIO PACKAGE = IO::AIO::GRP
1381
1382 void
1383 add (aio_req grp, ...)
1384 PPCODE:
1385 {
1386 int i;
1387 aio_req req;
1388
1389 if (grp->fd == 2)
1390 croak ("cannot add requests to IO::AIO::GRP after the group finished");
1391
1392 for (i = 1; i < items; ++i )
1393 {
1394 if (GIMME_V != G_VOID)
1395 XPUSHs (sv_2mortal (newSVsv (ST (i))));
1396
1397 req = SvAIO_REQ (ST (i));
1398
1399 if (req)
1400 {
1401 ++grp->length;
1402 req->grp = grp;
1403
1404 req->grp_prev = 0;
1405 req->grp_next = grp->grp_first;
1406
1407 if (grp->grp_first)
1408 grp->grp_first->grp_prev = req;
1409
1410 grp->grp_first = req;
1411 }
1412 }
1413 }
1414
1415 void
1416 result (aio_req grp, ...)
1417 CODE:
1418 {
1419 int i;
1420 AV *av = newAV ();
1421
1422 for (i = 1; i < items; ++i )
1423 av_push (av, newSVsv (ST (i)));
1424
1425 SvREFCNT_dec (grp->data);
1426 grp->data = (SV *)av;
1427 }
1428
1429 void
1430 limit (aio_req grp, int limit)
1431 CODE:
1432 grp->fd2 = limit;
1433 aio_grp_feed (grp);
1434
1435 void
1436 feed (aio_req grp, SV *callback=&PL_sv_undef)
1437 CODE:
1438 {
1439 SvREFCNT_dec (grp->fh2);
1440 grp->fh2 = newSVsv (callback);
1441
1442 if (grp->fd2 <= 0)
1443 grp->fd2 = 2;
1444
1445 aio_grp_feed (grp);
1446 }
1447