/cvs/IO-AIO/AIO.xs
Revision: 1.70
Committed: Tue Oct 24 15:15:56 2006 UTC by root
Branch: MAIN
Changes since 1.69: +3 -0 lines
Log Message:
*** empty log message ***

File Contents

# Content
1 /* solaris */
2 #define _POSIX_PTHREAD_SEMANTICS 1
3
4 #if __linux
5 # define _GNU_SOURCE
6 #endif
7
8 #define _REENTRANT 1
9
10 #include <errno.h>
11
12 #include "EXTERN.h"
13 #include "perl.h"
14 #include "XSUB.h"
15
16 #include "autoconf/config.h"
17
18 #include <pthread.h>
19
20 #include <stddef.h>
21 #include <errno.h>
22 #include <sys/time.h>
23 #include <sys/select.h>
24 #include <sys/types.h>
25 #include <sys/stat.h>
26 #include <limits.h>
27 #include <unistd.h>
28 #include <fcntl.h>
29 #include <signal.h>
30 #include <sched.h>
31
32 #if HAVE_SENDFILE
33 # if __linux
34 # include <sys/sendfile.h>
35 # elif __freebsd
36 # include <sys/socket.h>
37 # include <sys/uio.h>
38 # elif __hpux
39 # include <sys/socket.h>
40 # elif __solaris /* not yet */
41 # include <sys/sendfile.h>
42 # else
43 # error sendfile support requested but not available
44 # endif
45 #endif
46
47 /* NAME_MAX is used below to size struct dirent buffers; AIX doesn't define it */
48 #ifndef NAME_MAX
49 # define NAME_MAX 4096
50 #endif
51
52 #if __ia64
53 # define STACKSIZE 65536
54 #elif __i386 || __x86_64 /* 16k is unreasonably high :( */
55 # define STACKSIZE PTHREAD_STACK_MIN
56 #else
57 # define STACKSIZE 16384
58 #endif
59
60 /* buffer size for various temporary buffers */
61 #define AIO_BUFSIZE 65536
62
63 #define dBUF \
64 char *aio_buf = malloc (AIO_BUFSIZE); \
65 if (!aio_buf) \
66 return -1;
67
68 #define fBUF free (aio_buf)
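/*
 * dBUF/fBUF bracket a temporary per-call buffer: dBUF declares and
 * allocates "aio_buf" and makes the enclosing function return -1 when the
 * allocation fails (so it can only be used in functions returning an
 * integer type); fBUF releases the buffer again.  illustrative sketch
 * under those assumptions (not part of the module):
 *
 *   static ssize_t copy_once (int ifd, int ofd)
 *   {
 *     dBUF;
 *
 *     ssize_t len = read (ifd, aio_buf, AIO_BUFSIZE);
 *
 *     if (len > 0)
 *       len = write (ofd, aio_buf, len);
 *
 *     fBUF;
 *     return len;
 *   }
 */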
69
70 enum {
71 REQ_QUIT,
72 REQ_OPEN, REQ_CLOSE,
73 REQ_READ, REQ_WRITE, REQ_READAHEAD,
74 REQ_SENDFILE,
75 REQ_STAT, REQ_LSTAT, REQ_FSTAT,
76 REQ_FSYNC, REQ_FDATASYNC,
77 REQ_UNLINK, REQ_RMDIR, REQ_RENAME,
78 REQ_READDIR,
79 REQ_LINK, REQ_SYMLINK,
80 REQ_GROUP, REQ_NOP,
81 REQ_BUSY,
82 };
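/*
 * the REQ_* constants above name the request types executed by the worker
 * threads.  most map to a (possibly emulated) syscall; the exceptions are
 * REQ_QUIT, which asks a worker thread to terminate, REQ_GROUP, which
 * aggregates other requests, REQ_NOP, which does nothing, and REQ_BUSY,
 * which merely sleeps for a given time inside the worker.
 */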
83
84 #define AIO_REQ_KLASS "IO::AIO::REQ"
85 #define AIO_GRP_KLASS "IO::AIO::GRP"
86
87 typedef struct aio_cb
88 {
89 struct aio_cb *volatile next;
90
91 SV *data, *callback;
92 SV *fh, *fh2;
93 void *dataptr, *data2ptr;
94 Stat_t *statdata;
95 off_t offset;
96 size_t length;
97 ssize_t result;
98
99 STRLEN dataoffset;
100 int type;
101 int fd, fd2;
102 int errorno;
103 mode_t mode; /* open */
104
105 unsigned char flags;
106 unsigned char pri;
107
108 SV *self; /* the perl counterpart of this request, if any */
109 struct aio_cb *grp, *grp_prev, *grp_next, *grp_first;
110 } aio_cb;
111
112 enum {
113 FLAG_CANCELLED = 0x01,
114 };
115
116 typedef aio_cb *aio_req;
117 typedef aio_cb *aio_req_ornot;
118
119 enum {
120 PRI_MIN = -4,
121 PRI_MAX = 4,
122
123 DEFAULT_PRI = 0,
124 PRI_BIAS = -PRI_MIN,
125 NUM_PRI = PRI_MAX + PRI_BIAS + 1,
126 };
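/*
 * user-visible priorities range from PRI_MIN (-4) to PRI_MAX (+4);
 * PRI_BIAS shifts them into the array indices 0 .. NUM_PRI-1 used by the
 * request queues, e.g. -4 -> 0, DEFAULT_PRI (0) -> 4, +4 -> 8.  next_pri
 * (below) holds the biased priority for the next request and is reset to
 * the default by dREQ after every request.
 */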
127
128 static int next_pri = DEFAULT_PRI + PRI_BIAS;
129
130 static int started, wanted;
131 static volatile int nreqs;
132 static int max_outstanding = 1<<30;
133 static int respipe [2];
134
135 #if __linux && defined (PTHREAD_ADAPTIVE_MUTEX_INITIALIZER_NP)
136 # define AIO_MUTEX_INIT PTHREAD_ADAPTIVE_MUTEX_INITIALIZER_NP
137 #else
138 # define AIO_MUTEX_INIT PTHREAD_MUTEX_INITIALIZER
139 #endif
140
141 static pthread_mutex_t reslock = AIO_MUTEX_INIT;
142 static pthread_mutex_t reqlock = AIO_MUTEX_INIT;
143 static pthread_cond_t reqwait = PTHREAD_COND_INITIALIZER;
144
145 /*
146 * a somewhat faster data structure might be nice, but
147 * with 8 priorities this actually needs <20 insns
148 * per shift, the most expensive operation.
149 */
150 typedef struct {
151 aio_req qs[NUM_PRI], qe[NUM_PRI]; /* qstart, qend */
152 int size;
153 } reqq;
154
155 static reqq req_queue;
156 static reqq res_queue;
157
158 static int reqq_push (reqq *q, aio_req req)
159 {
160 int pri = req->pri;
161 req->next = 0;
162
163 if (q->qe[pri])
164 {
165 q->qe[pri]->next = req;
166 q->qe[pri] = req;
167 }
168 else
169 q->qe[pri] = q->qs[pri] = req;
170
171 return q->size++;
172 }
173
174 static aio_req reqq_shift (reqq *q)
175 {
176 int pri;
177
178 if (!q->size)
179 return 0;
180
181 --q->size;
182
183 for (pri = NUM_PRI; pri--; )
184 {
185 aio_req req = q->qs[pri];
186
187 if (req)
188 {
189 if (!(q->qs[pri] = req->next))
190 q->qe[pri] = 0;
191
192 return req;
193 }
194 }
195
196 abort ();
197 }
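/*
 * reqq_push appends a request to the FIFO for its (biased) priority and
 * returns the queue size before the push -- callers use a return value of
 * 0 to detect "queue was empty".  reqq_shift returns the oldest request of
 * the highest non-empty priority, or 0 if the queue is empty.  illustrative
 * use (hypothetical request "req" with a valid biased priority):
 *
 *   reqq q = { 0 };                        // all priority queues empty
 *   int was_empty = !reqq_push (&q, req);  // old size 0 => was empty
 *   aio_req next  = reqq_shift (&q);       // highest priority first
 */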
198
199 static void req_invoke (aio_req req);
200 static void req_free (aio_req req);
201
202 /* must be called at most once */
203 static SV *req_sv (aio_req req, const char *klass)
204 {
205 if (!req->self)
206 {
207 req->self = (SV *)newHV ();
208 sv_magic (req->self, 0, PERL_MAGIC_ext, (char *)req, 0);
209 }
210
211 return sv_2mortal (sv_bless (newRV_inc (req->self), gv_stashpv (klass, 1)));
212 }
213
214 static aio_req SvAIO_REQ (SV *sv)
215 {
216 MAGIC *mg;
217
218 if (!sv_derived_from (sv, AIO_REQ_KLASS) || !SvROK (sv))
219 croak ("object of class " AIO_REQ_KLASS " expected");
220
221 mg = mg_find (SvRV (sv), PERL_MAGIC_ext);
222
223 return mg ? (aio_req)mg->mg_ptr : 0;
224 }
225
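/*
 * group (REQ_GROUP) requests reuse several generic fields:
 *   length    number of outstanding sub-requests
 *   fd        group state: 0 = active, 1 = delayed in poll_cb, 2 = finished
 *   fd2       feeder limit set by limit()/feed()
 *   fh2       feeder callback, called by aio_grp_feed to add more requests
 *   data      AV holding the values passed to result()
 *   grp_first head of the sub-request chain (grp_next/grp_prev in children)
 */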
226 static void aio_grp_feed (aio_req grp)
227 {
228 while (grp->length < grp->fd2 && !(grp->flags & FLAG_CANCELLED))
229 {
230 int old_len = grp->length;
231
232 if (grp->fh2 && SvOK (grp->fh2))
233 {
234 dSP;
235
236 ENTER;
237 SAVETMPS;
238 PUSHMARK (SP);
239 XPUSHs (req_sv (grp, AIO_GRP_KLASS));
240 PUTBACK;
241 call_sv (grp->fh2, G_VOID | G_EVAL | G_KEEPERR);
242 SPAGAIN;
243 FREETMPS;
244 LEAVE;
245 }
246
247 /* stop if no progress has been made */
248 if (old_len == grp->length)
249 {
250 SvREFCNT_dec (grp->fh2);
251 grp->fh2 = 0;
252 break;
253 }
254 }
255 }
256
257 static void aio_grp_dec (aio_req grp)
258 {
259 --grp->length;
260
261 /* call feeder, if applicable */
262 aio_grp_feed (grp);
263
264 /* finish, if done */
265 if (!grp->length && grp->fd)
266 {
267 req_invoke (grp);
268 req_free (grp);
269 }
270 }
271
272 static void poll_wait ()
273 {
274 fd_set rfd;
275
276 while (nreqs)
277 {
278 int size;
279 #if !(__i386 || __x86_64) /* safe without semaphore on these archs */
280 pthread_mutex_lock (&reslock);
281 #endif
282 size = res_queue.size;
283 #if !(__i386 || __x86_64) /* safe without semaphore on these archs */
284 pthread_mutex_unlock (&reslock);
285 #endif
286
287 if (size)
288 return;
289
290 FD_ZERO(&rfd);
291 FD_SET(respipe [0], &rfd);
292
293 select (respipe [0] + 1, &rfd, 0, 0, 0);
294 }
295 }
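/*
 * poll_wait (above) blocks until at least one result is available: it
 * checks the result queue (under reslock, except on x86/x86_64 where the
 * unlocked read is considered safe) and otherwise select()s on the read
 * end of the result pipe, to which a worker writes a byte whenever the
 * result queue becomes non-empty.
 */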
296
297 static void req_invoke (aio_req req)
298 {
299 dSP;
300
301 if (!(req->flags & FLAG_CANCELLED) && SvOK (req->callback))
302 {
303 errno = req->errorno;
304
305 ENTER;
306 SAVETMPS;
307 PUSHMARK (SP);
308 EXTEND (SP, 1);
309
310 switch (req->type)
311 {
312 case REQ_READDIR:
313 {
314 SV *rv = &PL_sv_undef;
315
316 if (req->result >= 0)
317 {
318 char *buf = req->data2ptr;
319 AV *av = newAV ();
320
321 while (req->result)
322 {
323 SV *sv = newSVpv (buf, 0);
324
325 av_push (av, sv);
326 buf += SvCUR (sv) + 1;
327 req->result--;
328 }
329
330 rv = sv_2mortal (newRV_noinc ((SV *)av));
331 }
332
333 PUSHs (rv);
334 }
335 break;
336
337 case REQ_OPEN:
338 {
339 /* convert fd to fh */
340 SV *fh;
341
342 PUSHs (sv_2mortal (newSViv (req->result)));
343 PUTBACK;
344 call_pv ("IO::AIO::_fd2fh", G_SCALAR | G_EVAL);
345 SPAGAIN;
346
347 fh = SvREFCNT_inc (POPs);
348
349 PUSHMARK (SP);
350 XPUSHs (sv_2mortal (fh));
351 }
352 break;
353
354 case REQ_GROUP:
355 req->fd = 2; /* mark group as finished */
356
357 if (req->data)
358 {
359 int i;
360 AV *av = (AV *)req->data;
361
362 EXTEND (SP, AvFILL (av) + 1);
363 for (i = 0; i <= AvFILL (av); ++i)
364 PUSHs (*av_fetch (av, i, 0));
365 }
366 break;
367
368 case REQ_NOP:
369 case REQ_BUSY:
370 break;
371
372 default:
373 PUSHs (sv_2mortal (newSViv (req->result)));
374 break;
375 }
376
377
378 PUTBACK;
379 call_sv (req->callback, G_VOID | G_EVAL);
380 SPAGAIN;
381
382 FREETMPS;
383 LEAVE;
384 }
385
386 if (req->grp)
387 {
388 aio_req grp = req->grp;
389
390 /* unlink request */
391 if (req->grp_next) req->grp_next->grp_prev = req->grp_prev;
392 if (req->grp_prev) req->grp_prev->grp_next = req->grp_next;
393
394 if (grp->grp_first == req)
395 grp->grp_first = req->grp_next;
396
397 aio_grp_dec (grp);
398 }
399
400 if (SvTRUE (ERRSV))
401 {
402 req_free (req);
403 croak (0);
404 }
405 }
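/*
 * req_invoke (above) runs the perl callback for a finished request unless
 * it was cancelled: it sets errno from the request, pushes type-specific
 * results (readdir: array-ref of names or undef, open: a file handle
 * created via IO::AIO::_fd2fh, group: the values passed to result(),
 * default: the integer result), then unlinks the request from its group,
 * which may in turn finish the group.  if the callback died, the request
 * is freed and the error is re-thrown.
 */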
406
407 static void req_free (aio_req req)
408 {
409 if (req->self)
410 {
411 sv_unmagic (req->self, PERL_MAGIC_ext);
412 SvREFCNT_dec (req->self);
413 }
414
415 SvREFCNT_dec (req->data);
416 SvREFCNT_dec (req->fh);
417 SvREFCNT_dec (req->fh2);
418 SvREFCNT_dec (req->callback);
419 Safefree (req->statdata);
420
421 if (req->type == REQ_READDIR && req->result >= 0)
422 free (req->data2ptr);
423
424 Safefree (req);
425 }
426
427 static void req_cancel (aio_req req)
428 {
429 req->flags |= FLAG_CANCELLED;
430
431 if (req->type == REQ_GROUP)
432 {
433 aio_req sub;
434
435 for (sub = req->grp_first; sub; sub = sub->grp_next)
436 req_cancel (sub);
437 }
438 }
439
440 static int poll_cb ()
441 {
442 dSP;
443 int count = 0;
444 int do_croak = 0;
445 aio_req req;
446
447 for (;;)
448 {
449 pthread_mutex_lock (&reslock);
450 req = reqq_shift (&res_queue);
451
452 if (req)
453 {
454 if (!res_queue.size)
455 {
456 /* read any signals sent by the worker threads */
457 char buf [32];
458 while (read (respipe [0], buf, 32) == 32)
459 ;
460 }
461 }
462
463 pthread_mutex_unlock (&reslock);
464
465 if (!req)
466 break;
467
468 --nreqs;
469
470 if (req->type == REQ_QUIT)
471 started--;
472 else if (req->type == REQ_GROUP && req->length)
473 {
474 req->fd = 1; /* mark request as delayed */
475 continue;
476 }
477 else
478 {
479 if (req->type == REQ_READ)
480 SvCUR_set (req->data, req->dataoffset + (req->result > 0 ? req->result : 0));
481
482 if (req->data2ptr && (req->type == REQ_READ || req->type == REQ_WRITE))
483 SvREADONLY_off (req->data);
484
485 if (req->statdata)
486 {
487 PL_laststype = req->type == REQ_LSTAT ? OP_LSTAT : OP_STAT;
488 PL_laststatval = req->result;
489 PL_statcache = *(req->statdata);
490 }
491
492 req_invoke (req);
493
494 count++;
495 }
496
497 req_free (req);
498 }
499
500 return count;
501 }
502
503 static void *aio_proc(void *arg);
504
505 static void start_thread (void)
506 {
507 sigset_t fullsigset, oldsigset;
508 pthread_t tid;
509 pthread_attr_t attr;
510
511 pthread_attr_init (&attr);
512 pthread_attr_setstacksize (&attr, STACKSIZE);
513 pthread_attr_setdetachstate (&attr, PTHREAD_CREATE_DETACHED);
514
515 sigfillset (&fullsigset);
516 sigprocmask (SIG_SETMASK, &fullsigset, &oldsigset);
517
518 if (pthread_create (&tid, &attr, aio_proc, 0) == 0)
519 started++;
520
521 sigprocmask (SIG_SETMASK, &oldsigset, 0);
522 }
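/*
 * start_thread (above) creates one detached worker thread with a small
 * stack.  all signals are blocked around pthread_create, so the worker
 * inherits a completely filled signal mask and signal delivery stays with
 * the main (perl) thread.
 */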
523
524 static void req_send (aio_req req)
525 {
526 while (started < wanted && nreqs >= started)
527 start_thread ();
528
529 ++nreqs;
530
531 pthread_mutex_lock (&reqlock);
532 reqq_push (&req_queue, req);
533 pthread_cond_signal (&reqwait);
534 pthread_mutex_unlock (&reqlock);
535
536 if (nreqs > max_outstanding)
537 for (;;)
538 {
539 poll_cb ();
540
541 if (nreqs <= max_outstanding)
542 break;
543
544 poll_wait ();
545 }
546 }
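/*
 * req_send (above) lazily starts more worker threads while fewer than
 * "wanted" are running and every started thread already has work, then
 * queues the request and wakes one worker.  when more than max_outstanding
 * requests are in flight it throttles the caller by looping over
 * poll_cb/poll_wait until the backlog has drained.
 */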
547
548 static void end_thread (void)
549 {
550 aio_req req;
551
552 Newz (0, req, 1, aio_cb);
553
554 req->type = REQ_QUIT;
555 req->pri = PRI_MAX + PRI_BIAS;
556
557 req_send (req);
558 }
559
560 static void min_parallel (int nthreads)
561 {
562 if (wanted < nthreads)
563 wanted = nthreads;
564 }
565
566 static void max_parallel (int nthreads)
567 {
568 int cur = started;
569
570 if (wanted > nthreads)
571 wanted = nthreads;
572
573 while (cur > wanted)
574 {
575 end_thread ();
576 cur--;
577 }
578
579 while (started > wanted)
580 {
581 poll_wait ();
582 poll_cb ();
583 }
584 }
585
586 static void create_pipe ()
587 {
588 if (pipe (respipe))
589 croak ("unable to initialize result pipe");
590
591 if (fcntl (respipe [0], F_SETFL, O_NONBLOCK))
592 croak ("cannot set result pipe to nonblocking mode");
593
594 if (fcntl (respipe [1], F_SETFL, O_NONBLOCK))
595 croak ("cannot set result pipe to nonblocking mode");
596 }
597
598 /*****************************************************************************/
599 /* work around various missing functions */
600
601 #if !HAVE_PREADWRITE
602 # define pread aio_pread
603 # define pwrite aio_pwrite
604
605 /*
606 * make our pread/pwrite safe against themselves, but not against
607 * normal read/write by using a mutex. slows down execution a lot,
608 * but that's your problem, not mine.
609 */
610 static pthread_mutex_t preadwritelock = PTHREAD_MUTEX_INITIALIZER;
611
612 static ssize_t pread (int fd, void *buf, size_t count, off_t offset)
613 {
614 ssize_t res;
615 off_t ooffset;
616
617 pthread_mutex_lock (&preadwritelock);
618 ooffset = lseek (fd, 0, SEEK_CUR);
619 lseek (fd, offset, SEEK_SET);
620 res = read (fd, buf, count);
621 lseek (fd, ooffset, SEEK_SET);
622 pthread_mutex_unlock (&preadwritelock);
623
624 return res;
625 }
626
627 static ssize_t pwrite (int fd, void *buf, size_t count, off_t offset)
628 {
629 ssize_t res;
630 off_t ooffset;
631
632 pthread_mutex_lock (&preadwritelock);
633 ooffset = lseek (fd, 0, SEEK_CUR);
634 lseek (fd, offset, SEEK_SET);
635 res = write (fd, buf, count);
636 lseek (fd, ooffset, SEEK_SET); /* restore the original file offset */
637 pthread_mutex_unlock (&preadwritelock);
638
639 return res;
640 }
641 #endif
642
643 #if !HAVE_FDATASYNC
644 # define fdatasync fsync
645 #endif
646
647 #if !HAVE_READAHEAD
648 # define readahead aio_readahead
649
650 static ssize_t readahead (int fd, off_t offset, size_t count)
651 {
652 dBUF;
653
654 while (count > 0)
655 {
656 size_t len = count < AIO_BUFSIZE ? count : AIO_BUFSIZE;
657
658 pread (fd, aio_buf, len, offset);
659 offset += len;
660 count -= len;
661 }
662
663 fBUF;
664
665 errno = 0;
 return 0; /* the emulation has no way to report failure */
666 }
667 #endif
668
669 #if !HAVE_READDIR_R
670 # define readdir_r aio_readdir_r
671
672 static pthread_mutex_t readdirlock = PTHREAD_MUTEX_INITIALIZER;
673
674 static int readdir_r (DIR *dirp, struct dirent *ent, struct dirent **res)
675 {
676 struct dirent *e;
677 int errorno;
678
679 pthread_mutex_lock (&readdirlock);
680
681 e = readdir (dirp);
682 errorno = errno;
683
684 if (e)
685 {
686 *res = ent;
687 strcpy (ent->d_name, e->d_name);
688 }
689 else
690 *res = 0;
691
692 pthread_mutex_unlock (&readdirlock);
693
694 errno = errorno;
695 return e ? 0 : -1;
696 }
697 #endif
698
699 /* sendfile always needs emulation */
700 static ssize_t sendfile_ (int ofd, int ifd, off_t offset, size_t count)
701 {
702 ssize_t res;
703
704 if (!count)
705 return 0;
706
707 #if HAVE_SENDFILE
708 # if __linux
709 res = sendfile (ofd, ifd, &offset, count);
710
711 # elif __freebsd
712 /*
713 * Of course, the freebsd sendfile is a dire hack with no thoughts
714 * wasted on making it similar to other I/O functions.
715 */
716 {
717 off_t sbytes;
718 res = sendfile (ifd, ofd, offset, count, 0, &sbytes, 0);
719
720 if (res < 0 && sbytes)
721 /* maybe only on EAGAIN: as usual, the manpage leaves you guessing */
722 res = sbytes;
723 }
724
725 # elif __hpux
726 res = sendfile (ofd, ifd, offset, count, 0, 0);
727
728 # elif __solaris
729 {
730 struct sendfilevec vec;
731 size_t sbytes;
732
733 vec.sfv_fd = ifd;
734 vec.sfv_flag = 0;
735 vec.sfv_off = offset;
736 vec.sfv_len = count;
737
738 res = sendfilev (ofd, &vec, 1, &sbytes);
739
740 if (res < 0 && sbytes)
741 res = sbytes;
742 }
743
744 # endif
745 #else
746 res = -1;
747 errno = ENOSYS;
748 #endif
749
750 if (res < 0
751 && (errno == ENOSYS || errno == EINVAL || errno == ENOTSOCK
752 #if __solaris
753 || errno == EAFNOSUPPORT || errno == EPROTOTYPE
754 #endif
755 )
756 )
757 {
758 /* emulate sendfile. this is a major pain in the ass */
759 dBUF;
760
761 res = 0;
762
763 while (count)
764 {
765 ssize_t cnt;
766
767 cnt = pread (ifd, aio_buf, count > AIO_BUFSIZE ? AIO_BUFSIZE : count, offset);
768
769 if (cnt <= 0)
770 {
771 if (cnt && !res) res = -1;
772 break;
773 }
774
775 cnt = write (ofd, aio_buf, cnt);
776
777 if (cnt <= 0)
778 {
779 if (cnt && !res) res = -1;
780 break;
781 }
782
783 offset += cnt;
784 res += cnt;
785 count -= cnt;
786 }
787
788 fBUF;
789 }
790
791 return res;
792 }
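/*
 * sendfile_ (above) uses the native sendfile/sendfilev where configured
 * and falls back to a pread/write copy loop when sendfile is unavailable
 * or rejects the descriptor types (ENOSYS, EINVAL, ENOTSOCK, ...).  it
 * returns the number of bytes transferred before any error, or -1 if
 * nothing could be copied.
 */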
793
794 /* read a full directory */
795 static int scandir_ (const char *path, void **namesp)
796 {
797 DIR *dirp;
798 union
799 {
800 struct dirent d;
801 char b [offsetof (struct dirent, d_name) + NAME_MAX + 1];
802 } *u;
803 struct dirent *entp;
804 char *name, *names;
805 int memlen = 4096;
806 int memofs = 0;
807 int res = 0;
808 int errorno;
809
810 dirp = opendir (path);
811 if (!dirp)
812 return -1;
813
814 u = malloc (sizeof (*u));
815 names = malloc (memlen);
816
817 if (u && names)
818 for (;;)
819 {
820 errno = 0;
821 readdir_r (dirp, &u->d, &entp);
822
823 if (!entp)
824 break;
825
826 name = entp->d_name;
827
828 if (name [0] != '.' || (name [1] && (name [1] != '.' || name [2])))
829 {
830 int len = strlen (name) + 1;
831
832 res++;
833
834 while (memofs + len > memlen)
835 {
836 memlen *= 2;
837 names = realloc (names, memlen);
838 if (!names)
839 break;
840 }
841
 if (!names)
 break; /* allocation failed, stop reading entries */
842 memcpy (names + memofs, name, len);
843 memofs += len;
844 }
845 }
846
847 errorno = errno;
848 free (u);
849 closedir (dirp);
850
851 if (errorno)
852 {
853 free (names);
854 errno = errorno;
855 res = -1;
856 }
857
858 *namesp = (void *)names;
859 return res;
860 }
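/*
 * scandir_ (above) reads a whole directory: it returns the number of
 * entries (excluding "." and ".."), or -1 on error, and stores a
 * malloc()ed buffer of NUL-separated names in *namesp, which the caller
 * must free on success.  usage sketch (illustration only, not part of the
 * module):
 *
 *   char *buf;
 *   int n = scandir_ (".", (void **)&buf);
 *
 *   if (n >= 0)
 *     {
 *       char *p = buf;
 *
 *       while (n--)
 *         {
 *           puts (p);
 *           p += strlen (p) + 1;
 *         }
 *
 *       free (buf);
 *     }
 */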
861
862 /*****************************************************************************/
863
864 static void *aio_proc (void *thr_arg)
865 {
866 aio_req req;
867 int type;
868
869 do
870 {
871 pthread_mutex_lock (&reqlock);
872
873 for (;;)
874 {
875 req = reqq_shift (&req_queue);
876
877 if (req)
878 break;
879
880 pthread_cond_wait (&reqwait, &reqlock);
881 }
882
883 pthread_mutex_unlock (&reqlock);
884
885 errno = 0; /* strictly unnecessary */
886 type = req->type; /* remember type for QUIT check */
887
888 if (!(req->flags & FLAG_CANCELLED))
889 switch (type)
890 {
891 case REQ_READ: req->result = pread (req->fd, req->dataptr, req->length, req->offset); break;
892 case REQ_WRITE: req->result = pwrite (req->fd, req->dataptr, req->length, req->offset); break;
893
894 case REQ_READAHEAD: req->result = readahead (req->fd, req->offset, req->length); break;
895 case REQ_SENDFILE: req->result = sendfile_ (req->fd, req->fd2, req->offset, req->length); break;
896
897 case REQ_STAT: req->result = stat (req->dataptr, req->statdata); break;
898 case REQ_LSTAT: req->result = lstat (req->dataptr, req->statdata); break;
899 case REQ_FSTAT: req->result = fstat (req->fd , req->statdata); break;
900
901 case REQ_OPEN: req->result = open (req->dataptr, req->fd, req->mode); break;
902 case REQ_CLOSE: req->result = close (req->fd); break;
903 case REQ_UNLINK: req->result = unlink (req->dataptr); break;
904 case REQ_RMDIR: req->result = rmdir (req->dataptr); break;
905 case REQ_RENAME: req->result = rename (req->data2ptr, req->dataptr); break;
906 case REQ_LINK: req->result = link (req->data2ptr, req->dataptr); break;
907 case REQ_SYMLINK: req->result = symlink (req->data2ptr, req->dataptr); break;
908
909 case REQ_FDATASYNC: req->result = fdatasync (req->fd); break;
910 case REQ_FSYNC: req->result = fsync (req->fd); break;
911 case REQ_READDIR: req->result = scandir_ (req->dataptr, &req->data2ptr); break;
912
913 case REQ_BUSY:
914 {
915 struct timeval tv;
916
917 tv.tv_sec = req->fd;
918 tv.tv_usec = req->fd2;
919
920 req->result = select (0, 0, 0, 0, &tv);
921 }
922
923 case REQ_GROUP:
924 case REQ_NOP:
925 case REQ_QUIT:
926 break;
927
928 default:
929 req->result = ENOSYS;
930 break;
931 }
932
933 req->errorno = errno;
934
935 pthread_mutex_lock (&reslock);
936
937 if (!reqq_push (&res_queue, req))
938 /* write a dummy byte to the pipe so fh becomes ready */
939 write (respipe [1], &respipe, 1);
940
941 pthread_mutex_unlock (&reslock);
942 }
943 while (type != REQ_QUIT);
944
945 return 0;
946 }
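/*
 * aio_proc (above) is the worker thread body: it waits on reqwait for
 * work, executes the request unless it was cancelled, records errno,
 * pushes the request onto the result queue (writing one byte to the
 * result pipe when that queue was previously empty), and exits after
 * processing a REQ_QUIT request.
 */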
947
948 /*****************************************************************************/
949
950 static void atfork_prepare (void)
951 {
952 pthread_mutex_lock (&reqlock);
953 pthread_mutex_lock (&reslock);
954 #if !HAVE_PREADWRITE
955 pthread_mutex_lock (&preadwritelock);
956 #endif
957 #if !HAVE_READDIR_R
958 pthread_mutex_lock (&readdirlock);
959 #endif
960 }
961
962 static void atfork_parent (void)
963 {
964 #if !HAVE_READDIR_R
965 pthread_mutex_unlock (&readdirlock);
966 #endif
967 #if !HAVE_PREADWRITE
968 pthread_mutex_unlock (&preadwritelock);
969 #endif
970 pthread_mutex_unlock (&reslock);
971 pthread_mutex_unlock (&reqlock);
972 }
973
974 static void atfork_child (void)
975 {
976 aio_req prv;
977
978 started = 0;
979
980 while ((prv = reqq_shift (&req_queue)))
981 req_free (prv);
982
983 while ((prv = reqq_shift (&res_queue)))
984 req_free (prv);
985
986 close (respipe [0]);
987 close (respipe [1]);
988 create_pipe ();
989
990 atfork_parent ();
991 }
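/*
 * atfork_child (above) runs in the child after a fork: no worker threads
 * survive the fork, so "started" is reset, all queued requests and results
 * are discarded, and the result pipe is recreated; new workers are started
 * lazily by the next req_send.
 */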
992
993 #define dREQ \
994 aio_req req; \
995 int req_pri = next_pri; \
996 next_pri = DEFAULT_PRI + PRI_BIAS; \
997 \
998 if (SvOK (callback) && !SvROK (callback)) \
999 croak ("callback must be undef or of reference type"); \
1000 \
1001 Newz (0, req, 1, aio_cb); \
1002 if (!req) \
1003 croak ("out of memory during aio_req allocation"); \
1004 \
1005 req->callback = newSVsv (callback); \
1006 req->pri = req_pri
1007
1008 #define REQ_SEND \
1009 req_send (req); \
1010 \
1011 if (GIMME_V != G_VOID) \
1012 XPUSHs (req_sv (req, AIO_REQ_KLASS));
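/*
 * dREQ allocates and initialises a request, captures the callback and
 * consumes the one-shot priority set by aioreq_pri/aioreq_nice; REQ_SEND
 * queues the request and, unless called in void context, pushes the
 * corresponding IO::AIO::REQ object onto the perl stack.
 */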
1013
1014 MODULE = IO::AIO PACKAGE = IO::AIO
1015
1016 PROTOTYPES: ENABLE
1017
1018 BOOT:
1019 {
1020 HV *stash = gv_stashpv ("IO::AIO", 1);
1021 newCONSTSUB (stash, "EXDEV", newSViv (EXDEV));
1022 newCONSTSUB (stash, "O_RDONLY", newSViv (O_RDONLY));
1023 newCONSTSUB (stash, "O_WRONLY", newSViv (O_WRONLY));
1024
1025 create_pipe ();
1026 pthread_atfork (atfork_prepare, atfork_parent, atfork_child);
1027 }
1028
1029 void
1030 min_parallel (nthreads)
1031 int nthreads
1032 PROTOTYPE: $
1033
1034 void
1035 max_parallel (nthreads)
1036 int nthreads
1037 PROTOTYPE: $
1038
1039 int
1040 max_outstanding (nreqs)
1041 int nreqs
1042 PROTOTYPE: $
1043 CODE:
1044 RETVAL = max_outstanding;
1045 max_outstanding = nreqs;
1046
1047 void
1048 aio_open (pathname,flags,mode,callback=&PL_sv_undef)
1049 SV * pathname
1050 int flags
1051 int mode
1052 SV * callback
1053 PROTOTYPE: $$$;$
1054 PPCODE:
1055 {
1056 dREQ;
1057
1058 req->type = REQ_OPEN;
1059 req->data = newSVsv (pathname);
1060 req->dataptr = SvPVbyte_nolen (req->data);
1061 req->fd = flags;
1062 req->mode = mode;
1063
1064 REQ_SEND;
1065 }
1066
1067 void
1068 aio_close (fh,callback=&PL_sv_undef)
1069 SV * fh
1070 SV * callback
1071 PROTOTYPE: $;$
1072 ALIAS:
1073 aio_close = REQ_CLOSE
1074 aio_fsync = REQ_FSYNC
1075 aio_fdatasync = REQ_FDATASYNC
1076 PPCODE:
1077 {
1078 dREQ;
1079
1080 req->type = ix;
1081 req->fh = newSVsv (fh);
1082 req->fd = PerlIO_fileno (IoIFP (sv_2io (fh)));
1083
1084 REQ_SEND;
1085 }
1086
1087 void
1088 aio_read (fh,offset,length,data,dataoffset,callback=&PL_sv_undef)
1089 SV * fh
1090 UV offset
1091 IV length
1092 SV * data
1093 IV dataoffset
1094 SV * callback
1095 ALIAS:
1096 aio_read = REQ_READ
1097 aio_write = REQ_WRITE
1098 PROTOTYPE: $$$$$;$
1099 PPCODE:
1100 {
1101 aio_req req;
1102 STRLEN svlen;
1103 char *svptr = SvPVbyte (data, svlen);
1104
1105 SvUPGRADE (data, SVt_PV);
1106 SvPOK_on (data);
1107
1108 if (dataoffset < 0)
1109 dataoffset += svlen;
1110
1111 if (dataoffset < 0 || dataoffset > svlen)
1112 croak ("data offset outside of string");
1113
1114 if (ix == REQ_WRITE)
1115 {
1116 /* write: check length and adjust. */
1117 if (length < 0 || length + dataoffset > svlen)
1118 length = svlen - dataoffset;
1119 }
1120 else
1121 {
1122 /* read: grow scalar as necessary */
1123 svptr = SvGROW (data, length + dataoffset);
1124 }
1125
1126 if (length < 0)
1127 croak ("length must not be negative");
1128
1129 {
1130 dREQ;
1131
1132 req->type = ix;
1133 req->fh = newSVsv (fh);
1134 req->fd = PerlIO_fileno (ix == REQ_READ ? IoIFP (sv_2io (fh))
1135 : IoOFP (sv_2io (fh)));
1136 req->offset = offset;
1137 req->length = length;
1138 req->data = SvREFCNT_inc (data);
1139 req->dataptr = (char *)svptr + dataoffset;
1140
1141 if (!SvREADONLY (data))
1142 {
1143 SvREADONLY_on (data);
1144 req->data2ptr = (void *)data;
1145 }
1146
1147 REQ_SEND;
1148 }
1149 }
1150
1151 void
1152 aio_sendfile (out_fh,in_fh,in_offset,length,callback=&PL_sv_undef)
1153 SV * out_fh
1154 SV * in_fh
1155 UV in_offset
1156 UV length
1157 SV * callback
1158 PROTOTYPE: $$$$;$
1159 PPCODE:
1160 {
1161 dREQ;
1162
1163 req->type = REQ_SENDFILE;
1164 req->fh = newSVsv (out_fh);
1165 req->fd = PerlIO_fileno (IoIFP (sv_2io (out_fh)));
1166 req->fh2 = newSVsv (in_fh);
1167 req->fd2 = PerlIO_fileno (IoIFP (sv_2io (in_fh)));
1168 req->offset = in_offset;
1169 req->length = length;
1170
1171 REQ_SEND;
1172 }
1173
1174 void
1175 aio_readahead (fh,offset,length,callback=&PL_sv_undef)
1176 SV * fh
1177 UV offset
1178 IV length
1179 SV * callback
1180 PROTOTYPE: $$$;$
1181 PPCODE:
1182 {
1183 dREQ;
1184
1185 req->type = REQ_READAHEAD;
1186 req->fh = newSVsv (fh);
1187 req->fd = PerlIO_fileno (IoIFP (sv_2io (fh)));
1188 req->offset = offset;
1189 req->length = length;
1190
1191 REQ_SEND;
1192 }
1193
1194 void
1195 aio_stat (fh_or_path,callback=&PL_sv_undef)
1196 SV * fh_or_path
1197 SV * callback
1198 ALIAS:
1199 aio_stat = REQ_STAT
1200 aio_lstat = REQ_LSTAT
1201 PPCODE:
1202 {
1203 dREQ;
1204
1205 New (0, req->statdata, 1, Stat_t);
1206 if (!req->statdata)
1207 {
1208 req_free (req);
1209 croak ("out of memory during aio_req->statdata allocation");
1210 }
1211
1212 if (SvPOK (fh_or_path))
1213 {
1214 req->type = ix;
1215 req->data = newSVsv (fh_or_path);
1216 req->dataptr = SvPVbyte_nolen (req->data);
1217 }
1218 else
1219 {
1220 req->type = REQ_FSTAT;
1221 req->fh = newSVsv (fh_or_path);
1222 req->fd = PerlIO_fileno (IoIFP (sv_2io (fh_or_path)));
1223 }
1224
1225 REQ_SEND;
1226 }
1227
1228 void
1229 aio_unlink (pathname,callback=&PL_sv_undef)
1230 SV * pathname
1231 SV * callback
1232 ALIAS:
1233 aio_unlink = REQ_UNLINK
1234 aio_rmdir = REQ_RMDIR
1235 aio_readdir = REQ_READDIR
1236 PPCODE:
1237 {
1238 dREQ;
1239
1240 req->type = ix;
1241 req->data = newSVsv (pathname);
1242 req->dataptr = SvPVbyte_nolen (req->data);
1243
1244 REQ_SEND;
1245 }
1246
1247 void
1248 aio_link (oldpath,newpath,callback=&PL_sv_undef)
1249 SV * oldpath
1250 SV * newpath
1251 SV * callback
1252 ALIAS:
1253 aio_link = REQ_LINK
1254 aio_symlink = REQ_SYMLINK
1255 aio_rename = REQ_RENAME
1256 PPCODE:
1257 {
1258 dREQ;
1259
1260 req->type = ix;
1261 req->fh = newSVsv (oldpath);
1262 req->data2ptr = SvPVbyte_nolen (req->fh);
1263 req->data = newSVsv (newpath);
1264 req->dataptr = SvPVbyte_nolen (req->data);
1265
1266 REQ_SEND;
1267 }
1268
1269 void
1270 aio_busy (delay,callback=&PL_sv_undef)
1271 double delay
1272 SV * callback
1273 PPCODE:
1274 {
1275 dREQ;
1276
1277 req->type = REQ_BUSY;
1278 req->fd = delay < 0. ? 0 : delay;
1279 req->fd2 = delay < 0. ? 0 : 1000000. * (delay - req->fd); /* fractional seconds -> microseconds */
1280
1281 REQ_SEND;
1282 }
1283
1284 void
1285 aio_group (callback=&PL_sv_undef)
1286 SV * callback
1287 PROTOTYPE: ;$
1288 PPCODE:
1289 {
1290 dREQ;
1291
1292 req->type = REQ_GROUP;
1293 req_send (req);
1294
1295 XPUSHs (req_sv (req, AIO_GRP_KLASS));
1296 }
1297
1298 void
1299 aio_nop (callback=&PL_sv_undef)
1300 SV * callback
1301 PPCODE:
1302 {
1303 dREQ;
1304
1305 req->type = REQ_NOP;
1306
1307 REQ_SEND;
1308 }
1309
1310 void
1311 aioreq_pri (int pri = DEFAULT_PRI)
1312 CODE:
1313 if (pri < PRI_MIN) pri = PRI_MIN;
1314 if (pri > PRI_MAX) pri = PRI_MAX;
1315 next_pri = pri + PRI_BIAS;
1316
1317 void
1318 aioreq_nice (int nice = 0)
1319 CODE:
1320 nice = next_pri - nice;
1321 if (nice < PRI_MIN) nice = PRI_MIN;
1322 if (nice > PRI_MAX) nice = PRI_MAX;
1323 next_pri = nice + PRI_BIAS;
1324
1325 void
1326 flush ()
1327 PROTOTYPE:
1328 CODE:
1329 while (nreqs)
1330 {
1331 poll_wait ();
1332 poll_cb ();
1333 }
1334
1335 void
1336 poll()
1337 PROTOTYPE:
1338 CODE:
1339 if (nreqs)
1340 {
1341 poll_wait ();
1342 poll_cb ();
1343 }
1344
1345 int
1346 poll_fileno()
1347 PROTOTYPE:
1348 CODE:
1349 RETVAL = respipe [0];
1350 OUTPUT:
1351 RETVAL
1352
1353 int
1354 poll_cb(...)
1355 PROTOTYPE:
1356 CODE:
1357 RETVAL = poll_cb ();
1358 OUTPUT:
1359 RETVAL
1360
1361 void
1362 poll_wait()
1363 PROTOTYPE:
1364 CODE:
1365 if (nreqs)
1366 poll_wait ();
1367
1368 int
1369 nreqs()
1370 PROTOTYPE:
1371 CODE:
1372 RETVAL = nreqs;
1373 OUTPUT:
1374 RETVAL
1375
1376 PROTOTYPES: DISABLE
1377
1378 MODULE = IO::AIO PACKAGE = IO::AIO::REQ
1379
1380 void
1381 cancel (aio_req_ornot req)
1382 CODE:
1383 req_cancel (req);
1384
1385 void
1386 cb (aio_req_ornot req, SV *callback=&PL_sv_undef)
1387 CODE:
1388 SvREFCNT_dec (req->callback);
1389 req->callback = newSVsv (callback);
1390
1391 MODULE = IO::AIO PACKAGE = IO::AIO::GRP
1392
1393 void
1394 add (aio_req grp, ...)
1395 PPCODE:
1396 {
1397 int i;
1398 aio_req req;
1399
1400 if (grp->fd == 2)
1401 croak ("cannot add requests to IO::AIO::GRP after the group finished");
1402
1403 for (i = 1; i < items; ++i )
1404 {
1405 if (GIMME_V != G_VOID)
1406 XPUSHs (sv_2mortal (newSVsv (ST (i))));
1407
1408 req = SvAIO_REQ (ST (i));
1409
1410 if (req)
1411 {
1412 ++grp->length;
1413 req->grp = grp;
1414
1415 req->grp_prev = 0;
1416 req->grp_next = grp->grp_first;
1417
1418 if (grp->grp_first)
1419 grp->grp_first->grp_prev = req;
1420
1421 grp->grp_first = req;
1422 }
1423 }
1424 }
1425
1426 void
1427 result (aio_req grp, ...)
1428 CODE:
1429 {
1430 int i;
1431 AV *av = newAV ();
1432
1433 for (i = 1; i < items; ++i )
1434 av_push (av, newSVsv (ST (i)));
1435
1436 SvREFCNT_dec (grp->data);
1437 grp->data = (SV *)av;
1438 }
1439
1440 void
1441 limit (aio_req grp, int limit)
1442 CODE:
1443 grp->fd2 = limit;
1444 aio_grp_feed (grp);
1445
1446 void
1447 feed (aio_req grp, SV *callback=&PL_sv_undef)
1448 CODE:
1449 {
1450 SvREFCNT_dec (grp->fh2);
1451 grp->fh2 = newSVsv (callback);
1452
1453 if (grp->fd2 <= 0)
1454 grp->fd2 = 2;
1455
1456 aio_grp_feed (grp);
1457 }
1458