/cvs/IO-AIO/AIO.xs
Revision: 1.74
Committed: Wed Oct 25 17:57:30 2006 UTC (17 years, 6 months ago) by root
Branch: MAIN
Changes since 1.73: +1 -20 lines
Log Message:
*** empty log message ***

File Contents

# Content
1 /* solaris */
2 #define _POSIX_PTHREAD_SEMANTICS 1
3
4 #if __linux
5 # define _GNU_SOURCE
6 #endif
7
8 #define _REENTRANT 1
9
10 #include <errno.h>
11
12 #include "EXTERN.h"
13 #include "perl.h"
14 #include "XSUB.h"
15
16 #include "autoconf/config.h"
17
18 #include <pthread.h>
19
20 #include <stddef.h>
21 #include <errno.h>
22 #include <sys/time.h>
23 #include <sys/select.h>
24 #include <sys/types.h>
25 #include <sys/stat.h>
26 #include <limits.h>
27 #include <unistd.h>
28 #include <fcntl.h>
29 #include <signal.h>
30 #include <sched.h>
31
32 #if HAVE_SENDFILE
33 # if __linux
34 # include <sys/sendfile.h>
35 # elif __freebsd
36 # include <sys/socket.h>
37 # include <sys/uio.h>
38 # elif __hpux
39 # include <sys/socket.h>
40 # elif __solaris /* not yet */
41 # include <sys/sendfile.h>
42 # else
43 # error sendfile support requested but not available
44 # endif
45 #endif
46
47 /* NAME_MAX is used to size struct dirent below; AIX doesn't define it */
48 #ifndef NAME_MAX
49 # define NAME_MAX 4096
50 #endif
51
52 #if __ia64
53 # define STACKSIZE 65536
54 #elif __i386 || __x86_64 /* 16k is unreasonably high :( */
55 # define STACKSIZE PTHREAD_STACK_MIN
56 #else
57 # define STACKSIZE 16384
58 #endif
59
60 /* buffer size for various temporary buffers */
61 #define AIO_BUFSIZE 65536
62
63 #define dBUF \
64 char *aio_buf; \
65 LOCK (wrklock); \
66 self->dbuf = aio_buf = malloc (AIO_BUFSIZE); \
67 UNLOCK (wrklock); \
68 if (!aio_buf) \
69 return -1;
70
71 enum {
72 REQ_QUIT,
73 REQ_OPEN, REQ_CLOSE,
74 REQ_READ, REQ_WRITE, REQ_READAHEAD,
75 REQ_SENDFILE,
76 REQ_STAT, REQ_LSTAT, REQ_FSTAT,
77 REQ_FSYNC, REQ_FDATASYNC,
78 REQ_UNLINK, REQ_RMDIR, REQ_RENAME,
79 REQ_READDIR,
80 REQ_LINK, REQ_SYMLINK,
81 REQ_GROUP, REQ_NOP,
82 REQ_BUSY,
83 };
84
85 #define AIO_REQ_KLASS "IO::AIO::REQ"
86 #define AIO_GRP_KLASS "IO::AIO::GRP"
87
88 typedef struct aio_cb
89 {
90 struct aio_cb *volatile next;
91
92 SV *data, *callback;
93 SV *fh, *fh2;
94 void *dataptr, *data2ptr;
95 Stat_t *statdata;
96 off_t offset;
97 size_t length;
98 ssize_t result;
99
100 STRLEN dataoffset;
101 int type;
102 int fd, fd2;
103 int errorno;
104 mode_t mode; /* open */
105
106 unsigned char flags;
107 unsigned char pri;
108
109 SV *self; /* the perl counterpart of this request, if any */
110 struct aio_cb *grp, *grp_prev, *grp_next, *grp_first;
111 } aio_cb;
112
113 enum {
114 FLAG_CANCELLED = 0x01,
115 };
116
117 typedef aio_cb *aio_req;
118 typedef aio_cb *aio_req_ornot;
119
120 enum {
121 PRI_MIN = -4,
122 PRI_MAX = 4,
123
124 DEFAULT_PRI = 0,
125 PRI_BIAS = -PRI_MIN,
126 NUM_PRI = PRI_MAX + PRI_BIAS + 1,
127 };
128
129 static int next_pri = DEFAULT_PRI + PRI_BIAS;
130
131 static int started, wanted;
132 static volatile int nreqs;
133 static int respipe [2];
134
135 #if __linux && defined (PTHREAD_ADAPTIVE_MUTEX_INITIALIZER_NP)
136 # define AIO_MUTEX_INIT PTHREAD_ADAPTIVE_MUTEX_INITIALIZER_NP
137 #else
138 # define AIO_MUTEX_INIT PTHREAD_MUTEX_INITIALIZER
139 #endif
140
141 #define LOCK(mutex) pthread_mutex_lock (&(mutex))
142 #define UNLOCK(mutex) pthread_mutex_unlock (&(mutex))
143
144 /* worker thread management */
145 static pthread_mutex_t wrklock = AIO_MUTEX_INIT;
146
147 typedef struct worker {
148 /* locked by wrklock */
149 struct worker *prev, *next;
150
151 pthread_t tid;
152
153 /* locked by reslock, reqlock or wrklock */
154 aio_req req; /* currently processed request */
155 void *dbuf;
156 DIR *dirp;
157 } worker;
158
159 static worker wrk_first = { &wrk_first, &wrk_first, 0 };
160
161 static void worker_clear (worker *wrk)
162 {
163 if (wrk->dirp)
164 {
165 closedir (wrk->dirp);
166 wrk->dirp = 0;
167 }
168
169 if (wrk->dbuf)
170 {
171 free (wrk->dbuf);
172 wrk->dbuf = 0;
173 }
174 }
175
176 static void worker_free (worker *wrk)
177 {
178 wrk->next->prev = wrk->prev;
179 wrk->prev->next = wrk->next;
180
181 free (wrk);
182 }
183
184 static pthread_mutex_t reslock = AIO_MUTEX_INIT;
185 static pthread_mutex_t reqlock = AIO_MUTEX_INIT;
186 static pthread_cond_t reqwait = PTHREAD_COND_INITIALIZER;
187
188 /*
189 * a somewhat faster data structure might be nice, but
190 * with 8 priorities this actually needs <20 insns
191 * per shift, the most expensive operation.
192 */
193 typedef struct {
194 aio_req qs[NUM_PRI], qe[NUM_PRI]; /* qstart, qend */
195 int size;
196 } reqq;
197
198 static reqq req_queue;
199 static reqq res_queue;
200
201 int reqq_push (reqq *q, aio_req req)
202 {
203 int pri = req->pri;
204 req->next = 0;
205
206 if (q->qe[pri])
207 {
208 q->qe[pri]->next = req;
209 q->qe[pri] = req;
210 }
211 else
212 q->qe[pri] = q->qs[pri] = req;
213
214 return q->size++;
215 }
216
217 aio_req reqq_shift (reqq *q)
218 {
219 int pri;
220
221 if (!q->size)
222 return 0;
223
224 --q->size;
225
226 for (pri = NUM_PRI; pri--; )
227 {
228 aio_req req = q->qs[pri];
229
230 if (req)
231 {
232 if (!(q->qs[pri] = req->next))
233 q->qe[pri] = 0;
234
235 return req;
236 }
237 }
238
239 abort ();
240 }
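
The queue above keeps one FIFO per priority band (PRI_MIN = -4 .. PRI_MAX = 4 map to bands 0..8 via PRI_BIAS, so DEFAULT_PRI lands in band 4 and the REQ_QUIT requests created by end_thread use the top band 8), and reqq_shift scans the bands from highest to lowest. A minimal, self-contained sketch of the same idea, using plain ints and fixed-size arrays instead of linked aio_req lists (illustration only, not part of AIO.xs):

    #include <stdio.h>

    #define NBANDS 9                        /* NUM_PRI above: PRI_MAX + PRI_BIAS + 1 */

    typedef struct {
      int val [NBANDS][16];                 /* one small FIFO per band */
      int head [NBANDS], tail [NBANDS];
      int size;
    } int_q;

    static void int_q_push (int_q *q, int band, int v)
    {
      q->val [band][q->tail [band]++] = v;  /* append to that band's FIFO */
      q->size++;
    }

    static int int_q_shift (int_q *q, int *v)
    {
      int band;

      if (!q->size)
        return 0;

      for (band = NBANDS; band--; )         /* highest band first, like reqq_shift */
        if (q->head [band] != q->tail [band])
          {
            *v = q->val [band][q->head [band]++];
            q->size--;
            return 1;
          }

      return 0; /* not reached */
    }

    int main (void)
    {
      int_q q = { { { 0 } } };
      int v;

      int_q_push (&q, 4, 1);                /* DEFAULT_PRI + PRI_BIAS */
      int_q_push (&q, 8, 2);                /* PRI_MAX + PRI_BIAS, e.g. a REQ_QUIT */

      while (int_q_shift (&q, &v))
        printf ("%d\n", v);                 /* prints 2, then 1 */

      return 0;
    }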
241
242 static int poll_cb ();
243 static void req_invoke (aio_req req);
244 static void req_free (aio_req req);
245 static void req_cancel (aio_req req);
246
247 /* must be called at most once */
248 static SV *req_sv (aio_req req, const char *klass)
249 {
250 if (!req->self)
251 {
252 req->self = (SV *)newHV ();
253 sv_magic (req->self, 0, PERL_MAGIC_ext, (char *)req, 0);
254 }
255
256 return sv_2mortal (sv_bless (newRV_inc (req->self), gv_stashpv (klass, 1)));
257 }
258
259 static aio_req SvAIO_REQ (SV *sv)
260 {
261 MAGIC *mg;
262
263 if (!sv_derived_from (sv, AIO_REQ_KLASS) || !SvROK (sv))
264 croak ("object of class " AIO_REQ_KLASS " expected");
265
266 mg = mg_find (SvRV (sv), PERL_MAGIC_ext);
267
268 return mg ? (aio_req)mg->mg_ptr : 0;
269 }
270
271 static void aio_grp_feed (aio_req grp)
272 {
273 while (grp->length < grp->fd2 && !(grp->flags & FLAG_CANCELLED))
274 {
275 int old_len = grp->length;
276
277 if (grp->fh2 && SvOK (grp->fh2))
278 {
279 dSP;
280
281 ENTER;
282 SAVETMPS;
283 PUSHMARK (SP);
284 XPUSHs (req_sv (grp, AIO_GRP_KLASS));
285 PUTBACK;
286 call_sv (grp->fh2, G_VOID | G_EVAL | G_KEEPERR);
287 SPAGAIN;
288 FREETMPS;
289 LEAVE;
290 }
291
292 /* stop if no progress has been made */
293 if (old_len == grp->length)
294 {
295 SvREFCNT_dec (grp->fh2);
296 grp->fh2 = 0;
297 break;
298 }
299 }
300 }
301
302 static void aio_grp_dec (aio_req grp)
303 {
304 --grp->length;
305
306 /* call feeder, if applicable */
307 aio_grp_feed (grp);
308
309 /* finish, if done */
310 if (!grp->length && grp->fd)
311 {
312 req_invoke (grp);
313 req_free (grp);
314 }
315 }
316
317 static void poll_wait ()
318 {
319 fd_set rfd;
320
321 while (nreqs)
322 {
323 int size;
324 #if !(__i386 || __x86_64) /* safe without semaphore on these archs */
325 LOCK (reslock);
326 #endif
327 size = res_queue.size;
328 #if !(__i386 || __x86_64) /* safe without semaphore on these archs */
329 UNLOCK (reslock);
330 #endif
331
332 if (size)
333 return;
334
335 FD_ZERO(&rfd);
336 FD_SET(respipe [0], &rfd);
337
338 select (respipe [0] + 1, &rfd, 0, 0, 0);
339 }
340 }
341
342 static void req_invoke (aio_req req)
343 {
344 dSP;
345
346 if (!(req->flags & FLAG_CANCELLED) && SvOK (req->callback))
347 {
348 errno = req->errorno;
349
350 ENTER;
351 SAVETMPS;
352 PUSHMARK (SP);
353 EXTEND (SP, 1);
354
355 switch (req->type)
356 {
357 case REQ_READDIR:
358 {
359 SV *rv = &PL_sv_undef;
360
361 if (req->result >= 0)
362 {
363 int i;
364 char *buf = req->data2ptr;
365 AV *av = newAV ();
366
367 av_extend (av, req->result - 1);
368
369 for (i = 0; i < req->result; ++i)
370 {
371 SV *sv = newSVpv (buf, 0);
372
373 av_store (av, i, sv);
374 buf += SvCUR (sv) + 1;
375 }
376
377 rv = sv_2mortal (newRV_noinc ((SV *)av));
378 }
379
380 PUSHs (rv);
381 }
382 break;
383
384 case REQ_OPEN:
385 {
386 /* convert fd to fh */
387 SV *fh;
388
389 PUSHs (sv_2mortal (newSViv (req->result)));
390 PUTBACK;
391 call_pv ("IO::AIO::_fd2fh", G_SCALAR | G_EVAL);
392 SPAGAIN;
393
394 fh = SvREFCNT_inc (POPs);
395
396 PUSHMARK (SP);
397 XPUSHs (sv_2mortal (fh));
398 }
399 break;
400
401 case REQ_GROUP:
402 req->fd = 2; /* mark group as finished */
403
404 if (req->data)
405 {
406 int i;
407 AV *av = (AV *)req->data;
408
409 EXTEND (SP, AvFILL (av) + 1);
410 for (i = 0; i <= AvFILL (av); ++i)
411 PUSHs (*av_fetch (av, i, 0));
412 }
413 break;
414
415 case REQ_NOP:
416 case REQ_BUSY:
417 break;
418
419 default:
420 PUSHs (sv_2mortal (newSViv (req->result)));
421 break;
422 }
423
424
425 PUTBACK;
426 call_sv (req->callback, G_VOID | G_EVAL);
427 SPAGAIN;
428
429 FREETMPS;
430 LEAVE;
431 }
432
433 if (req->grp)
434 {
435 aio_req grp = req->grp;
436
437 /* unlink request */
438 if (req->grp_next) req->grp_next->grp_prev = req->grp_prev;
439 if (req->grp_prev) req->grp_prev->grp_next = req->grp_next;
440
441 if (grp->grp_first == req)
442 grp->grp_first = req->grp_next;
443
444 aio_grp_dec (grp);
445 }
446
447 if (SvTRUE (ERRSV))
448 {
449 req_free (req);
450 croak (0);
451 }
452 }
453
454 static void req_free (aio_req req)
455 {
456 if (req->self)
457 {
458 sv_unmagic (req->self, PERL_MAGIC_ext);
459 SvREFCNT_dec (req->self);
460 }
461
462 SvREFCNT_dec (req->data);
463 SvREFCNT_dec (req->fh);
464 SvREFCNT_dec (req->fh2);
465 SvREFCNT_dec (req->callback);
466 Safefree (req->statdata);
467
468 if (req->type == REQ_READDIR)
469 free (req->data2ptr);
470
471 Safefree (req);
472 }
473
474 static void req_cancel_subs (aio_req grp)
475 {
476 aio_req sub;
477
478 if (grp->type != REQ_GROUP)
479 return;
480
481 SvREFCNT_dec (grp->fh2);
482 grp->fh2 = 0;
483
484 for (sub = grp->grp_first; sub; sub = sub->grp_next)
485 req_cancel (sub);
486 }
487
488 static void req_cancel (aio_req req)
489 {
490 req->flags |= FLAG_CANCELLED;
491
492 req_cancel_subs (req);
493 }
494
495 static int poll_cb ()
496 {
497 dSP;
498 int count = 0;
499 int do_croak = 0;
500 aio_req req;
501
502 for (;;)
503 {
504 LOCK (reslock);
505 req = reqq_shift (&res_queue);
506
507 if (req)
508 {
509 if (!res_queue.size)
510 {
511 /* drain the wakeup bytes written by the worker threads */
512 char buf [32];
513 while (read (respipe [0], buf, 32) == 32)
514 ;
515 }
516 }
517
518 UNLOCK (reslock);
519
520 if (!req)
521 break;
522
523 --nreqs;
524
525 if (req->type == REQ_QUIT)
526 started--;
527 else if (req->type == REQ_GROUP && req->length)
528 {
529 req->fd = 1; /* mark request as delayed */
530 continue;
531 }
532 else
533 {
534 if (req->type == REQ_READ)
535 SvCUR_set (req->data, req->dataoffset + (req->result > 0 ? req->result : 0));
536
537 if (req->data2ptr && (req->type == REQ_READ || req->type == REQ_WRITE))
538 SvREADONLY_off (req->data);
539
540 if (req->statdata)
541 {
542 PL_laststype = req->type == REQ_LSTAT ? OP_LSTAT : OP_STAT;
543 PL_laststatval = req->result;
544 PL_statcache = *(req->statdata);
545 }
546
547 req_invoke (req);
548
549 count++;
550 }
551
552 req_free (req);
553 }
554
555 return count;
556 }
557
558 static void *aio_proc(void *arg);
559
560 static void start_thread (void)
561 {
562 sigset_t fullsigset, oldsigset;
563 pthread_attr_t attr;
564
565 worker *wrk = calloc (1, sizeof (worker));
566
567 if (!wrk)
568 croak ("unable to allocate worker thread data");
569
570 pthread_attr_init (&attr);
571 pthread_attr_setstacksize (&attr, STACKSIZE);
572 pthread_attr_setdetachstate (&attr, PTHREAD_CREATE_DETACHED);
573
574 sigfillset (&fullsigset);
575
576 LOCK (wrklock);
577 sigprocmask (SIG_SETMASK, &fullsigset, &oldsigset);
578
579 if (pthread_create (&wrk->tid, &attr, aio_proc, (void *)wrk) == 0)
580 {
581 wrk->prev = &wrk_first;
582 wrk->next = wrk_first.next;
583 wrk_first.next->prev = wrk;
584 wrk_first.next = wrk;
585 started++;
586 }
587 else
588 free (wrk);
589
590 sigprocmask (SIG_SETMASK, &oldsigset, 0);
591 UNLOCK (wrklock);
592 }
593
594 static void req_send (aio_req req)
595 {
596 while (started < wanted && nreqs >= started)
597 start_thread ();
598
599 ++nreqs;
600
601 LOCK (reqlock);
602 reqq_push (&req_queue, req);
603 pthread_cond_signal (&reqwait);
604 UNLOCK (reqlock);
605 }
606
607 static void end_thread (void)
608 {
609 aio_req req;
610
611 Newz (0, req, 1, aio_cb);
612
613 req->type = REQ_QUIT;
614 req->pri = PRI_MAX + PRI_BIAS;
615
616 req_send (req);
617 }
618
619 static void min_parallel (int nthreads)
620 {
621 if (wanted < nthreads)
622 wanted = nthreads;
623 }
624
625 static void max_parallel (int nthreads)
626 {
627 int cur = started;
628
629 if (wanted > nthreads)
630 wanted = nthreads;
631
632 while (cur > wanted)
633 {
634 end_thread ();
635 cur--;
636 }
637
638 while (started > wanted)
639 {
640 poll_wait ();
641 poll_cb ();
642 }
643 }
644
645 static void create_pipe ()
646 {
647 if (pipe (respipe))
648 croak ("unable to initialize result pipe");
649
650 if (fcntl (respipe [0], F_SETFL, O_NONBLOCK))
651 croak ("cannot set result pipe to nonblocking mode");
652
653 if (fcntl (respipe [1], F_SETFL, O_NONBLOCK))
654 croak ("cannot set result pipe to nonblocking mode");
655 }
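
create_pipe sets up the notification channel used throughout this file: a worker writes one dummy byte to respipe [1] when it pushes the first result onto an empty res_queue (see aio_proc below), poll_wait select()s on respipe [0], and poll_cb drains the pending bytes once the result queue has been emptied. A stripped-down sketch of that self-pipe pattern in isolation (the names notify_pipe and producer are illustrative, not part of the module):

    /* self-pipe notification sketch (illustration only) */
    #include <pthread.h>
    #include <unistd.h>
    #include <fcntl.h>
    #include <stdio.h>
    #include <sys/select.h>

    static int notify_pipe [2];

    static void *producer (void *arg)
    {
      /* ... produce a result, then wake the consumer ... */
      write (notify_pipe [1], "x", 1);              /* one dummy byte per wakeup */
      return 0;
    }

    int main (void)
    {
      pthread_t tid;
      fd_set rfd;
      char buf [32];

      pipe (notify_pipe);
      fcntl (notify_pipe [0], F_SETFL, O_NONBLOCK); /* draining must never block */

      pthread_create (&tid, 0, producer, 0);

      FD_ZERO (&rfd);
      FD_SET (notify_pipe [0], &rfd);
      select (notify_pipe [0] + 1, &rfd, 0, 0, 0);  /* block until notified */

      while (read (notify_pipe [0], buf, sizeof buf) > 0)
        ;                                           /* drain all pending bytes */

      puts ("woken up, now process the result queue");
      pthread_join (tid, 0);
      return 0;
    }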
656
657 /*****************************************************************************/
658 /* work around various missing functions */
659
660 #if !HAVE_PREADWRITE
661 # define pread aio_pread
662 # define pwrite aio_pwrite
663
664 /*
665 * make our pread/pwrite safe against themselves, but not against
666 * normal read/write by using a mutex. slows down execution a lot,
667 * but that's your problem, not mine.
668 */
669 static pthread_mutex_t preadwritelock = PTHREAD_MUTEX_INITIALIZER;
670
671 static ssize_t pread (int fd, void *buf, size_t count, off_t offset)
672 {
673 ssize_t res;
674 off_t ooffset;
675
676 LOCK (preadwritelock);
677 ooffset = lseek (fd, 0, SEEK_CUR);
678 lseek (fd, offset, SEEK_SET);
679 res = read (fd, buf, count);
680 lseek (fd, ooffset, SEEK_SET);
681 UNLOCK (preadwritelock);
682
683 return res;
684 }
685
686 static ssize_t pwrite (int fd, void *buf, size_t count, off_t offset)
687 {
688 ssize_t res;
689 off_t ooffset;
690
691 LOCK (preadwritelock);
692 ooffset = lseek (fd, 0, SEEK_CUR);
693 lseek (fd, offset, SEEK_SET);
694 res = write (fd, buf, count);
695 lseek (fd, ooffset, SEEK_SET); /* restore the original offset, as pread does */
696 UNLOCK (preadwritelock);
697
698 return res;
699 }
700 #endif
701
702 #if !HAVE_FDATASYNC
703 # define fdatasync fsync
704 #endif
705
706 #if !HAVE_READAHEAD
707 # define readahead aio_readahead
708
709 static ssize_t readahead (int fd, off_t offset, size_t count)
710 {
711 dBUF;
712
713 while (count > 0)
714 {
715 size_t len = count < AIO_BUFSIZE ? count : AIO_BUFSIZE;
716
717 pread (fd, aio_buf, len, offset);
718 offset += len;
719 count -= len;
720 }
721
722 errno = 0; return count;
723 }
724 #endif
725
726 #if !HAVE_READDIR_R
727 # define readdir_r aio_readdir_r
728
729 static pthread_mutex_t readdirlock = PTHREAD_MUTEX_INITIALIZER;
730
731 static int readdir_r (DIR *dirp, struct dirent *ent, struct dirent **res)
732 {
733 struct dirent *e;
734 int errorno;
735
736 LOCK (readdirlock);
737
738 e = readdir (dirp);
739 errorno = errno;
740
741 if (e)
742 {
743 *res = ent;
744 strcpy (ent->d_name, e->d_name);
745 }
746 else
747 *res = 0;
748
749 UNLOCK (readdirlock);
750
751 errno = errorno;
752 return e ? 0 : -1;
753 }
754 #endif
755
756 /* sendfile always needs emulation */
757 static ssize_t sendfile_ (int ofd, int ifd, off_t offset, size_t count, worker *self)
758 {
759 ssize_t res;
760
761 if (!count)
762 return 0;
763
764 #if HAVE_SENDFILE
765 # if __linux
766 res = sendfile (ofd, ifd, &offset, count);
767
768 # elif __freebsd
769 /*
770 * Of course, the freebsd sendfile is a dire hack with no thoughts
771 * wasted on making it similar to other I/O functions.
772 */
773 {
774 off_t sbytes;
775 res = sendfile (ifd, ofd, offset, count, 0, &sbytes, 0);
776
777 if (res < 0 && sbytes)
778 /* maybe only on EAGAIN: as usual, the manpage leaves you guessing */
779 res = sbytes;
780 }
781
782 # elif __hpux
783 res = sendfile (ofd, ifd, offset, count, 0, 0);
784
785 # elif __solaris
786 {
787 struct sendfilevec vec;
788 size_t sbytes;
789
790 vec.sfv_fd = ifd;
791 vec.sfv_flag = 0;
792 vec.sfv_off = offset;
793 vec.sfv_len = count;
794
795 res = sendfilev (ofd, &vec, 1, &sbytes);
796
797 if (res < 0 && sbytes)
798 res = sbytes;
799 }
800
801 # endif
802 #else
803 res = -1;
804 errno = ENOSYS;
805 #endif
806
807 if (res < 0
808 && (errno == ENOSYS || errno == EINVAL || errno == ENOTSOCK
809 #if __solaris
810 || errno == EAFNOSUPPORT || errno == EPROTOTYPE
811 #endif
812 )
813 )
814 {
815 /* emulate sendfile. this is a major pain in the ass */
816 dBUF;
817
818 res = 0;
819
820 while (count)
821 {
822 ssize_t cnt;
823
824 cnt = pread (ifd, aio_buf, count > AIO_BUFSIZE ? AIO_BUFSIZE : count, offset);
825
826 if (cnt <= 0)
827 {
828 if (cnt && !res) res = -1;
829 break;
830 }
831
832 cnt = write (ofd, aio_buf, cnt);
833
834 if (cnt <= 0)
835 {
836 if (cnt && !res) res = -1;
837 break;
838 }
839
840 offset += cnt;
841 res += cnt;
842 count -= cnt;
843 }
844 }
845
846 return res;
847 }
848
849 /* read a full directory */
850 static void scandir_ (aio_req req, worker *self)
851 {
852 DIR *dirp;
853 union
854 {
855 struct dirent d;
856 char b [offsetof (struct dirent, d_name) + NAME_MAX + 1];
857 } *u;
858 struct dirent *entp;
859 char *name, *names;
860 int memlen = 4096;
861 int memofs = 0;
862 int res = 0;
863 int errorno;
864
865 LOCK (wrklock);
866 self->dirp = dirp = opendir (req->dataptr);
867 self->dbuf = u = malloc (sizeof (*u));
868 UNLOCK (wrklock);
869
870 req->data2ptr = names = malloc (memlen);
871
872 if (dirp && u && names)
873 for (;;)
874 {
875 errno = 0;
876 readdir_r (dirp, &u->d, &entp);
877
878 if (!entp)
879 break;
880
881 name = entp->d_name;
882
883 if (name [0] != '.' || (name [1] && (name [1] != '.' || name [2])))
884 {
885 int len = strlen (name) + 1;
886
887 res++;
888
889 while (memofs + len > memlen)
890 {
891 memlen *= 2;
892 LOCK (wrklock);
893 req->data2ptr = names = realloc (names, memlen);
894 UNLOCK (wrklock);
895
896 if (!names)
897 break;
898 }
899
900 memcpy (names + memofs, name, len);
901 memofs += len;
902 }
903 }
904
905 if (errno)
906 res = -1;
907
908 req->result = res;
909 }
910
911 /*****************************************************************************/
912
913 static void *aio_proc (void *thr_arg)
914 {
915 aio_req req;
916 int type;
917 worker *self = (worker *)thr_arg;
918
919 do
920 {
921 LOCK (reqlock);
922
923 for (;;)
924 {
925 self->req = req = reqq_shift (&req_queue);
926
927 if (req)
928 break;
929
930 pthread_cond_wait (&reqwait, &reqlock);
931 }
932
933 UNLOCK (reqlock);
934
935 errno = 0; /* strictly unnecessary */
936 type = req->type; /* remember type for QUIT check */
937
938 if (!(req->flags & FLAG_CANCELLED))
939 switch (type)
940 {
941 case REQ_READ: req->result = pread (req->fd, req->dataptr, req->length, req->offset); break;
942 case REQ_WRITE: req->result = pwrite (req->fd, req->dataptr, req->length, req->offset); break;
943
944 case REQ_READAHEAD: req->result = readahead (req->fd, req->offset, req->length); break;
945 case REQ_SENDFILE: req->result = sendfile_ (req->fd, req->fd2, req->offset, req->length, self); break;
946
947 case REQ_STAT: req->result = stat (req->dataptr, req->statdata); break;
948 case REQ_LSTAT: req->result = lstat (req->dataptr, req->statdata); break;
949 case REQ_FSTAT: req->result = fstat (req->fd , req->statdata); break;
950
951 case REQ_OPEN: req->result = open (req->dataptr, req->fd, req->mode); break;
952 case REQ_CLOSE: req->result = close (req->fd); break;
953 case REQ_UNLINK: req->result = unlink (req->dataptr); break;
954 case REQ_RMDIR: req->result = rmdir (req->dataptr); break;
955 case REQ_RENAME: req->result = rename (req->data2ptr, req->dataptr); break;
956 case REQ_LINK: req->result = link (req->data2ptr, req->dataptr); break;
957 case REQ_SYMLINK: req->result = symlink (req->data2ptr, req->dataptr); break;
958
959 case REQ_FDATASYNC: req->result = fdatasync (req->fd); break;
960 case REQ_FSYNC: req->result = fsync (req->fd); break;
961 case REQ_READDIR: scandir_ (req, self); break;
962
963 case REQ_BUSY:
964 {
965 struct timeval tv;
966
967 tv.tv_sec = req->fd;
968 tv.tv_usec = req->fd2;
969
970 req->result = select (0, 0, 0, 0, &tv);
971 }
972
973 case REQ_GROUP:
974 case REQ_NOP:
975 case REQ_QUIT:
976 break;
977
978 default:
979 req->result = ENOSYS;
980 break;
981 }
982
983 req->errorno = errno;
984
985 LOCK (reslock);
986
987 if (!reqq_push (&res_queue, req))
988 /* write a dummy byte to the pipe so fh becomes ready */
989 write (respipe [1], &respipe, 1);
990
991 self->req = 0;
992 worker_clear (self);
993
994 UNLOCK (reslock);
995 }
996 while (type != REQ_QUIT);
997
998 LOCK (wrklock);
999 worker_free (self);
1000 UNLOCK (wrklock);
1001
1002 return 0;
1003 }
1004
1005 /*****************************************************************************/
1006
1007 static void atfork_prepare (void)
1008 {
1009 LOCK (wrklock);
1010 LOCK (reqlock);
1011 LOCK (reslock);
1012 #if !HAVE_PREADWRITE
1013 LOCK (preadwritelock);
1014 #endif
1015 #if !HAVE_READDIR_R
1016 LOCK (readdirlock);
1017 #endif
1018 }
1019
1020 static void atfork_parent (void)
1021 {
1022 #if !HAVE_READDIR_R
1023 UNLOCK (readdirlock);
1024 #endif
1025 #if !HAVE_PREADWRITE
1026 UNLOCK (preadwritelock);
1027 #endif
1028 UNLOCK (reslock);
1029 UNLOCK (reqlock);
1030 UNLOCK (wrklock);
1031 }
1032
1033 static void atfork_child (void)
1034 {
1035 aio_req prv;
1036
1037 while (prv = reqq_shift (&req_queue))
1038 req_free (prv);
1039
1040 while (prv = reqq_shift (&res_queue))
1041 req_free (prv);
1042
1043 while (wrk_first.next != &wrk_first)
1044 {
1045 worker *wrk = wrk_first.next;
1046
1047 if (wrk->req)
1048 req_free (wrk->req);
1049
1050 worker_clear (wrk);
1051 worker_free (wrk);
1052 }
1053
1054 started = 0;
1055 nreqs = 0;
1056
1057 close (respipe [0]);
1058 close (respipe [1]);
1059 create_pipe ();
1060
1061 atfork_parent ();
1062 }
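
The three handlers above follow the usual pthread_atfork discipline: atfork_prepare acquires every mutex the module owns so that no worker can hold one across fork, atfork_parent simply releases them again, and atfork_child additionally throws away all queued requests, forgets the worker list (those threads do not exist in the child), recreates the result pipe, and then releases the locks via atfork_parent. A minimal sketch of that discipline with a single lock and a single piece of state (illustrative only; none of these names come from the module):

    /* pthread_atfork discipline sketch (illustration only) */
    #include <pthread.h>
    #include <stdio.h>
    #include <unistd.h>

    static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
    static int queued;                       /* stand-in for per-process state */

    static void prepare (void) { pthread_mutex_lock   (&lock); }  /* before fork */
    static void parent  (void) { pthread_mutex_unlock (&lock); }  /* after fork, parent */

    static void child (void)
    {
      queued = 0;                            /* the child starts with empty queues */
      pthread_mutex_unlock (&lock);          /* the lock was copied in the locked state */
    }

    int main (void)
    {
      pthread_atfork (prepare, parent, child);

      queued = 3;

      if (fork () == 0)
        {
          printf ("child  queued=%d\n", queued);   /* 0: state was reset */
          _exit (0);
        }

      printf ("parent queued=%d\n", queued);       /* 3: the parent keeps its state */
      return 0;
    }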
1063
1064 #define dREQ \
1065 aio_req req; \
1066 int req_pri = next_pri; \
1067 next_pri = DEFAULT_PRI + PRI_BIAS; \
1068 \
1069 if (SvOK (callback) && !SvROK (callback)) \
1070 croak ("callback must be undef or of reference type"); \
1071 \
1072 Newz (0, req, 1, aio_cb); \
1073 if (!req) \
1074 croak ("out of memory during aio_req allocation"); \
1075 \
1076 req->callback = newSVsv (callback); \
1077 req->pri = req_pri
1078
1079 #define REQ_SEND \
1080 req_send (req); \
1081 \
1082 if (GIMME_V != G_VOID) \
1083 XPUSHs (req_sv (req, AIO_REQ_KLASS));
1084
1085 MODULE = IO::AIO PACKAGE = IO::AIO
1086
1087 PROTOTYPES: ENABLE
1088
1089 BOOT:
1090 {
1091 HV *stash = gv_stashpv ("IO::AIO", 1);
1092 newCONSTSUB (stash, "EXDEV", newSViv (EXDEV));
1093 newCONSTSUB (stash, "O_RDONLY", newSViv (O_RDONLY));
1094 newCONSTSUB (stash, "O_WRONLY", newSViv (O_WRONLY));
1095
1096 create_pipe ();
1097 pthread_atfork (atfork_prepare, atfork_parent, atfork_child);
1098 }
1099
1100 void
1101 min_parallel (nthreads)
1102 int nthreads
1103 PROTOTYPE: $
1104
1105 void
1106 max_parallel (nthreads)
1107 int nthreads
1108 PROTOTYPE: $
1109
1110 void
1111 aio_open (pathname,flags,mode,callback=&PL_sv_undef)
1112 SV * pathname
1113 int flags
1114 int mode
1115 SV * callback
1116 PROTOTYPE: $$$;$
1117 PPCODE:
1118 {
1119 dREQ;
1120
1121 req->type = REQ_OPEN;
1122 req->data = newSVsv (pathname);
1123 req->dataptr = SvPVbyte_nolen (req->data);
1124 req->fd = flags;
1125 req->mode = mode;
1126
1127 REQ_SEND;
1128 }
1129
1130 void
1131 aio_close (fh,callback=&PL_sv_undef)
1132 SV * fh
1133 SV * callback
1134 PROTOTYPE: $;$
1135 ALIAS:
1136 aio_close = REQ_CLOSE
1137 aio_fsync = REQ_FSYNC
1138 aio_fdatasync = REQ_FDATASYNC
1139 PPCODE:
1140 {
1141 dREQ;
1142
1143 req->type = ix;
1144 req->fh = newSVsv (fh);
1145 req->fd = PerlIO_fileno (IoIFP (sv_2io (fh)));
1146
1147 REQ_SEND;
1148 }
1149
1150 void
1151 aio_read (fh,offset,length,data,dataoffset,callback=&PL_sv_undef)
1152 SV * fh
1153 UV offset
1154 UV length
1155 SV * data
1156 UV dataoffset
1157 SV * callback
1158 ALIAS:
1159 aio_read = REQ_READ
1160 aio_write = REQ_WRITE
1161 PROTOTYPE: $$$$$;$
1162 PPCODE:
1163 {
1164 aio_req req;
1165 STRLEN svlen;
1166 char *svptr = SvPVbyte (data, svlen);
1167
1168 SvUPGRADE (data, SVt_PV);
1169 SvPOK_on (data);
1170
1171 if (dataoffset < 0)
1172 dataoffset += svlen;
1173
1174 if (dataoffset < 0 || dataoffset > svlen)
1175 croak ("data offset outside of string");
1176
1177 if (ix == REQ_WRITE)
1178 {
1179 /* write: check length and adjust. */
1180 if (length < 0 || length + dataoffset > svlen)
1181 length = svlen - dataoffset;
1182 }
1183 else
1184 {
1185 /* read: grow scalar as necessary */
1186 svptr = SvGROW (data, length + dataoffset);
1187 }
1188
1189 if (length < 0)
1190 croak ("length must not be negative");
1191
1192 {
1193 dREQ;
1194
1195 req->type = ix;
1196 req->fh = newSVsv (fh);
1197 req->fd = PerlIO_fileno (ix == REQ_READ ? IoIFP (sv_2io (fh))
1198 : IoOFP (sv_2io (fh)));
1199 req->offset = offset;
1200 req->length = length;
1201 req->data = SvREFCNT_inc (data);
1202 req->dataptr = (char *)svptr + dataoffset;
1203
1204 if (!SvREADONLY (data))
1205 {
1206 SvREADONLY_on (data);
1207 req->data2ptr = (void *)data;
1208 }
1209
1210 REQ_SEND;
1211 }
1212 }
1213
1214 void
1215 aio_sendfile (out_fh,in_fh,in_offset,length,callback=&PL_sv_undef)
1216 SV * out_fh
1217 SV * in_fh
1218 UV in_offset
1219 UV length
1220 SV * callback
1221 PROTOTYPE: $$$$;$
1222 PPCODE:
1223 {
1224 dREQ;
1225
1226 req->type = REQ_SENDFILE;
1227 req->fh = newSVsv (out_fh);
1228 req->fd = PerlIO_fileno (IoIFP (sv_2io (out_fh)));
1229 req->fh2 = newSVsv (in_fh);
1230 req->fd2 = PerlIO_fileno (IoIFP (sv_2io (in_fh)));
1231 req->offset = in_offset;
1232 req->length = length;
1233
1234 REQ_SEND;
1235 }
1236
1237 void
1238 aio_readahead (fh,offset,length,callback=&PL_sv_undef)
1239 SV * fh
1240 UV offset
1241 IV length
1242 SV * callback
1243 PROTOTYPE: $$$;$
1244 PPCODE:
1245 {
1246 dREQ;
1247
1248 req->type = REQ_READAHEAD;
1249 req->fh = newSVsv (fh);
1250 req->fd = PerlIO_fileno (IoIFP (sv_2io (fh)));
1251 req->offset = offset;
1252 req->length = length;
1253
1254 REQ_SEND;
1255 }
1256
1257 void
1258 aio_stat (fh_or_path,callback=&PL_sv_undef)
1259 SV * fh_or_path
1260 SV * callback
1261 ALIAS:
1262 aio_stat = REQ_STAT
1263 aio_lstat = REQ_LSTAT
1264 PPCODE:
1265 {
1266 dREQ;
1267
1268 New (0, req->statdata, 1, Stat_t);
1269 if (!req->statdata)
1270 {
1271 req_free (req);
1272 croak ("out of memory during aio_req->statdata allocation");
1273 }
1274
1275 if (SvPOK (fh_or_path))
1276 {
1277 req->type = ix;
1278 req->data = newSVsv (fh_or_path);
1279 req->dataptr = SvPVbyte_nolen (req->data);
1280 }
1281 else
1282 {
1283 req->type = REQ_FSTAT;
1284 req->fh = newSVsv (fh_or_path);
1285 req->fd = PerlIO_fileno (IoIFP (sv_2io (fh_or_path)));
1286 }
1287
1288 REQ_SEND;
1289 }
1290
1291 void
1292 aio_unlink (pathname,callback=&PL_sv_undef)
1293 SV * pathname
1294 SV * callback
1295 ALIAS:
1296 aio_unlink = REQ_UNLINK
1297 aio_rmdir = REQ_RMDIR
1298 aio_readdir = REQ_READDIR
1299 PPCODE:
1300 {
1301 dREQ;
1302
1303 req->type = ix;
1304 req->data = newSVsv (pathname);
1305 req->dataptr = SvPVbyte_nolen (req->data);
1306
1307 REQ_SEND;
1308 }
1309
1310 void
1311 aio_link (oldpath,newpath,callback=&PL_sv_undef)
1312 SV * oldpath
1313 SV * newpath
1314 SV * callback
1315 ALIAS:
1316 aio_link = REQ_LINK
1317 aio_symlink = REQ_SYMLINK
1318 aio_rename = REQ_RENAME
1319 PPCODE:
1320 {
1321 dREQ;
1322
1323 req->type = ix;
1324 req->fh = newSVsv (oldpath);
1325 req->data2ptr = SvPVbyte_nolen (req->fh);
1326 req->data = newSVsv (newpath);
1327 req->dataptr = SvPVbyte_nolen (req->data);
1328
1329 REQ_SEND;
1330 }
1331
1332 void
1333 aio_busy (delay,callback=&PL_sv_undef)
1334 double delay
1335 SV * callback
1336 PPCODE:
1337 {
1338 dREQ;
1339
1340 req->type = REQ_BUSY;
1341 req->fd = delay < 0. ? 0 : delay;
1342 req->fd2 = delay < 0. ? 0 : 1000. * (delay - req->fd);
1343
1344 REQ_SEND;
1345 }
1346
1347 void
1348 aio_group (callback=&PL_sv_undef)
1349 SV * callback
1350 PROTOTYPE: ;$
1351 PPCODE:
1352 {
1353 dREQ;
1354
1355 req->type = REQ_GROUP;
1356 req_send (req);
1357
1358 XPUSHs (req_sv (req, AIO_GRP_KLASS));
1359 }
1360
1361 void
1362 aio_nop (callback=&PL_sv_undef)
1363 SV * callback
1364 PPCODE:
1365 {
1366 dREQ;
1367
1368 req->type = REQ_NOP;
1369
1370 REQ_SEND;
1371 }
1372
1373 void
1374 aioreq_pri (int pri = DEFAULT_PRI)
1375 CODE:
1376 if (pri < PRI_MIN) pri = PRI_MIN;
1377 if (pri > PRI_MAX) pri = PRI_MAX;
1378 next_pri = pri + PRI_BIAS;
1379
1380 void
1381 aioreq_nice (int nice = 0)
1382 CODE:
1383 nice = next_pri - nice;
1384 if (nice < PRI_MIN) nice = PRI_MIN;
1385 if (nice > PRI_MAX) nice = PRI_MAX;
1386 next_pri = nice + PRI_BIAS;
1387
1388 void
1389 flush ()
1390 PROTOTYPE:
1391 CODE:
1392 while (nreqs)
1393 {
1394 poll_wait ();
1395 poll_cb ();
1396 }
1397
1398 void
1399 poll()
1400 PROTOTYPE:
1401 CODE:
1402 if (nreqs)
1403 {
1404 poll_wait ();
1405 poll_cb ();
1406 }
1407
1408 int
1409 poll_fileno()
1410 PROTOTYPE:
1411 CODE:
1412 RETVAL = respipe [0];
1413 OUTPUT:
1414 RETVAL
1415
1416 int
1417 poll_cb(...)
1418 PROTOTYPE:
1419 CODE:
1420 RETVAL = poll_cb ();
1421 OUTPUT:
1422 RETVAL
1423
1424 void
1425 poll_wait()
1426 PROTOTYPE:
1427 CODE:
1428 if (nreqs)
1429 poll_wait ();
1430
1431 int
1432 nreqs()
1433 PROTOTYPE:
1434 CODE:
1435 RETVAL = nreqs;
1436 OUTPUT:
1437 RETVAL
1438
1439 PROTOTYPES: DISABLE
1440
1441 MODULE = IO::AIO PACKAGE = IO::AIO::REQ
1442
1443 void
1444 cancel (aio_req_ornot req)
1445 CODE:
1446 req_cancel (req);
1447
1448 void
1449 cb (aio_req_ornot req, SV *callback=&PL_sv_undef)
1450 CODE:
1451 SvREFCNT_dec (req->callback);
1452 req->callback = newSVsv (callback);
1453
1454 MODULE = IO::AIO PACKAGE = IO::AIO::GRP
1455
1456 void
1457 add (aio_req grp, ...)
1458 PPCODE:
1459 {
1460 int i;
1461 aio_req req;
1462
1463 if (grp->fd == 2)
1464 croak ("cannot add requests to IO::AIO::GRP after the group finished");
1465
1466 for (i = 1; i < items; ++i )
1467 {
1468 if (GIMME_V != G_VOID)
1469 XPUSHs (sv_2mortal (newSVsv (ST (i))));
1470
1471 req = SvAIO_REQ (ST (i));
1472
1473 if (req)
1474 {
1475 ++grp->length;
1476 req->grp = grp;
1477
1478 req->grp_prev = 0;
1479 req->grp_next = grp->grp_first;
1480
1481 if (grp->grp_first)
1482 grp->grp_first->grp_prev = req;
1483
1484 grp->grp_first = req;
1485 }
1486 }
1487 }
1488
1489 void
1490 cancel_subs (aio_req_ornot req)
1491 CODE:
1492 req_cancel_subs (req);
1493
1494 void
1495 result (aio_req grp, ...)
1496 CODE:
1497 {
1498 int i;
1499 AV *av = newAV ();
1500
1501 for (i = 1; i < items; ++i )
1502 av_push (av, newSVsv (ST (i)));
1503
1504 SvREFCNT_dec (grp->data);
1505 grp->data = (SV *)av;
1506 }
1507
1508 void
1509 limit (aio_req grp, int limit)
1510 CODE:
1511 grp->fd2 = limit;
1512 aio_grp_feed (grp);
1513
1514 void
1515 feed (aio_req grp, SV *callback=&PL_sv_undef)
1516 CODE:
1517 {
1518 SvREFCNT_dec (grp->fh2);
1519 grp->fh2 = newSVsv (callback);
1520
1521 if (grp->fd2 <= 0)
1522 grp->fd2 = 2;
1523
1524 aio_grp_feed (grp);
1525 }
1526