/cvs/IO-AIO/AIO.xs
Revision: 1.88
Committed: Mon Oct 30 23:43:17 2006 UTC by root
Branch: MAIN
Changes since 1.87: +1 -1 lines
Log Message:
*** empty log message ***

File Contents

1 /* solaris */
2 #define _POSIX_PTHREAD_SEMANTICS 1
3
4 #if __linux && !defined(_GNU_SOURCE)
5 # define _GNU_SOURCE
6 #endif
7
8 /* just in case */
9 #define _REENTRANT 1
10
11 #include <errno.h>
12
13 #include "EXTERN.h"
14 #include "perl.h"
15 #include "XSUB.h"
16
17 #include "autoconf/config.h"
18
19 #include <pthread.h>
20
21 #include <stddef.h>
22 #include <errno.h>
23 #include <sys/time.h>
24 #include <sys/select.h>
25 #include <sys/types.h>
26 #include <sys/stat.h>
27 #include <limits.h>
28 #include <unistd.h>
29 #include <fcntl.h>
30 #include <signal.h>
31 #include <sched.h>
32
33 #if HAVE_SENDFILE
34 # if __linux
35 # include <sys/sendfile.h>
36 # elif __freebsd
37 # include <sys/socket.h>
38 # include <sys/uio.h>
39 # elif __hpux
40 # include <sys/socket.h>
41 # elif __solaris /* not yet */
42 # include <sys/sendfile.h>
43 # else
44 # error sendfile support requested but not available
45 # endif
46 #endif
47
48 /* number of seconds after which idle threads exit */
49 #define IDLE_TIMEOUT 10
50
52 /* used for struct dirent; AIX doesn't provide NAME_MAX */
52 #ifndef NAME_MAX
53 # define NAME_MAX 4096
54 #endif
55
56 #ifndef PTHREAD_STACK_MIN
57 /* care for broken platforms, e.g. windows */
58 # define PTHREAD_STACK_MIN 16384
59 #endif
60
61 #if __ia64
62 # define STACKSIZE 65536
63 #elif __i386 || __x86_64 /* 16k is unreasonably high :( */
64 # define STACKSIZE PTHREAD_STACK_MIN
65 #else
66 # define STACKSIZE 16384
67 #endif
68
69 /* whether word reads are potentially non-atomic.
70 * this is conservative; most arches this runs
71 * on have atomic word reads/writes.
72 */
73 #ifndef WORDACCESS_UNSAFE
74 # if __i386 || __x86_64
75 # define WORDACCESS_UNSAFE 0
76 # else
77 # define WORDACCESS_UNSAFE 1
78 # endif
79 #endif
80
81 /* buffer size for various temporary buffers */
82 #define AIO_BUFSIZE 65536
83
84 #define dBUF \
85 char *aio_buf; \
86 LOCK (wrklock); \
87 self->dbuf = aio_buf = malloc (AIO_BUFSIZE); \
88 UNLOCK (wrklock); \
89 if (!aio_buf) \
90 return -1;
91
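/*
 * illustrative note, not from the original author: dBUF can only be used
 * inside worker-side helpers that have a "worker *self" in scope (see
 * aio_readahead and sendfile_ below). it allocates AIO_BUFSIZE bytes,
 * remembers the pointer in self->dbuf so worker_clear () can free it once
 * the request is done (or in the fork handler), and makes the enclosing
 * function return -1 if the allocation fails.
 */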
92 enum {
93 REQ_QUIT,
94 REQ_OPEN, REQ_CLOSE,
95 REQ_READ, REQ_WRITE, REQ_READAHEAD,
96 REQ_SENDFILE,
97 REQ_STAT, REQ_LSTAT, REQ_FSTAT,
98 REQ_FSYNC, REQ_FDATASYNC,
99 REQ_UNLINK, REQ_RMDIR, REQ_RENAME,
100 REQ_MKNOD, REQ_READDIR,
101 REQ_LINK, REQ_SYMLINK, REQ_READLINK,
102 REQ_GROUP, REQ_NOP,
103 REQ_BUSY,
104 };
105
106 #define AIO_REQ_KLASS "IO::AIO::REQ"
107 #define AIO_GRP_KLASS "IO::AIO::GRP"
108
109 typedef struct aio_cb
110 {
111 struct aio_cb *volatile next;
112
113 SV *callback, *fh;
114 SV *sv1, *sv2;
115 void *ptr1, *ptr2;
116 off_t offs;
117 size_t size;
118 ssize_t result;
119
120 STRLEN stroffset;
121 int type;
122 int int1, int2;
123 int errorno;
124 mode_t mode; /* open */
125
126 unsigned char flags;
127 unsigned char pri;
128
129 SV *self; /* the perl counterpart of this request, if any */
130 struct aio_cb *grp, *grp_prev, *grp_next, *grp_first;
131 } aio_cb;
132
133 enum {
134 FLAG_CANCELLED = 0x01,
135 FLAG_SV1_RO_OFF = 0x40, /* data was set readonly */
136 FLAG_PTR2_FREE = 0x80, /* need free(ptr2) */
137 };
138
139 typedef aio_cb *aio_req;
140 typedef aio_cb *aio_req_ornot;
141
142 enum {
143 PRI_MIN = -4,
144 PRI_MAX = 4,
145
146 DEFAULT_PRI = 0,
147 PRI_BIAS = -PRI_MIN,
148 NUM_PRI = PRI_MAX + PRI_BIAS + 1,
149 };
150
151 #define AIO_TICKS ((1000000 + 1023) >> 10)
152
153 static unsigned int max_poll_time = 0;
154 static unsigned int max_poll_reqs = 0;
155
156 /* calculate time difference in ~1/AIO_TICKS of a second */
157 static int tvdiff (struct timeval *tv1, struct timeval *tv2)
158 {
159 return (tv2->tv_sec - tv1->tv_sec ) * AIO_TICKS
160 + ((tv2->tv_usec - tv1->tv_usec) >> 10);
161 }
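/*
 * a minimal worked example, not part of the module: AIO_TICKS evaluates to
 * 977, so max_poll_time and tvdiff () both count in units of roughly 1/977
 * of a second (~1.02 ms); the >>10 above is a cheap approximation of
 * dividing microseconds by 1000. the function and values below are purely
 * illustrative.
 */
#if 0
static void tvdiff_example (void)
{
  struct timeval t0 = { 100, 250000 }; /* t = 100.250 s */
  struct timeval t1 = { 100, 260000 }; /* 10 ms later   */

  /* (100 - 100) * 977 + (10000 >> 10) == 9 ticks (~9.2 ms measured);
   * the XS max_poll_time (0.01) below stores (unsigned)(0.01 * 977) == 9,
   * so a 10 ms budget and a 10 ms elapsed time land in the same units. */
  int ticks = tvdiff (&t0, &t1);
  (void)ticks;
}
#endif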
162
163 static int next_pri = DEFAULT_PRI + PRI_BIAS;
164
165 static unsigned int started, idle, wanted;
166
167 #if __linux && defined (PTHREAD_ADAPTIVE_MUTEX_INITIALIZER_NP)
168 # define AIO_MUTEX_INIT PTHREAD_ADAPTIVE_MUTEX_INITIALIZER_NP
169 #else
170 # define AIO_MUTEX_INIT PTHREAD_MUTEX_INITIALIZER
171 #endif
172
173 #define LOCK(mutex) pthread_mutex_lock (&(mutex))
174 #define UNLOCK(mutex) pthread_mutex_unlock (&(mutex))
175
176 /* worker threads management */
177 static pthread_mutex_t wrklock = AIO_MUTEX_INIT;
178
179 typedef struct worker {
180 /* locked by wrklock */
181 struct worker *prev, *next;
182
183 pthread_t tid;
184
185 /* locked by reslock, reqlock or wrklock */
186 aio_req req; /* currently processed request */
187 void *dbuf;
188 DIR *dirp;
189 } worker;
190
191 static worker wrk_first = { &wrk_first, &wrk_first, 0 };
192
193 static void worker_clear (worker *wrk)
194 {
195 if (wrk->dirp)
196 {
197 closedir (wrk->dirp);
198 wrk->dirp = 0;
199 }
200
201 if (wrk->dbuf)
202 {
203 free (wrk->dbuf);
204 wrk->dbuf = 0;
205 }
206 }
207
208 static void worker_free (worker *wrk)
209 {
210 wrk->next->prev = wrk->prev;
211 wrk->prev->next = wrk->next;
212
213 free (wrk);
214 }
215
216 static volatile unsigned int nreqs, nready, npending;
217 static volatile unsigned int max_idle = 4;
218 static volatile unsigned int max_outstanding = 0xffffffff;
219 static int respipe [2];
220
221 static pthread_mutex_t reslock = AIO_MUTEX_INIT;
222 static pthread_mutex_t reqlock = AIO_MUTEX_INIT;
223 static pthread_cond_t reqwait = PTHREAD_COND_INITIALIZER;
224
225 #if WORDACCESS_UNSAFE
226
227 static unsigned int get_nready ()
228 {
229 unsigned int retval;
230
231 LOCK (reqlock);
232 retval = nready;
233 UNLOCK (reqlock);
234
235 return retval;
236 }
237
238 static unsigned int get_npending ()
239 {
240 unsigned int retval;
241
242 LOCK (reslock);
243 retval = npending;
244 UNLOCK (reslock);
245
246 return retval;
247 }
248
249 static unsigned int get_nthreads ()
250 {
251 unsigned int retval;
252
253 LOCK (wrklock);
254 retval = started;
255 UNLOCK (wrklock);
256
257 return retval;
258 }
259
260 #else
261
262 # define get_nready() nready
263 # define get_npending() npending
264 # define get_nthreads() started
265
266 #endif
267
268 /*
269 * a somewhat faster data structure might be nice, but
270 * with 8 priorities this actually needs <20 insns
271 * per shift, the most expensive operation.
272 */
273 typedef struct {
274 aio_req qs[NUM_PRI], qe[NUM_PRI]; /* qstart, qend */
275 int size;
276 } reqq;
277
278 static reqq req_queue;
279 static reqq res_queue;
280
281 int reqq_push (reqq *q, aio_req req)
282 {
283 int pri = req->pri;
284 req->next = 0;
285
286 if (q->qe[pri])
287 {
288 q->qe[pri]->next = req;
289 q->qe[pri] = req;
290 }
291 else
292 q->qe[pri] = q->qs[pri] = req;
293
294 return q->size++;
295 }
296
297 aio_req reqq_shift (reqq *q)
298 {
299 int pri;
300
301 if (!q->size)
302 return 0;
303
304 --q->size;
305
306 for (pri = NUM_PRI; pri--; )
307 {
308 aio_req req = q->qs[pri];
309
310 if (req)
311 {
312 if (!(q->qs[pri] = req->next))
313 q->qe[pri] = 0;
314
315 return req;
316 }
317 }
318
319 abort ();
320 }
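/*
 * a hypothetical usage sketch, not part of the module, spelling out the
 * queue semantics the comment above describes: one FIFO chain per biased
 * priority, reqq_shift () scanning from the highest index down, and
 * reqq_push () returning the previous size (0 means "queue was empty",
 * which aio_proc later uses to decide when to write the wakeup byte).
 */
#if 0
static void reqq_example (void)
{
  static reqq q;              /* zero-initialised, i.e. empty          */
  static aio_cb lo, hi1, hi2; /* dummy requests, for illustration only */

  lo.pri  = DEFAULT_PRI + PRI_BIAS; /* biased 4                        */
  hi1.pri = PRI_MAX + PRI_BIAS;     /* biased 8, same as REQ_QUIT uses */
  hi2.pri = PRI_MAX + PRI_BIAS;

  reqq_push (&q, &lo);  /* returns 0: the queue was empty */
  reqq_push (&q, &hi1); /* returns 1                      */
  reqq_push (&q, &hi2); /* returns 2                      */

  {
    aio_req a = reqq_shift (&q); /* &hi1: highest priority, pushed first */
    aio_req b = reqq_shift (&q); /* &hi2                                 */
    aio_req c = reqq_shift (&q); /* &lo                                  */
    (void)a; (void)b; (void)c;
  }
}
#endif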
321
322 static int poll_cb ();
323 static void req_invoke (aio_req req);
324 static void req_free (aio_req req);
325 static void req_cancel (aio_req req);
326
327 /* must be called at most once */
328 static SV *req_sv (aio_req req, const char *klass)
329 {
330 if (!req->self)
331 {
332 req->self = (SV *)newHV ();
333 sv_magic (req->self, 0, PERL_MAGIC_ext, (char *)req, 0);
334 }
335
336 return sv_2mortal (sv_bless (newRV_inc (req->self), gv_stashpv (klass, 1)));
337 }
338
339 static aio_req SvAIO_REQ (SV *sv)
340 {
341 MAGIC *mg;
342
343 if (!sv_derived_from (sv, AIO_REQ_KLASS) || !SvROK (sv))
344 croak ("object of class " AIO_REQ_KLASS " expected");
345
346 mg = mg_find (SvRV (sv), PERL_MAGIC_ext);
347
348 return mg ? (aio_req)mg->mg_ptr : 0;
349 }
350
351 static void aio_grp_feed (aio_req grp)
352 {
353 while (grp->size < grp->int2 && !(grp->flags & FLAG_CANCELLED))
354 {
355 int old_len = grp->size;
356
357 if (grp->sv2 && SvOK (grp->sv2))
358 {
359 dSP;
360
361 ENTER;
362 SAVETMPS;
363 PUSHMARK (SP);
364 XPUSHs (req_sv (grp, AIO_GRP_KLASS));
365 PUTBACK;
366 call_sv (grp->sv2, G_VOID | G_EVAL | G_KEEPERR);
367 SPAGAIN;
368 FREETMPS;
369 LEAVE;
370 }
371
372 /* stop if no progress has been made */
373 if (old_len == grp->size)
374 {
375 SvREFCNT_dec (grp->sv2);
376 grp->sv2 = 0;
377 break;
378 }
379 }
380 }
381
382 static void aio_grp_dec (aio_req grp)
383 {
384 --grp->size;
385
386 /* call feeder, if applicable */
387 aio_grp_feed (grp);
388
389 /* finish, if done */
390 if (!grp->size && grp->int1)
391 {
392 req_invoke (grp);
393 req_free (grp);
394 }
395 }
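/*
 * illustrative summary, inferred from the surrounding code rather than
 * original documentation: for REQ_GROUP requests the generic aio_cb fields
 * are reused as follows. grp->size counts the sub-requests still attached
 * (incremented in "add", decremented here); grp->int1 is the group state
 * (1 = the group itself was reaped by poll_cb while members were still
 * pending, 2 = finished, no further adds allowed); grp->int2 is the feeder
 * limit and grp->sv2 the feeder callback used by aio_grp_feed (); grp->sv1
 * holds the AV of results set via "result".
 */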
396
397 static void req_invoke (aio_req req)
398 {
399 dSP;
400
401 if (req->flags & FLAG_SV1_RO_OFF)
402 SvREADONLY_off (req->sv1);
403
404 if (!(req->flags & FLAG_CANCELLED) && SvOK (req->callback))
405 {
406 ENTER;
407 SAVETMPS;
408 PUSHMARK (SP);
409 EXTEND (SP, 1);
410
411 switch (req->type)
412 {
413 case REQ_READDIR:
414 {
415 SV *rv = &PL_sv_undef;
416
417 if (req->result >= 0)
418 {
419 int i;
420 char *buf = req->ptr2;
421 AV *av = newAV ();
422
423 av_extend (av, req->result - 1);
424
425 for (i = 0; i < req->result; ++i)
426 {
427 SV *sv = newSVpv (buf, 0);
428
429 av_store (av, i, sv);
430 buf += SvCUR (sv) + 1;
431 }
432
433 rv = sv_2mortal (newRV_noinc ((SV *)av));
434 }
435
436 PUSHs (rv);
437 }
438 break;
439
440 case REQ_OPEN:
441 {
442 /* convert fd to fh */
443 SV *fh;
444
445 PUSHs (sv_2mortal (newSViv (req->result)));
446 PUTBACK;
447 call_pv ("IO::AIO::_fd2fh", G_SCALAR | G_EVAL);
448 SPAGAIN;
449
450 fh = SvREFCNT_inc (POPs);
451
452 PUSHMARK (SP);
453 XPUSHs (sv_2mortal (fh));
454 }
455 break;
456
457 case REQ_GROUP:
458 req->int1 = 2; /* mark group as finished */
459
460 if (req->sv1)
461 {
462 int i;
463 AV *av = (AV *)req->sv1;
464
465 EXTEND (SP, AvFILL (av) + 1);
466 for (i = 0; i <= AvFILL (av); ++i)
467 PUSHs (*av_fetch (av, i, 0));
468 }
469 break;
470
471 case REQ_NOP:
472 case REQ_BUSY:
473 break;
474
475 case REQ_READLINK:
476 if (req->result > 0)
477 {
478 SvCUR_set (req->sv1, req->result);
479 *SvEND (req->sv1) = 0;
480 PUSHs (req->sv1);
481 }
482 break;
483
484 case REQ_STAT:
485 case REQ_LSTAT:
486 case REQ_FSTAT:
487 PL_laststype = req->type == REQ_LSTAT ? OP_LSTAT : OP_STAT;
488 PL_laststatval = req->result;
489 PL_statcache = *(Stat_t *)(req->ptr2);
490 PUSHs (sv_2mortal (newSViv (req->result)));
491 break;
492
493 case REQ_READ:
494 SvCUR_set (req->sv1, req->stroffset + (req->result > 0 ? req->result : 0));
495 *SvEND (req->sv1) = 0;
496 PUSHs (sv_2mortal (newSViv (req->result)));
497 break;
498
499 default:
500 PUSHs (sv_2mortal (newSViv (req->result)));
501 break;
502 }
503
504 errno = req->errorno;
505
506 PUTBACK;
507 call_sv (req->callback, G_VOID | G_EVAL);
508 SPAGAIN;
509
510 FREETMPS;
511 LEAVE;
512 }
513
514 if (req->grp)
515 {
516 aio_req grp = req->grp;
517
518 /* unlink request */
519 if (req->grp_next) req->grp_next->grp_prev = req->grp_prev;
520 if (req->grp_prev) req->grp_prev->grp_next = req->grp_next;
521
522 if (grp->grp_first == req)
523 grp->grp_first = req->grp_next;
524
525 aio_grp_dec (grp);
526 }
527
528 if (SvTRUE (ERRSV))
529 {
530 req_free (req);
531 croak (0);
532 }
533 }
534
535 static void req_free (aio_req req)
536 {
537 if (req->self)
538 {
539 sv_unmagic (req->self, PERL_MAGIC_ext);
540 SvREFCNT_dec (req->self);
541 }
542
543 SvREFCNT_dec (req->fh);
544 SvREFCNT_dec (req->sv1);
545 SvREFCNT_dec (req->sv2);
546 SvREFCNT_dec (req->callback);
547
548 if (req->flags & FLAG_PTR2_FREE)
549 free (req->ptr2);
550
551 Safefree (req);
552 }
553
554 static void req_cancel_subs (aio_req grp)
555 {
556 aio_req sub;
557
558 if (grp->type != REQ_GROUP)
559 return;
560
561 SvREFCNT_dec (grp->sv2);
562 grp->sv2 = 0;
563
564 for (sub = grp->grp_first; sub; sub = sub->grp_next)
565 req_cancel (sub);
566 }
567
568 static void req_cancel (aio_req req)
569 {
570 req->flags |= FLAG_CANCELLED;
571
572 req_cancel_subs (req);
573 }
574
575 static void *aio_proc(void *arg);
576
577 static void start_thread (void)
578 {
579 sigset_t fullsigset, oldsigset;
580 pthread_attr_t attr;
581
582 worker *wrk = calloc (1, sizeof (worker));
583
584 if (!wrk)
585 croak ("unable to allocate worker thread data");
586
587 pthread_attr_init (&attr);
588 pthread_attr_setstacksize (&attr, STACKSIZE);
589 pthread_attr_setdetachstate (&attr, PTHREAD_CREATE_DETACHED);
590 #ifdef PTHREAD_SCOPE_PROCESS
591 pthread_attr_setscope (&attr, PTHREAD_SCOPE_PROCESS);
592 #endif
593
594 sigfillset (&fullsigset);
595
596 LOCK (wrklock);
597 sigprocmask (SIG_SETMASK, &fullsigset, &oldsigset);
598
599 if (pthread_create (&wrk->tid, &attr, aio_proc, (void *)wrk) == 0)
600 {
601 wrk->prev = &wrk_first;
602 wrk->next = wrk_first.next;
603 wrk_first.next->prev = wrk;
604 wrk_first.next = wrk;
605 ++started;
606 }
607 else
608 free (wrk);
609
610 sigprocmask (SIG_SETMASK, &oldsigset, 0);
611 UNLOCK (wrklock);
612 }
613
614 static void maybe_start_thread ()
615 {
616 if (get_nthreads () >= wanted)
617 return;
618
619 /* todo: maybe use idle here, but might be less exact */
620 if (0 <= (int)get_nthreads () + (int)get_npending () - (int)nreqs)
621 return;
622
623 start_thread ();
624 }
625
626 static void req_send (aio_req req)
627 {
628 ++nreqs;
629
630 LOCK (reqlock);
631 ++nready;
632 reqq_push (&req_queue, req);
633 pthread_cond_signal (&reqwait);
634 UNLOCK (reqlock);
635
636 maybe_start_thread ();
637 }
638
639 static void end_thread (void)
640 {
641 aio_req req;
642
643 Newz (0, req, 1, aio_cb);
644
645 req->type = REQ_QUIT;
646 req->pri = PRI_MAX + PRI_BIAS;
647
648 LOCK (reqlock);
649 reqq_push (&req_queue, req);
650 pthread_cond_signal (&reqwait);
651 UNLOCK (reqlock);
652
653 LOCK (wrklock);
654 --started;
655 UNLOCK (wrklock);
656 }
657
658 static void set_max_idle (int nthreads)
659 {
660 if (WORDACCESS_UNSAFE) LOCK (reqlock);
661 max_idle = nthreads <= 0 ? 1 : nthreads;
662 if (WORDACCESS_UNSAFE) UNLOCK (reqlock);
663 }
664
665 static void min_parallel (int nthreads)
666 {
667 if (wanted < nthreads)
668 wanted = nthreads;
669 }
670
671 static void max_parallel (int nthreads)
672 {
673 if (wanted > nthreads)
674 wanted = nthreads;
675
676 while (started > wanted)
677 end_thread ();
678 }
679
680 static void poll_wait ()
681 {
682 fd_set rfd;
683
684 while (nreqs)
685 {
686 int size;
687 if (WORDACCESS_UNSAFE) LOCK (reslock);
688 size = res_queue.size;
689 if (WORDACCESS_UNSAFE) UNLOCK (reslock);
690
691 if (size)
692 return;
693
694 maybe_start_thread ();
695
696 FD_ZERO(&rfd);
697 FD_SET(respipe [0], &rfd);
698
699 select (respipe [0] + 1, &rfd, 0, 0, 0);
700 }
701 }
702
703 static int poll_cb ()
704 {
705 dSP;
706 int count = 0;
707 int maxreqs = max_poll_reqs;
708 int do_croak = 0;
709 struct timeval tv_start, tv_now;
710 aio_req req;
711
712 if (max_poll_time)
713 gettimeofday (&tv_start, 0);
714
715 for (;;)
716 {
717 for (;;)
718 {
719 maybe_start_thread ();
720
721 LOCK (reslock);
722 req = reqq_shift (&res_queue);
723
724 if (req)
725 {
726 --npending;
727
728 if (!res_queue.size)
729 {
730 /* read any signals sent by the worker threads */
731 char buf [32];
732 while (read (respipe [0], buf, 32) == 32)
733 ;
734 }
735 }
736
737 UNLOCK (reslock);
738
739 if (!req)
740 break;
741
742 --nreqs;
743
744 if (req->type == REQ_GROUP && req->size)
745 {
746 req->int1 = 1; /* mark request as delayed */
747 continue;
748 }
749 else
750 {
751 req_invoke (req);
752
753 count++;
754 }
755
756 req_free (req);
757
758 if (maxreqs && !--maxreqs)
759 break;
760
761 if (max_poll_time)
762 {
763 gettimeofday (&tv_now, 0);
764
765 if (tvdiff (&tv_start, &tv_now) >= max_poll_time)
766 break;
767 }
768 }
769
770 if (nreqs <= max_outstanding)
771 break;
772
773 poll_wait ();
774
775 ++maxreqs;
776 }
777
778 return count;
779 }
780
781 static void create_pipe ()
782 {
783 if (pipe (respipe))
784 croak ("unable to initialize result pipe");
785
786 if (fcntl (respipe [0], F_SETFL, O_NONBLOCK))
787 croak ("cannot set result pipe to nonblocking mode");
788
789 if (fcntl (respipe [1], F_SETFL, O_NONBLOCK))
790 croak ("cannot set result pipe to nonblocking mode");
791 }
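/*
 * a minimal sketch, not part of the module, of how the result pipe is meant
 * to be consumed: a worker writes one dummy byte to respipe [1] only when
 * res_queue goes from empty to non-empty (see aio_proc), so an external
 * event loop can watch respipe [0] (exposed as poll_fileno ()) for
 * readability and then call poll_cb (), which drains the pipe once the
 * queue is empty again. this mirrors the poll_wait ()/poll_cb () pair used
 * by the flush and poll XS functions below.
 */
#if 0
static void external_loop_example (void)
{
  while (nreqs)
    {
      fd_set rfd;

      FD_ZERO (&rfd);
      FD_SET (respipe [0], &rfd);

      select (respipe [0] + 1, &rfd, 0, 0, 0);
      poll_cb ();
    }
}
#endif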
792
793 /*****************************************************************************/
794 /* work around various missing functions */
795
796 #if !HAVE_PREADWRITE
797 # define pread aio_pread
798 # define pwrite aio_pwrite
799
800 /*
801 * make our pread/pwrite safe against themselves, but not against
802 * normal read/write by using a mutex. slows down execution a lot,
803 * but that's your problem, not mine.
804 */
805 static pthread_mutex_t preadwritelock = PTHREAD_MUTEX_INITIALIZER;
806
807 static ssize_t pread (int fd, void *buf, size_t count, off_t offset)
808 {
809 ssize_t res;
810 off_t ooffset;
811
812 LOCK (preadwritelock);
813 ooffset = lseek (fd, 0, SEEK_CUR);
814 lseek (fd, offset, SEEK_SET);
815 res = read (fd, buf, count);
816 lseek (fd, ooffset, SEEK_SET);
817 UNLOCK (preadwritelock);
818
819 return res;
820 }
821
822 static ssize_t pwrite (int fd, void *buf, size_t count, off_t offset)
823 {
824 ssize_t res;
825 off_t ooffset;
826
827 LOCK (preadwritelock);
828 ooffset = lseek (fd, 0, SEEK_CUR);
829 lseek (fd, offset, SEEK_SET);
830 res = write (fd, buf, count);
831 lseek (fd, ooffset, SEEK_SET);
832 UNLOCK (preadwritelock);
833
834 return res;
835 }
836 #endif
837
838 #if !HAVE_FDATASYNC
839 # define fdatasync fsync
840 #endif
841
842 #if !HAVE_READAHEAD
843 # define readahead(fd,offset,count) aio_readahead (fd, offset, count, self)
844
845 static ssize_t aio_readahead (int fd, off_t offset, size_t count, worker *self)
846 {
847 dBUF;
848
849 while (count > 0)
850 {
851 size_t len = count < AIO_BUFSIZE ? count : AIO_BUFSIZE;
852
853 pread (fd, aio_buf, len, offset);
854 offset += len;
855 count -= len;
856 }
857
858 errno = 0; return 0; /* like readahead(2), report success as 0 */
859 }
860
861 #endif
862
863 #if !HAVE_READDIR_R
864 # define readdir_r aio_readdir_r
865
866 static pthread_mutex_t readdirlock = PTHREAD_MUTEX_INITIALIZER;
867
868 static int readdir_r (DIR *dirp, struct dirent *ent, struct dirent **res)
869 {
870 struct dirent *e;
871 int errorno;
872
873 LOCK (readdirlock);
874
875 e = readdir (dirp);
876 errorno = errno;
877
878 if (e)
879 {
880 *res = ent;
881 strcpy (ent->d_name, e->d_name);
882 }
883 else
884 *res = 0;
885
886 UNLOCK (readdirlock);
887
888 errno = errorno;
889 return e ? 0 : -1;
890 }
891 #endif
892
893 /* sendfile always needs emulation */
894 static ssize_t sendfile_ (int ofd, int ifd, off_t offset, size_t count, worker *self)
895 {
896 ssize_t res;
897
898 if (!count)
899 return 0;
900
901 #if HAVE_SENDFILE
902 # if __linux
903 res = sendfile (ofd, ifd, &offset, count);
904
905 # elif __freebsd
906 /*
907 * Of course, the freebsd sendfile is a dire hack with no thoughts
908 * wasted on making it similar to other I/O functions.
909 */
910 {
911 off_t sbytes;
912 res = sendfile (ifd, ofd, offset, count, 0, &sbytes, 0);
913
914 if (res < 0 && sbytes)
915 /* maybe only on EAGAIN: as usual, the manpage leaves you guessing */
916 res = sbytes;
917 }
918
919 # elif __hpux
920 res = sendfile (ofd, ifd, offset, count, 0, 0);
921
922 # elif __solaris
923 {
924 struct sendfilevec vec;
925 size_t sbytes;
926
927 vec.sfv_fd = ifd;
928 vec.sfv_flag = 0;
929 vec.sfv_off = offset;
930 vec.sfv_len = count;
931
932 res = sendfilev (ofd, &vec, 1, &sbytes);
933
934 if (res < 0 && sbytes)
935 res = sbytes;
936 }
937
938 # endif
939 #else
940 res = -1;
941 errno = ENOSYS;
942 #endif
943
944 if (res < 0
945 && (errno == ENOSYS || errno == EINVAL || errno == ENOTSOCK
946 #if __solaris
947 || errno == EAFNOSUPPORT || errno == EPROTOTYPE
948 #endif
949 )
950 )
951 {
952 /* emulate sendfile. this is a major pain in the ass */
953 dBUF;
954
955 res = 0;
956
957 while (count)
958 {
959 ssize_t cnt;
960
961 cnt = pread (ifd, aio_buf, count > AIO_BUFSIZE ? AIO_BUFSIZE : count, offset);
962
963 if (cnt <= 0)
964 {
965 if (cnt && !res) res = -1;
966 break;
967 }
968
969 cnt = write (ofd, aio_buf, cnt);
970
971 if (cnt <= 0)
972 {
973 if (cnt && !res) res = -1;
974 break;
975 }
976
977 offset += cnt;
978 res += cnt;
979 count -= cnt;
980 }
981 }
982
983 return res;
984 }
985
986 /* read a full directory */
987 static void scandir_ (aio_req req, worker *self)
988 {
989 DIR *dirp;
990 union
991 {
992 struct dirent d;
993 char b [offsetof (struct dirent, d_name) + NAME_MAX + 1];
994 } *u;
995 struct dirent *entp;
996 char *name, *names;
997 int memlen = 4096;
998 int memofs = 0;
999 int res = 0;
1000 int errorno;
1001
1002 LOCK (wrklock);
1003 self->dirp = dirp = opendir (req->ptr1);
1004 self->dbuf = u = malloc (sizeof (*u));
1005 req->flags |= FLAG_PTR2_FREE;
1006 req->ptr2 = names = malloc (memlen);
1007 UNLOCK (wrklock);
1008
1009 if (dirp && u && names)
1010 for (;;)
1011 {
1012 errno = 0;
1013 readdir_r (dirp, &u->d, &entp);
1014
1015 if (!entp)
1016 break;
1017
1018 name = entp->d_name;
1019
1020 if (name [0] != '.' || (name [1] && (name [1] != '.' || name [2])))
1021 {
1022 int len = strlen (name) + 1;
1023
1024 res++;
1025
1026 while (memofs + len > memlen)
1027 {
1028 memlen *= 2;
1029 LOCK (wrklock);
1030 req->ptr2 = names = realloc (names, memlen);
1031 UNLOCK (wrklock);
1032
1033 if (!names)
1034 break;
1035 }
1036 if (!names) break; /* realloc failed, stop scanning */
1037 memcpy (names + memofs, name, len);
1038 memofs += len;
1039 }
1040 }
1041
1042 if (errno)
1043 res = -1;
1044
1045 req->result = res;
1046 }
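/*
 * a hypothetical consumer, not part of the module, spelling out the result
 * format scandir_ produces: req->ptr2 is a packed buffer of req->result
 * names (excluding "." and ".."), each terminated by a NUL byte, exactly
 * as req_invoke () unpacks it for REQ_READDIR.
 */
#if 0
static size_t readdir_result_bytes (aio_req req)
{
  int i;
  size_t total = 0;
  char *name = req->ptr2;

  for (i = 0; i < req->result; ++i)
    {
      size_t len = strlen (name) + 1; /* name plus its terminating NUL */

      total += len;
      name  += len;
    }

  return total; /* equals memofs at the end of scandir_ */
}
#endif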
1047
1048 /*****************************************************************************/
1049
1050 static void *aio_proc (void *thr_arg)
1051 {
1052 aio_req req;
1053 struct timespec ts;
1054 worker *self = (worker *)thr_arg;
1055
1056 /* try to distribute timeouts somewhat evenly */
1057 ts.tv_nsec = ((unsigned long)self & 1023UL)
1058 * (1000000000UL / 1024UL);
1059
1060 for (;;)
1061 {
1062 ts.tv_sec = time (0) + IDLE_TIMEOUT;
1063
1064 LOCK (reqlock);
1065
1066 for (;;)
1067 {
1068 self->req = req = reqq_shift (&req_queue);
1069
1070 if (req)
1071 break;
1072
1073 ++idle;
1074
1075 if (pthread_cond_timedwait (&reqwait, &reqlock, &ts)
1076 == ETIMEDOUT)
1077 {
1078 if (idle > max_idle)
1079 {
1080 --idle;
1081 UNLOCK (reqlock);
1082 LOCK (wrklock);
1083 --started;
1084 UNLOCK (wrklock);
1085 goto quit;
1086 }
1087
1088 /* we are allowed to idle, so do so without any timeout */
1089 pthread_cond_wait (&reqwait, &reqlock);
1090 ts.tv_sec = time (0) + IDLE_TIMEOUT;
1091 }
1092
1093 --idle;
1094 }
1095
1096 --nready;
1097
1098 UNLOCK (reqlock);
1099
1100 errno = 0; /* strictly unnecessary */
1101
1102 if (!(req->flags & FLAG_CANCELLED))
1103 switch (req->type)
1104 {
1105 case REQ_READ: req->result = pread (req->int1, req->ptr1, req->size, req->offs); break;
1106 case REQ_WRITE: req->result = pwrite (req->int1, req->ptr1, req->size, req->offs); break;
1107
1108 case REQ_READAHEAD: req->result = readahead (req->int1, req->offs, req->size); break;
1109 case REQ_SENDFILE: req->result = sendfile_ (req->int1, req->int2, req->offs, req->size, self); break;
1110
1111 case REQ_STAT: req->result = stat (req->ptr1, (Stat_t *)req->ptr2); break;
1112 case REQ_LSTAT: req->result = lstat (req->ptr1, (Stat_t *)req->ptr2); break;
1113 case REQ_FSTAT: req->result = fstat (req->int1, (Stat_t *)req->ptr2); break;
1114
1115 case REQ_OPEN: req->result = open (req->ptr1, req->int1, req->mode); break;
1116 case REQ_CLOSE: req->result = close (req->int1); break;
1117 case REQ_UNLINK: req->result = unlink (req->ptr1); break;
1118 case REQ_RMDIR: req->result = rmdir (req->ptr1); break;
1119 case REQ_RENAME: req->result = rename (req->ptr2, req->ptr1); break;
1120 case REQ_LINK: req->result = link (req->ptr2, req->ptr1); break;
1121 case REQ_SYMLINK: req->result = symlink (req->ptr2, req->ptr1); break;
1122 case REQ_MKNOD: req->result = mknod (req->ptr2, req->mode, (dev_t)req->offs); break;
1123 case REQ_READLINK: req->result = readlink (req->ptr2, req->ptr1, NAME_MAX); break;
1124
1125 case REQ_FDATASYNC: req->result = fdatasync (req->int1); break;
1126 case REQ_FSYNC: req->result = fsync (req->int1); break;
1127 case REQ_READDIR: scandir_ (req, self); break;
1128
1129 case REQ_BUSY:
1130 {
1131 struct timeval tv;
1132
1133 tv.tv_sec = req->int1;
1134 tv.tv_usec = req->int2;
1135
1136 req->result = select (0, 0, 0, 0, &tv);
1137 }
1138
1139 case REQ_GROUP:
1140 case REQ_NOP:
1141 break;
1142
1143 case REQ_QUIT:
1144 goto quit;
1145
1146 default:
1147 req->result = ENOSYS;
1148 break;
1149 }
1150
1151 req->errorno = errno;
1152
1153 LOCK (reslock);
1154
1155 ++npending;
1156
1157 if (!reqq_push (&res_queue, req))
1158 /* write a dummy byte to the pipe so fh becomes ready */
1159 write (respipe [1], &respipe, 1);
1160
1161 self->req = 0;
1162 worker_clear (self);
1163
1164 UNLOCK (reslock);
1165 }
1166
1167 quit:
1168 LOCK (wrklock);
1169 worker_free (self);
1170 UNLOCK (wrklock);
1171
1172 return 0;
1173 }
1174
1175 /*****************************************************************************/
1176
1177 static void atfork_prepare (void)
1178 {
1179 LOCK (wrklock);
1180 LOCK (reqlock);
1181 LOCK (reslock);
1182 #if !HAVE_PREADWRITE
1183 LOCK (preadwritelock);
1184 #endif
1185 #if !HAVE_READDIR_R
1186 LOCK (readdirlock);
1187 #endif
1188 }
1189
1190 static void atfork_parent (void)
1191 {
1192 #if !HAVE_READDIR_R
1193 UNLOCK (readdirlock);
1194 #endif
1195 #if !HAVE_PREADWRITE
1196 UNLOCK (preadwritelock);
1197 #endif
1198 UNLOCK (reslock);
1199 UNLOCK (reqlock);
1200 UNLOCK (wrklock);
1201 }
1202
1203 static void atfork_child (void)
1204 {
1205 aio_req prv;
1206
1207 while (prv = reqq_shift (&req_queue))
1208 req_free (prv);
1209
1210 while (prv = reqq_shift (&res_queue))
1211 req_free (prv);
1212
1213 while (wrk_first.next != &wrk_first)
1214 {
1215 worker *wrk = wrk_first.next;
1216
1217 if (wrk->req)
1218 req_free (wrk->req);
1219
1220 worker_clear (wrk);
1221 worker_free (wrk);
1222 }
1223
1224 started = 0;
1225 idle = 0;
1226 nreqs = 0;
1227 nready = 0;
1228 npending = 0;
1229
1230 close (respipe [0]);
1231 close (respipe [1]);
1232 create_pipe ();
1233
1234 atfork_parent ();
1235 }
1236
1237 #define dREQ \
1238 aio_req req; \
1239 int req_pri = next_pri; \
1240 next_pri = DEFAULT_PRI + PRI_BIAS; \
1241 \
1242 if (SvOK (callback) && !SvROK (callback)) \
1243 croak ("callback must be undef or of reference type"); \
1244 \
1245 Newz (0, req, 1, aio_cb); \
1246 if (!req) \
1247 croak ("out of memory during aio_req allocation"); \
1248 \
1249 req->callback = newSVsv (callback); \
1250 req->pri = req_pri
1251
1252 #define REQ_SEND \
1253 req_send (req); \
1254 \
1255 if (GIMME_V != G_VOID) \
1256 XPUSHs (req_sv (req, AIO_REQ_KLASS));
1257
1258 MODULE = IO::AIO PACKAGE = IO::AIO
1259
1260 PROTOTYPES: ENABLE
1261
1262 BOOT:
1263 {
1264 HV *stash = gv_stashpv ("IO::AIO", 1);
1265
1266 newCONSTSUB (stash, "EXDEV", newSViv (EXDEV));
1267 newCONSTSUB (stash, "O_RDONLY", newSViv (O_RDONLY));
1268 newCONSTSUB (stash, "O_WRONLY", newSViv (O_WRONLY));
1269 newCONSTSUB (stash, "O_CREAT", newSViv (O_CREAT));
1270 newCONSTSUB (stash, "O_TRUNC", newSViv (O_TRUNC));
1271 newCONSTSUB (stash, "S_IFIFO", newSViv (S_IFIFO));
1272
1273 create_pipe ();
1274 pthread_atfork (atfork_prepare, atfork_parent, atfork_child);
1275 }
1276
1277 void
1278 max_poll_reqs (int nreqs)
1279 PROTOTYPE: $
1280 CODE:
1281 max_poll_reqs = nreqs;
1282
1283 void
1284 max_poll_time (double nseconds)
1285 PROTOTYPE: $
1286 CODE:
1287 max_poll_time = nseconds * AIO_TICKS;
1288
1289 void
1290 min_parallel (int nthreads)
1291 PROTOTYPE: $
1292
1293 void
1294 max_parallel (int nthreads)
1295 PROTOTYPE: $
1296
1297 void
1298 max_idle (int nthreads)
1299 PROTOTYPE: $
1300 CODE:
1301 set_max_idle (nthreads);
1302
1303 int
1304 max_outstanding (int maxreqs)
1305 PROTOTYPE: $
1306 CODE:
1307 RETVAL = max_outstanding;
1308 max_outstanding = maxreqs;
1309 OUTPUT:
1310 RETVAL
1311
1312 void
1313 aio_open (pathname,flags,mode,callback=&PL_sv_undef)
1314 SV * pathname
1315 int flags
1316 int mode
1317 SV * callback
1318 PROTOTYPE: $$$;$
1319 PPCODE:
1320 {
1321 dREQ;
1322
1323 req->type = REQ_OPEN;
1324 req->sv1 = newSVsv (pathname);
1325 req->ptr1 = SvPVbyte_nolen (pathname);
1326 req->int1 = flags;
1327 req->mode = mode;
1328
1329 REQ_SEND;
1330 }
1331
1332 void
1333 aio_close (fh,callback=&PL_sv_undef)
1334 SV * fh
1335 SV * callback
1336 PROTOTYPE: $;$
1337 ALIAS:
1338 aio_close = REQ_CLOSE
1339 aio_fsync = REQ_FSYNC
1340 aio_fdatasync = REQ_FDATASYNC
1341 PPCODE:
1342 {
1343 dREQ;
1344
1345 req->type = ix;
1346 req->fh = newSVsv (fh);
1347 req->int1 = PerlIO_fileno (IoIFP (sv_2io (fh)));
1348
1349 REQ_SEND;
1350 }
1351
1352 void
1353 aio_read (fh,offset,length,data,dataoffset,callback=&PL_sv_undef)
1354 SV * fh
1355 UV offset
1356 IV length
1357 SV * data
1358 IV dataoffset
1359 SV * callback
1360 ALIAS:
1361 aio_read = REQ_READ
1362 aio_write = REQ_WRITE
1363 PROTOTYPE: $$$$$;$
1364 PPCODE:
1365 {
1366 STRLEN svlen;
1367 char *svptr = SvPVbyte (data, svlen);
1368
1369 SvUPGRADE (data, SVt_PV);
1370 SvPOK_on (data);
1371
1372 if (dataoffset < 0)
1373 dataoffset += svlen;
1374
1375 if (dataoffset < 0 || dataoffset > svlen)
1376 croak ("data offset outside of string");
1377
1378 if (ix == REQ_WRITE)
1379 {
1380 /* write: check length and adjust. */
1381 if (length < 0 || length + dataoffset > svlen)
1382 length = svlen - dataoffset;
1383 }
1384 else
1385 {
1386 /* read: grow scalar as necessary */
1387 svptr = SvGROW (data, length + dataoffset);
1388 }
1389
1390 if (length < 0)
1391 croak ("length must not be negative");
1392
1393 {
1394 dREQ;
1395
1396 req->type = ix;
1397 req->fh = newSVsv (fh);
1398 req->int1 = PerlIO_fileno (ix == REQ_READ ? IoIFP (sv_2io (fh))
1399 : IoOFP (sv_2io (fh)));
1400 req->offs = offset;
1401 req->size = length;
1402 req->sv1 = SvREFCNT_inc (data);
1403 req->ptr1 = (char *)svptr + dataoffset;
1404 req->stroffset = dataoffset;
1405
1406 if (!SvREADONLY (data))
1407 {
1408 SvREADONLY_on (data);
1409 req->flags |= FLAG_SV1_RO_OFF;
1410 }
1411
1412 REQ_SEND;
1413 }
1414 }
1415
1416 void
1417 aio_readlink (path,callback=&PL_sv_undef)
1418 SV * path
1419 SV * callback
1420 PROTOTYPE: $;$
1421 PPCODE:
1422 {
1423 SV *data;
1424 dREQ;
1425
1426 data = newSV (NAME_MAX);
1427 SvPOK_on (data);
1428
1429 req->type = REQ_READLINK;
1430 req->fh = newSVsv (path);
1431 req->ptr2 = SvPVbyte_nolen (path);
1432 req->sv1 = data;
1433 req->ptr1 = SvPVbyte_nolen (data);
1434
1435 REQ_SEND;
1436 }
1437
1438 void
1439 aio_sendfile (out_fh,in_fh,in_offset,length,callback=&PL_sv_undef)
1440 SV * out_fh
1441 SV * in_fh
1442 UV in_offset
1443 UV length
1444 SV * callback
1445 PROTOTYPE: $$$$;$
1446 PPCODE:
1447 {
1448 dREQ;
1449
1450 req->type = REQ_SENDFILE;
1451 req->fh = newSVsv (out_fh);
1452 req->int1 = PerlIO_fileno (IoIFP (sv_2io (out_fh)));
1453 req->sv2 = newSVsv (in_fh);
1454 req->int2 = PerlIO_fileno (IoIFP (sv_2io (in_fh)));
1455 req->offs = in_offset;
1456 req->size = length;
1457
1458 REQ_SEND;
1459 }
1460
1461 void
1462 aio_readahead (fh,offset,length,callback=&PL_sv_undef)
1463 SV * fh
1464 UV offset
1465 IV length
1466 SV * callback
1467 PROTOTYPE: $$$;$
1468 PPCODE:
1469 {
1470 dREQ;
1471
1472 req->type = REQ_READAHEAD;
1473 req->fh = newSVsv (fh);
1474 req->int1 = PerlIO_fileno (IoIFP (sv_2io (fh)));
1475 req->offs = offset;
1476 req->size = length;
1477
1478 REQ_SEND;
1479 }
1480
1481 void
1482 aio_stat (fh_or_path,callback=&PL_sv_undef)
1483 SV * fh_or_path
1484 SV * callback
1485 ALIAS:
1486 aio_stat = REQ_STAT
1487 aio_lstat = REQ_LSTAT
1488 PPCODE:
1489 {
1490 dREQ;
1491
1492 req->ptr2 = malloc (sizeof (Stat_t));
1493 if (!req->ptr2)
1494 {
1495 req_free (req);
1496 croak ("out of memory during aio_stat statdata allocation");
1497 }
1498
1499 req->flags |= FLAG_PTR2_FREE;
1500
1501 if (SvPOK (fh_or_path))
1502 {
1503 req->type = ix;
1504 req->sv1 = newSVsv (fh_or_path);
1505 req->ptr1 = SvPVbyte_nolen (fh_or_path);
1506 }
1507 else
1508 {
1509 req->type = REQ_FSTAT;
1510 req->fh = newSVsv (fh_or_path);
1511 req->int1 = PerlIO_fileno (IoIFP (sv_2io (fh_or_path)));
1512 }
1513
1514 REQ_SEND;
1515 }
1516
1517 void
1518 aio_unlink (pathname,callback=&PL_sv_undef)
1519 SV * pathname
1520 SV * callback
1521 ALIAS:
1522 aio_unlink = REQ_UNLINK
1523 aio_rmdir = REQ_RMDIR
1524 aio_readdir = REQ_READDIR
1525 PPCODE:
1526 {
1527 dREQ;
1528
1529 req->type = ix;
1530 req->sv1 = newSVsv (pathname);
1531 req->ptr1 = SvPVbyte_nolen (pathname);
1532
1533 REQ_SEND;
1534 }
1535
1536 void
1537 aio_link (oldpath,newpath,callback=&PL_sv_undef)
1538 SV * oldpath
1539 SV * newpath
1540 SV * callback
1541 ALIAS:
1542 aio_link = REQ_LINK
1543 aio_symlink = REQ_SYMLINK
1544 aio_rename = REQ_RENAME
1545 PPCODE:
1546 {
1547 dREQ;
1548
1549 req->type = ix;
1550 req->fh = newSVsv (oldpath);
1551 req->ptr2 = SvPVbyte_nolen (req->fh);
1552 req->sv1 = newSVsv (newpath);
1553 req->ptr1 = SvPVbyte_nolen (newpath);
1554
1555 REQ_SEND;
1556 }
1557
1558 void
1559 aio_mknod (pathname,mode,dev,callback=&PL_sv_undef)
1560 SV * pathname
1561 SV * callback
1562 UV mode
1563 UV dev
1564 PPCODE:
1565 {
1566 dREQ;
1567
1568 req->type = REQ_MKNOD;
1569 req->sv1 = newSVsv (pathname);
1570 req->ptr1 = SvPVbyte_nolen (pathname);
1571 req->mode = (mode_t)mode;
1572 req->offs = dev;
1573
1574 REQ_SEND;
1575 }
1576
1577 void
1578 aio_busy (delay,callback=&PL_sv_undef)
1579 double delay
1580 SV * callback
1581 PPCODE:
1582 {
1583 dREQ;
1584
1585 req->type = REQ_BUSY;
1586 req->int1 = delay < 0. ? 0 : delay;
1587 req->int2 = delay < 0. ? 0 : 1000000. * (delay - req->int1); /* tv_usec is in microseconds */
1588
1589 REQ_SEND;
1590 }
1591
1592 void
1593 aio_group (callback=&PL_sv_undef)
1594 SV * callback
1595 PROTOTYPE: ;$
1596 PPCODE:
1597 {
1598 dREQ;
1599
1600 req->type = REQ_GROUP;
1601
1602 req_send (req);
1603 XPUSHs (req_sv (req, AIO_GRP_KLASS));
1604 }
1605
1606 void
1607 aio_nop (callback=&PL_sv_undef)
1608 SV * callback
1609 PPCODE:
1610 {
1611 dREQ;
1612
1613 req->type = REQ_NOP;
1614
1615 REQ_SEND;
1616 }
1617
1618 int
1619 aioreq_pri (int pri = 0)
1620 PROTOTYPE: ;$
1621 CODE:
1622 RETVAL = next_pri - PRI_BIAS;
1623 if (items > 0)
1624 {
1625 if (pri < PRI_MIN) pri = PRI_MIN;
1626 if (pri > PRI_MAX) pri = PRI_MAX;
1627 next_pri = pri + PRI_BIAS;
1628 }
1629 OUTPUT:
1630 RETVAL
1631
1632 void
1633 aioreq_nice (int nice = 0)
1634 CODE:
1635 nice = next_pri - PRI_BIAS - nice; /* work in unbiased priorities, like aioreq_pri */
1636 if (nice < PRI_MIN) nice = PRI_MIN;
1637 if (nice > PRI_MAX) nice = PRI_MAX;
1638 next_pri = nice + PRI_BIAS;
1639
1640 void
1641 flush ()
1642 PROTOTYPE:
1643 CODE:
1644 while (nreqs)
1645 {
1646 poll_wait ();
1647 poll_cb ();
1648 }
1649
1650 void
1651 poll()
1652 PROTOTYPE:
1653 CODE:
1654 if (nreqs)
1655 {
1656 poll_wait ();
1657 poll_cb ();
1658 }
1659
1660 int
1661 poll_fileno()
1662 PROTOTYPE:
1663 CODE:
1664 RETVAL = respipe [0];
1665 OUTPUT:
1666 RETVAL
1667
1668 int
1669 poll_cb(...)
1670 PROTOTYPE:
1671 CODE:
1672 RETVAL = poll_cb ();
1673 OUTPUT:
1674 RETVAL
1675
1676 void
1677 poll_wait()
1678 PROTOTYPE:
1679 CODE:
1680 if (nreqs)
1681 poll_wait ();
1682
1683 int
1684 nreqs()
1685 PROTOTYPE:
1686 CODE:
1687 RETVAL = nreqs;
1688 OUTPUT:
1689 RETVAL
1690
1691 int
1692 nready()
1693 PROTOTYPE:
1694 CODE:
1695 RETVAL = get_nready ();
1696 OUTPUT:
1697 RETVAL
1698
1699 int
1700 npending()
1701 PROTOTYPE:
1702 CODE:
1703 RETVAL = get_npending ();
1704 OUTPUT:
1705 RETVAL
1706
1707 int
1708 nthreads()
1709 PROTOTYPE:
1710 CODE:
1711 if (WORDACCESS_UNSAFE) LOCK (wrklock);
1712 RETVAL = started;
1713 if (WORDACCESS_UNSAFE) UNLOCK (wrklock);
1714 OUTPUT:
1715 RETVAL
1716
1717 PROTOTYPES: DISABLE
1718
1719 MODULE = IO::AIO PACKAGE = IO::AIO::REQ
1720
1721 void
1722 cancel (aio_req_ornot req)
1723 CODE:
1724 req_cancel (req);
1725
1726 void
1727 cb (aio_req_ornot req, SV *callback=&PL_sv_undef)
1728 CODE:
1729 SvREFCNT_dec (req->callback);
1730 req->callback = newSVsv (callback);
1731
1732 MODULE = IO::AIO PACKAGE = IO::AIO::GRP
1733
1734 void
1735 add (aio_req grp, ...)
1736 PPCODE:
1737 {
1738 int i;
1739 aio_req req;
1740
1741 if (grp->int1 == 2)
1742 croak ("cannot add requests to IO::AIO::GRP after the group finished");
1743
1744 for (i = 1; i < items; ++i )
1745 {
1746 if (GIMME_V != G_VOID)
1747 XPUSHs (sv_2mortal (newSVsv (ST (i))));
1748
1749 req = SvAIO_REQ (ST (i));
1750
1751 if (req)
1752 {
1753 ++grp->size;
1754 req->grp = grp;
1755
1756 req->grp_prev = 0;
1757 req->grp_next = grp->grp_first;
1758
1759 if (grp->grp_first)
1760 grp->grp_first->grp_prev = req;
1761
1762 grp->grp_first = req;
1763 }
1764 }
1765 }
1766
1767 void
1768 cancel_subs (aio_req_ornot req)
1769 CODE:
1770 req_cancel_subs (req);
1771
1772 void
1773 result (aio_req grp, ...)
1774 CODE:
1775 {
1776 int i;
1777 AV *av;
1778
1779 grp->errorno = errno;
1780
1781 av = newAV ();
1782
1783 for (i = 1; i < items; ++i )
1784 av_push (av, newSVsv (ST (i)));
1785
1786 SvREFCNT_dec (grp->sv1);
1787 grp->sv1 = (SV *)av;
1788 }
1789
1790 void
1791 errno (aio_req grp, int errorno = errno)
1792 CODE:
1793 grp->errorno = errorno;
1794
1795 void
1796 limit (aio_req grp, int limit)
1797 CODE:
1798 grp->int2 = limit;
1799 aio_grp_feed (grp);
1800
1801 void
1802 feed (aio_req grp, SV *callback=&PL_sv_undef)
1803 CODE:
1804 {
1805 SvREFCNT_dec (grp->sv2);
1806 grp->sv2 = newSVsv (callback);
1807
1808 if (grp->int2 <= 0)
1809 grp->int2 = 2;
1810
1811 aio_grp_feed (grp);
1812 }
1813