/cvs/IO-AIO/AIO.xs
Revision: 1.91
Committed: Wed Nov 8 01:57:43 2006 UTC (17 years, 6 months ago) by root
Branch: MAIN
Changes since 1.90: +7 -3 lines
Log Message:
*** empty log message ***

1 /* solaris */
2 #define _POSIX_PTHREAD_SEMANTICS 1
3
4 #if __linux && !defined(_GNU_SOURCE)
5 # define _GNU_SOURCE
6 #endif
7
8 /* just in case */
9 #define _REENTRANT 1
10
11 #include <errno.h>
12
13 #include "EXTERN.h"
14 #include "perl.h"
15 #include "XSUB.h"
16
17 #include "autoconf/config.h"
18
19 #include <pthread.h>
20
21 #include <stddef.h>
22 #include <errno.h>
23 #include <sys/time.h>
24 #include <sys/select.h>
25 #include <sys/types.h>
26 #include <sys/stat.h>
27 #include <limits.h>
28 #include <unistd.h>
29 #include <fcntl.h>
30 #include <signal.h>
31 #include <sched.h>
32
33 #if HAVE_SENDFILE
34 # if __linux
35 # include <sys/sendfile.h>
36 # elif __freebsd
37 # include <sys/socket.h>
38 # include <sys/uio.h>
39 # elif __hpux
40 # include <sys/socket.h>
41 # elif __solaris /* not yet */
42 # include <sys/sendfile.h>
43 # else
44 # error sendfile support requested but not available
45 # endif
46 #endif
47
48 /* number of seconds after which idle threads exit */
49 #define IDLE_TIMEOUT 10
50
51 /* NAME_MAX is used to size struct dirent buffers below; AIX doesn't provide it */
52 #ifndef NAME_MAX
53 # define NAME_MAX 4096
54 #endif
55
56 #ifndef PTHREAD_STACK_MIN
57 /* care for broken platforms, e.g. windows */
58 # define PTHREAD_STACK_MIN 16384
59 #endif
60
61 #if __ia64
62 # define STACKSIZE 65536
63 #elif __i386 || __x86_64 /* 16k is unreasonably high :( */
64 # define STACKSIZE PTHREAD_STACK_MIN
65 #else
66 # define STACKSIZE 16384
67 #endif
68
69 /* whether word reads are potentially non-atomic.
70 * this is conservative; most arches this runs
71 * on likely have atomic word reads/writes.
72 */
73 #ifndef WORDACCESS_UNSAFE
74 # if __i386 || __x86_64
75 # define WORDACCESS_UNSAFE 0
76 # else
77 # define WORDACCESS_UNSAFE 1
78 # endif
79 #endif
80
81 /* buffer size for various temporary buffers */
82 #define AIO_BUFSIZE 65536
83
84 #define dBUF \
85 char *aio_buf; \
86 LOCK (wrklock); \
87 self->dbuf = aio_buf = malloc (AIO_BUFSIZE); \
88 UNLOCK (wrklock); \
89 if (!aio_buf) \
90 return -1;
91
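/* a minimal sketch (not part of the module) of how dBUF is meant to be
 * used inside a request handler: it declares aio_buf, allocates
 * AIO_BUFSIZE bytes under wrklock, stores the pointer in self->dbuf so
 * worker_clear () can release it later, and makes the handler return -1
 * when the allocation fails.  the handler below is hypothetical, and
 * "worker" is only declared further down, so this is illustration only: */
#if 0
static ssize_t example_read_some (int fd, worker *self)
{
  dBUF; /* needs a "worker *self" in scope, like readahead/sendfile below */

  return pread (fd, aio_buf, AIO_BUFSIZE, 0);
}
#endif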
92 typedef SV SV8; /* byte-sv, used for argument-checking */
93
94 enum {
95 REQ_QUIT,
96 REQ_OPEN, REQ_CLOSE,
97 REQ_READ, REQ_WRITE, REQ_READAHEAD,
98 REQ_SENDFILE,
99 REQ_STAT, REQ_LSTAT, REQ_FSTAT,
100 REQ_FSYNC, REQ_FDATASYNC,
101 REQ_UNLINK, REQ_RMDIR, REQ_RENAME,
102 REQ_MKNOD, REQ_READDIR,
103 REQ_LINK, REQ_SYMLINK, REQ_READLINK,
104 REQ_GROUP, REQ_NOP,
105 REQ_BUSY,
106 };
107
108 #define AIO_REQ_KLASS "IO::AIO::REQ"
109 #define AIO_GRP_KLASS "IO::AIO::GRP"
110
111 typedef struct aio_cb
112 {
113 struct aio_cb *volatile next;
114
115 SV *callback, *fh;
116 SV *sv1, *sv2;
117 void *ptr1, *ptr2;
118 off_t offs;
119 size_t size;
120 ssize_t result;
121
122 STRLEN stroffset;
123 int type;
124 int int1, int2;
125 int errorno;
126 mode_t mode; /* open */
127
128 unsigned char flags;
129 unsigned char pri;
130
131 SV *self; /* the perl counterpart of this request, if any */
132 struct aio_cb *grp, *grp_prev, *grp_next, *grp_first;
133 } aio_cb;
134
135 enum {
136 FLAG_CANCELLED = 0x01, /* request was cancelled */
137 FLAG_SV1_RO_OFF = 0x40, /* data was set readonly */
138 FLAG_PTR2_FREE = 0x80, /* need to free(ptr2) */
139 };
140
141 typedef aio_cb *aio_req;
142 typedef aio_cb *aio_req_ornot;
143
144 enum {
145 PRI_MIN = -4,
146 PRI_MAX = 4,
147
148 DEFAULT_PRI = 0,
149 PRI_BIAS = -PRI_MIN,
150 NUM_PRI = PRI_MAX + PRI_BIAS + 1,
151 };
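/* a quick worked example of the mapping above: with PRI_MIN = -4 and
 * PRI_MAX = 4 there are NUM_PRI = 4 + 4 + 1 = 9 priority levels; a user
 * priority p is stored as p + PRI_BIAS, so -4 lands in queue slot 0, the
 * default 0 in slot 4 and 4 in slot 8 (reqq_shift below scans slots from
 * the highest downwards). */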
152
153 #define AIO_TICKS ((1000000 + 1023) >> 10)
154
155 static unsigned int max_poll_time = 0;
156 static unsigned int max_poll_reqs = 0;
157
158 /* calculate time difference in ~1/AIO_TICKS of a second */
159 static int tvdiff (struct timeval *tv1, struct timeval *tv2)
160 {
161 return (tv2->tv_sec - tv1->tv_sec ) * AIO_TICKS
162 + ((tv2->tv_usec - tv1->tv_usec) >> 10);
163 }
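/* worked numbers for the tick arithmetic above: AIO_TICKS is
 * (1000000 + 1023) >> 10 = 977, i.e. roughly one tick per millisecond,
 * and tvdiff () reports elapsed time in the same ~1/1024-second units
 * (seconds scaled by AIO_TICKS, microseconds shifted right by 10).
 * max_poll_time (0.01), for example, stores 0.01 * 977, truncated to
 * 9 ticks. */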
164
165 static int next_pri = DEFAULT_PRI + PRI_BIAS;
166
167 static unsigned int started, idle, wanted;
168
169 #if __linux && defined (PTHREAD_ADAPTIVE_MUTEX_INITIALIZER_NP)
170 # define AIO_MUTEX_INIT PTHREAD_ADAPTIVE_MUTEX_INITIALIZER_NP
171 #else
172 # define AIO_MUTEX_INIT PTHREAD_MUTEX_INITIALIZER
173 #endif
174
175 #define LOCK(mutex) pthread_mutex_lock (&(mutex))
176 #define UNLOCK(mutex) pthread_mutex_unlock (&(mutex))
177
178 /* worker threads management */
179 static pthread_mutex_t wrklock = AIO_MUTEX_INIT;
180
181 typedef struct worker {
182 /* locked by wrklock */
183 struct worker *prev, *next;
184
185 pthread_t tid;
186
187 /* locked by reslock, reqlock or wrklock */
188 aio_req req; /* currently processed request */
189 void *dbuf;
190 DIR *dirp;
191 } worker;
192
193 static worker wrk_first = { &wrk_first, &wrk_first, 0 };
194
195 static void worker_clear (worker *wrk)
196 {
197 if (wrk->dirp)
198 {
199 closedir (wrk->dirp);
200 wrk->dirp = 0;
201 }
202
203 if (wrk->dbuf)
204 {
205 free (wrk->dbuf);
206 wrk->dbuf = 0;
207 }
208 }
209
210 static void worker_free (worker *wrk)
211 {
212 wrk->next->prev = wrk->prev;
213 wrk->prev->next = wrk->next;
214
215 free (wrk);
216 }
217
218 static volatile unsigned int nreqs, nready, npending;
219 static volatile unsigned int max_idle = 4;
220 static volatile unsigned int max_outstanding = 0xffffffff;
221 static int respipe [2];
222
223 static pthread_mutex_t reslock = AIO_MUTEX_INIT;
224 static pthread_mutex_t reqlock = AIO_MUTEX_INIT;
225 static pthread_cond_t reqwait = PTHREAD_COND_INITIALIZER;
226
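/* locking overview (as used throughout this file): wrklock guards the
 * worker list and "started", reqlock guards req_queue, nready and
 * reqwait, reslock guards res_queue and npending; atfork_prepare below
 * takes them in wrklock, reqlock, reslock order. */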
227 #if WORDACCESS_UNSAFE
228
229 static unsigned int get_nready ()
230 {
231 unsigned int retval;
232
233 LOCK (reqlock);
234 retval = nready;
235 UNLOCK (reqlock);
236
237 return retval;
238 }
239
240 static unsigned int get_npending ()
241 {
242 unsigned int retval;
243
244 LOCK (reslock);
245 retval = npending;
246 UNLOCK (reslock);
247
248 return retval;
249 }
250
251 static unsigned int get_nthreads ()
252 {
253 unsigned int retval;
254
255 LOCK (wrklock);
256 retval = started;
257 UNLOCK (wrklock);
258
259 return retval;
260 }
261
262 #else
263
264 # define get_nready() nready
265 # define get_npending() npending
266 # define get_nthreads() started
267
268 #endif
269
270 /*
271 * a somewhat faster data structure might be nice, but
272 * with only NUM_PRI (9) priorities a shift, the most
273 * expensive operation, needs fewer than 20 insns.
274 */
275 typedef struct {
276 aio_req qs[NUM_PRI], qe[NUM_PRI]; /* qstart, qend */
277 int size;
278 } reqq;
279
280 static reqq req_queue;
281 static reqq res_queue;
282
283 int reqq_push (reqq *q, aio_req req)
284 {
285 int pri = req->pri;
286 req->next = 0;
287
288 if (q->qe[pri])
289 {
290 q->qe[pri]->next = req;
291 q->qe[pri] = req;
292 }
293 else
294 q->qe[pri] = q->qs[pri] = req;
295
296 return q->size++;
297 }
298
299 aio_req reqq_shift (reqq *q)
300 {
301 int pri;
302
303 if (!q->size)
304 return 0;
305
306 --q->size;
307
308 for (pri = NUM_PRI; pri--; )
309 {
310 aio_req req = q->qs[pri];
311
312 if (req)
313 {
314 if (!(q->qs[pri] = req->next))
315 q->qe[pri] = 0;
316
317 return req;
318 }
319 }
320
321 abort ();
322 }
323
324 static int poll_cb ();
325 static void req_invoke (aio_req req);
326 static void req_free (aio_req req);
327 static void req_cancel (aio_req req);
328
329 /* must be called at most once */
330 static SV *req_sv (aio_req req, const char *klass)
331 {
332 if (!req->self)
333 {
334 req->self = (SV *)newHV ();
335 sv_magic (req->self, 0, PERL_MAGIC_ext, (char *)req, 0);
336 }
337
338 return sv_2mortal (sv_bless (newRV_inc (req->self), gv_stashpv (klass, 1)));
339 }
340
341 static aio_req SvAIO_REQ (SV *sv)
342 {
343 MAGIC *mg;
344
345 if (!sv_derived_from (sv, AIO_REQ_KLASS) || !SvROK (sv))
346 croak ("object of class " AIO_REQ_KLASS " expected");
347
348 mg = mg_find (SvRV (sv), PERL_MAGIC_ext);
349
350 return mg ? (aio_req)mg->mg_ptr : 0;
351 }
352
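/* group request bookkeeping used by the following two functions (and by
 * the IO::AIO::GRP methods near the end of this file): grp->size counts
 * outstanding subrequests, grp->sv2 holds the feeder callback, grp->int2
 * the feeder limit, and grp->int1 is 1 for "delayed" and 2 for
 * "finished" groups. */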
353 static void aio_grp_feed (aio_req grp)
354 {
355 while (grp->size < grp->int2 && !(grp->flags & FLAG_CANCELLED))
356 {
357 int old_len = grp->size;
358
359 if (grp->sv2 && SvOK (grp->sv2))
360 {
361 dSP;
362
363 ENTER;
364 SAVETMPS;
365 PUSHMARK (SP);
366 XPUSHs (req_sv (grp, AIO_GRP_KLASS));
367 PUTBACK;
368 call_sv (grp->sv2, G_VOID | G_EVAL | G_KEEPERR);
369 SPAGAIN;
370 FREETMPS;
371 LEAVE;
372 }
373
374 /* stop if no progress has been made */
375 if (old_len == grp->size)
376 {
377 SvREFCNT_dec (grp->sv2);
378 grp->sv2 = 0;
379 break;
380 }
381 }
382 }
383
384 static void aio_grp_dec (aio_req grp)
385 {
386 --grp->size;
387
388 /* call feeder, if applicable */
389 aio_grp_feed (grp);
390
391 /* finish, if done */
392 if (!grp->size && grp->int1)
393 {
394 req_invoke (grp);
395 req_free (grp);
396 }
397 }
398
399 static void req_invoke (aio_req req)
400 {
401 dSP;
402
403 if (req->flags & FLAG_SV1_RO_OFF)
404 SvREADONLY_off (req->sv1);
405
406 if (!(req->flags & FLAG_CANCELLED) && SvOK (req->callback))
407 {
408 ENTER;
409 SAVETMPS;
410 PUSHMARK (SP);
411 EXTEND (SP, 1);
412
413 switch (req->type)
414 {
415 case REQ_READDIR:
416 {
417 SV *rv = &PL_sv_undef;
418
419 if (req->result >= 0)
420 {
421 int i;
422 char *buf = req->ptr2;
423 AV *av = newAV ();
424
425 av_extend (av, req->result - 1);
426
427 for (i = 0; i < req->result; ++i)
428 {
429 SV *sv = newSVpv (buf, 0);
430
431 av_store (av, i, sv);
432 buf += SvCUR (sv) + 1;
433 }
434
435 rv = sv_2mortal (newRV_noinc ((SV *)av));
436 }
437
438 PUSHs (rv);
439 }
440 break;
441
442 case REQ_OPEN:
443 {
444 /* convert fd to fh */
445 SV *fh;
446
447 PUSHs (sv_2mortal (newSViv (req->result)));
448 PUTBACK;
449 call_pv ("IO::AIO::_fd2fh", G_SCALAR | G_EVAL);
450 SPAGAIN;
451
452 fh = SvREFCNT_inc (POPs);
453
454 PUSHMARK (SP);
455 XPUSHs (sv_2mortal (fh));
456 }
457 break;
458
459 case REQ_GROUP:
460 req->int1 = 2; /* mark group as finished */
461
462 if (req->sv1)
463 {
464 int i;
465 AV *av = (AV *)req->sv1;
466
467 EXTEND (SP, AvFILL (av) + 1);
468 for (i = 0; i <= AvFILL (av); ++i)
469 PUSHs (*av_fetch (av, i, 0));
470 }
471 break;
472
473 case REQ_NOP:
474 case REQ_BUSY:
475 break;
476
477 case REQ_READLINK:
478 if (req->result > 0)
479 {
480 SvCUR_set (req->sv1, req->result);
481 *SvEND (req->sv1) = 0;
482 PUSHs (req->sv1);
483 }
484 break;
485
486 case REQ_STAT:
487 case REQ_LSTAT:
488 case REQ_FSTAT:
489 PL_laststype = req->type == REQ_LSTAT ? OP_LSTAT : OP_STAT;
490 PL_laststatval = req->result;
491 PL_statcache = *(Stat_t *)(req->ptr2);
492 PUSHs (sv_2mortal (newSViv (req->result)));
493 break;
494
495 case REQ_READ:
496 SvCUR_set (req->sv1, req->stroffset + (req->result > 0 ? req->result : 0));
497 *SvEND (req->sv1) = 0;
498 PUSHs (sv_2mortal (newSViv (req->result)));
499 break;
500
501 default:
502 PUSHs (sv_2mortal (newSViv (req->result)));
503 break;
504 }
505
506 errno = req->errorno;
507
508 PUTBACK;
509 call_sv (req->callback, G_VOID | G_EVAL);
510 SPAGAIN;
511
512 FREETMPS;
513 LEAVE;
514 }
515
516 if (req->grp)
517 {
518 aio_req grp = req->grp;
519
520 /* unlink request */
521 if (req->grp_next) req->grp_next->grp_prev = req->grp_prev;
522 if (req->grp_prev) req->grp_prev->grp_next = req->grp_next;
523
524 if (grp->grp_first == req)
525 grp->grp_first = req->grp_next;
526
527 aio_grp_dec (grp);
528 }
529
530 if (SvTRUE (ERRSV))
531 {
532 req_free (req);
533 croak (0);
534 }
535 }
536
537 static void req_free (aio_req req)
538 {
539 if (req->self)
540 {
541 sv_unmagic (req->self, PERL_MAGIC_ext);
542 SvREFCNT_dec (req->self);
543 }
544
545 SvREFCNT_dec (req->fh);
546 SvREFCNT_dec (req->sv1);
547 SvREFCNT_dec (req->sv2);
548 SvREFCNT_dec (req->callback);
549
550 if (req->flags & FLAG_PTR2_FREE)
551 free (req->ptr2);
552
553 Safefree (req);
554 }
555
556 static void req_cancel_subs (aio_req grp)
557 {
558 aio_req sub;
559
560 if (grp->type != REQ_GROUP)
561 return;
562
563 SvREFCNT_dec (grp->sv2);
564 grp->sv2 = 0;
565
566 for (sub = grp->grp_first; sub; sub = sub->grp_next)
567 req_cancel (sub);
568 }
569
570 static void req_cancel (aio_req req)
571 {
572 req->flags |= FLAG_CANCELLED;
573
574 req_cancel_subs (req);
575 }
576
577 static void *aio_proc(void *arg);
578
579 static void start_thread (void)
580 {
581 sigset_t fullsigset, oldsigset;
582 pthread_attr_t attr;
583
584 worker *wrk = calloc (1, sizeof (worker));
585
586 if (!wrk)
587 croak ("unable to allocate worker thread data");
588
589 pthread_attr_init (&attr);
590 pthread_attr_setstacksize (&attr, STACKSIZE);
591 pthread_attr_setdetachstate (&attr, PTHREAD_CREATE_DETACHED);
592 #ifdef PTHREAD_SCOPE_PROCESS
593 pthread_attr_setscope (&attr, PTHREAD_SCOPE_PROCESS);
594 #endif
595
596 sigfillset (&fullsigset);
597
598 LOCK (wrklock);
599 sigprocmask (SIG_SETMASK, &fullsigset, &oldsigset);
600
601 if (pthread_create (&wrk->tid, &attr, aio_proc, (void *)wrk) == 0)
602 {
603 wrk->prev = &wrk_first;
604 wrk->next = wrk_first.next;
605 wrk_first.next->prev = wrk;
606 wrk_first.next = wrk;
607 ++started;
608 }
609 else
610 free (wrk);
611
612 sigprocmask (SIG_SETMASK, &oldsigset, 0);
613 UNLOCK (wrklock);
614 }
615
616 static void maybe_start_thread ()
617 {
618 if (get_nthreads () >= wanted)
619 return;
620
621 /* todo: maybe use idle here, but might be less exact */
622 if (0 <= (int)get_nthreads () + (int)get_npending () - (int)nreqs)
623 return;
624
625 start_thread ();
626 }
627
628 static void req_send (aio_req req)
629 {
630 ++nreqs;
631
632 LOCK (reqlock);
633 ++nready;
634 reqq_push (&req_queue, req);
635 pthread_cond_signal (&reqwait);
636 UNLOCK (reqlock);
637
638 maybe_start_thread ();
639 }
640
641 static void end_thread (void)
642 {
643 aio_req req;
644
645 Newz (0, req, 1, aio_cb);
646
647 req->type = REQ_QUIT;
648 req->pri = PRI_MAX + PRI_BIAS;
649
650 LOCK (reqlock);
651 reqq_push (&req_queue, req);
652 pthread_cond_signal (&reqwait);
653 UNLOCK (reqlock);
654
655 LOCK (wrklock);
656 --started;
657 UNLOCK (wrklock);
658 }
659
660 static void set_max_idle (int nthreads)
661 {
662 if (WORDACCESS_UNSAFE) LOCK (reqlock);
663 max_idle = nthreads <= 0 ? 1 : nthreads;
664 if (WORDACCESS_UNSAFE) UNLOCK (reqlock);
665 }
666
667 static void min_parallel (int nthreads)
668 {
669 if (wanted < nthreads)
670 wanted = nthreads;
671 }
672
673 static void max_parallel (int nthreads)
674 {
675 if (wanted > nthreads)
676 wanted = nthreads;
677
678 while (started > wanted)
679 end_thread ();
680 }
681
682 static void poll_wait ()
683 {
684 fd_set rfd;
685
686 while (nreqs)
687 {
688 int size;
689 if (WORDACCESS_UNSAFE) LOCK (reslock);
690 size = res_queue.size;
691 if (WORDACCESS_UNSAFE) UNLOCK (reslock);
692
693 if (size)
694 return;
695
696 maybe_start_thread ();
697
698 FD_ZERO(&rfd);
699 FD_SET(respipe [0], &rfd);
700
701 select (respipe [0] + 1, &rfd, 0, 0, 0);
702 }
703 }
704
705 static int poll_cb ()
706 {
707 dSP;
708 int count = 0;
709 int maxreqs = max_poll_reqs;
710 int do_croak = 0;
711 struct timeval tv_start, tv_now;
712 aio_req req;
713
714 if (max_poll_time)
715 gettimeofday (&tv_start, 0);
716
717 for (;;)
718 {
719 for (;;)
720 {
721 maybe_start_thread ();
722
723 LOCK (reslock);
724 req = reqq_shift (&res_queue);
725
726 if (req)
727 {
728 --npending;
729
730 if (!res_queue.size)
731 {
732 /* read any signals sent by the worker threads */
733 char buf [32];
734 while (read (respipe [0], buf, 32) == 32)
735 ;
736 }
737 }
738
739 UNLOCK (reslock);
740
741 if (!req)
742 break;
743
744 --nreqs;
745
746 if (req->type == REQ_GROUP && req->size)
747 {
748 req->int1 = 1; /* mark request as delayed */
749 continue;
750 }
751 else
752 {
753 req_invoke (req);
754
755 count++;
756 }
757
758 req_free (req);
759
760 if (maxreqs && !--maxreqs)
761 break;
762
763 if (max_poll_time)
764 {
765 gettimeofday (&tv_now, 0);
766
767 if (tvdiff (&tv_start, &tv_now) >= max_poll_time)
768 break;
769 }
770 }
771
772 if (nreqs <= max_outstanding)
773 break;
774
775 poll_wait ();
776
777 ++maxreqs;
778 }
779
780 return count;
781 }
782
783 static void create_pipe ()
784 {
785 if (pipe (respipe))
786 croak ("unable to initialize result pipe");
787
788 if (fcntl (respipe [0], F_SETFL, O_NONBLOCK))
789 croak ("cannot set result pipe to nonblocking mode");
790
791 if (fcntl (respipe [1], F_SETFL, O_NONBLOCK))
792 croak ("cannot set result pipe to nonblocking mode");
793 }
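/* respipe is the usual self-pipe: a worker writes one dummy byte to
 * respipe [1] whenever it makes the result queue non-empty (see
 * aio_proc), poll_wait selects on respipe [0], and poll_cb drains it
 * once the queue runs empty.  respipe [0] is also what poll_fileno ()
 * exposes to perl, so an external event loop can watch it and call
 * poll_cb when it becomes readable. */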
794
795 /*****************************************************************************/
796 /* work around various missing functions */
797
798 #if !HAVE_PREADWRITE
799 # define pread aio_pread
800 # define pwrite aio_pwrite
801
802 /*
803 * make our pread/pwrite safe against themselves, but not against
804 * normal read/write by using a mutex. slows down execution a lot,
805 * but that's your problem, not mine.
806 */
807 static pthread_mutex_t preadwritelock = PTHREAD_MUTEX_INITIALIZER;
808
809 static ssize_t pread (int fd, void *buf, size_t count, off_t offset)
810 {
811 ssize_t res;
812 off_t ooffset;
813
814 LOCK (preadwritelock);
815 ooffset = lseek (fd, 0, SEEK_CUR);
816 lseek (fd, offset, SEEK_SET);
817 res = read (fd, buf, count);
818 lseek (fd, ooffset, SEEK_SET);
819 UNLOCK (preadwritelock);
820
821 return res;
822 }
823
824 static ssize_t pwrite (int fd, void *buf, size_t count, off_t offset)
825 {
826 ssize_t res;
827 off_t ooffset;
828
829 LOCK (preadwritelock);
830 ooffset = lseek (fd, 0, SEEK_CUR);
831 lseek (fd, offset, SEEK_SET);
832 res = write (fd, buf, count);
833 lseek (fd, ooffset, SEEK_SET); /* restore the original file position, as in pread above */
834 UNLOCK (preadwritelock);
835
836 return res;
837 }
838 #endif
839
840 #if !HAVE_FDATASYNC
841 # define fdatasync fsync
842 #endif
843
844 #if !HAVE_READAHEAD
845 # define readahead(fd,offset,count) aio_readahead (fd, offset, count, self)
846
847 static ssize_t aio_readahead (int fd, off_t offset, size_t count, worker *self)
848 {
849 dBUF;
850
851 while (count > 0)
852 {
853 size_t len = count < AIO_BUFSIZE ? count : AIO_BUFSIZE;
854
855 pread (fd, aio_buf, len, offset);
856 offset += len;
857 count -= len;
858 }
859
860 errno = 0; return count; /* count is 0 here; avoid falling off a non-void function */
861 }
862
863 #endif
864
865 #if !HAVE_READDIR_R
866 # define readdir_r aio_readdir_r
867
868 static pthread_mutex_t readdirlock = PTHREAD_MUTEX_INITIALIZER;
869
870 static int readdir_r (DIR *dirp, struct dirent *ent, struct dirent **res)
871 {
872 struct dirent *e;
873 int errorno;
874
875 LOCK (readdirlock);
876
877 e = readdir (dirp);
878 errorno = errno;
879
880 if (e)
881 {
882 *res = ent;
883 strcpy (ent->d_name, e->d_name);
884 }
885 else
886 *res = 0;
887
888 UNLOCK (readdirlock);
889
890 errno = errorno;
891 return e ? 0 : -1;
892 }
893 #endif
894
895 /* sendfile always needs emulation */
896 static ssize_t sendfile_ (int ofd, int ifd, off_t offset, size_t count, worker *self)
897 {
898 ssize_t res;
899
900 if (!count)
901 return 0;
902
903 #if HAVE_SENDFILE
904 # if __linux
905 res = sendfile (ofd, ifd, &offset, count);
906
907 # elif __freebsd
908 /*
909 * Of course, the freebsd sendfile is a dire hack with no thoughts
910 * wasted on making it similar to other I/O functions.
911 */
912 {
913 off_t sbytes;
914 res = sendfile (ifd, ofd, offset, count, 0, &sbytes, 0);
915
916 if (res < 0 && sbytes)
917 /* maybe only on EAGAIN: as usual, the manpage leaves you guessing */
918 res = sbytes;
919 }
920
921 # elif __hpux
922 res = sendfile (ofd, ifd, offset, count, 0, 0);
923
924 # elif __solaris
925 {
926 struct sendfilevec vec;
927 size_t sbytes;
928
929 vec.sfv_fd = ifd;
930 vec.sfv_flag = 0;
931 vec.sfv_off = offset;
932 vec.sfv_len = count;
933
934 res = sendfilev (ofd, &vec, 1, &sbytes);
935
936 if (res < 0 && sbytes)
937 res = sbytes;
938 }
939
940 # endif
941 #else
942 res = -1;
943 errno = ENOSYS;
944 #endif
945
946 if (res < 0
947 && (errno == ENOSYS || errno == EINVAL || errno == ENOTSOCK
948 #if __solaris
949 || errno == EAFNOSUPPORT || errno == EPROTOTYPE
950 #endif
951 )
952 )
953 {
954 /* emulate sendfile. this is a major pain in the ass */
955 dBUF;
956
957 res = 0;
958
959 while (count)
960 {
961 ssize_t cnt;
962
963 cnt = pread (ifd, aio_buf, count > AIO_BUFSIZE ? AIO_BUFSIZE : count, offset);
964
965 if (cnt <= 0)
966 {
967 if (cnt && !res) res = -1;
968 break;
969 }
970
971 cnt = write (ofd, aio_buf, cnt);
972
973 if (cnt <= 0)
974 {
975 if (cnt && !res) res = -1;
976 break;
977 }
978
979 offset += cnt;
980 res += cnt;
981 count -= cnt;
982 }
983 }
984
985 return res;
986 }
987
988 /* read a full directory */
989 static void scandir_ (aio_req req, worker *self)
990 {
991 DIR *dirp;
992 union
993 {
994 struct dirent d;
995 char b [offsetof (struct dirent, d_name) + NAME_MAX + 1];
996 } *u;
997 struct dirent *entp;
998 char *name, *names;
999 int memlen = 4096;
1000 int memofs = 0;
1001 int res = 0;
1002 int errorno;
1003
1004 LOCK (wrklock);
1005 self->dirp = dirp = opendir (req->ptr1);
1006 self->dbuf = u = malloc (sizeof (*u));
1007 req->flags |= FLAG_PTR2_FREE;
1008 req->ptr2 = names = malloc (memlen);
1009 UNLOCK (wrklock);
1010
1011 if (dirp && u && names)
1012 for (;;)
1013 {
1014 errno = 0;
1015 readdir_r (dirp, &u->d, &entp);
1016
1017 if (!entp)
1018 break;
1019
1020 name = entp->d_name;
1021
1022 if (name [0] != '.' || (name [1] && (name [1] != '.' || name [2])))
1023 {
1024 int len = strlen (name) + 1;
1025
1026 res++;
1027
1028 while (memofs + len > memlen)
1029 {
1030 memlen *= 2;
1031 LOCK (wrklock);
1032 req->ptr2 = names = realloc (names, memlen);
1033 UNLOCK (wrklock);
1034
1035 if (!names)
1036 break;
1037 }
1038
1039 memcpy (names + memofs, name, len);
1040 memofs += len;
1041 }
1042 }
1043
1044 if (errno)
1045 res = -1;
1046
1047 req->result = res;
1048 }
1049
1050 /*****************************************************************************/
1051
1052 static void *aio_proc (void *thr_arg)
1053 {
1054 aio_req req;
1055 struct timespec ts;
1056 worker *self = (worker *)thr_arg;
1057
1058 /* try to distribute timeouts somewhat evenly */
1059 ts.tv_nsec = (((unsigned long)self + (unsigned long)ts.tv_sec) & 1023UL)
1060 * (1000000000UL / 1024UL);
1061
1062 for (;;)
1063 {
1064 ts.tv_sec = time (0) + IDLE_TIMEOUT;
1065
1066 LOCK (reqlock);
1067
1068 for (;;)
1069 {
1070 self->req = req = reqq_shift (&req_queue);
1071
1072 if (req)
1073 break;
1074
1075 ++idle;
1076
1077 if (pthread_cond_timedwait (&reqwait, &reqlock, &ts)
1078 == ETIMEDOUT)
1079 {
1080 if (idle > max_idle)
1081 {
1082 --idle;
1083 UNLOCK (reqlock);
1084 LOCK (wrklock);
1085 --started;
1086 UNLOCK (wrklock);
1087 goto quit;
1088 }
1089
1090 /* we are allowed to idle, so do so without any timeout */
1091 pthread_cond_wait (&reqwait, &reqlock);
1092 ts.tv_sec = time (0) + IDLE_TIMEOUT;
1093 }
1094
1095 --idle;
1096 }
1097
1098 --nready;
1099
1100 UNLOCK (reqlock);
1101
1102 errno = 0; /* strictly unnecessary */
1103
1104 if (!(req->flags & FLAG_CANCELLED))
1105 switch (req->type)
1106 {
1107 case REQ_READ: req->result = pread (req->int1, req->ptr1, req->size, req->offs); break;
1108 case REQ_WRITE: req->result = pwrite (req->int1, req->ptr1, req->size, req->offs); break;
1109
1110 case REQ_READAHEAD: req->result = readahead (req->int1, req->offs, req->size); break;
1111 case REQ_SENDFILE: req->result = sendfile_ (req->int1, req->int2, req->offs, req->size, self); break;
1112
1113 case REQ_STAT: req->result = stat (req->ptr1, (Stat_t *)req->ptr2); break;
1114 case REQ_LSTAT: req->result = lstat (req->ptr1, (Stat_t *)req->ptr2); break;
1115 case REQ_FSTAT: req->result = fstat (req->int1, (Stat_t *)req->ptr2); break;
1116
1117 case REQ_OPEN: req->result = open (req->ptr1, req->int1, req->mode); break;
1118 case REQ_CLOSE: req->result = close (req->int1); break;
1119 case REQ_UNLINK: req->result = unlink (req->ptr1); break;
1120 case REQ_RMDIR: req->result = rmdir (req->ptr1); break;
1121 case REQ_RENAME: req->result = rename (req->ptr2, req->ptr1); break;
1122 case REQ_LINK: req->result = link (req->ptr2, req->ptr1); break;
1123 case REQ_SYMLINK: req->result = symlink (req->ptr2, req->ptr1); break;
1124 case REQ_MKNOD: req->result = mknod (req->ptr2, req->mode, (dev_t)req->offs); break;
1125 case REQ_READLINK: req->result = readlink (req->ptr2, req->ptr1, NAME_MAX); break;
1126
1127 case REQ_FDATASYNC: req->result = fdatasync (req->int1); break;
1128 case REQ_FSYNC: req->result = fsync (req->int1); break;
1129 case REQ_READDIR: scandir_ (req, self); break;
1130
1131 case REQ_BUSY:
1132 {
1133 struct timeval tv;
1134
1135 tv.tv_sec = req->int1;
1136 tv.tv_usec = req->int2;
1137
1138 req->result = select (0, 0, 0, 0, &tv);
1139 }
1140
1141 case REQ_GROUP:
1142 case REQ_NOP:
1143 break;
1144
1145 case REQ_QUIT:
1146 goto quit;
1147
1148 default:
1149 req->result = ENOSYS;
1150 break;
1151 }
1152
1153 req->errorno = errno;
1154
1155 LOCK (reslock);
1156
1157 ++npending;
1158
1159 if (!reqq_push (&res_queue, req))
1160 /* write a dummy byte to the pipe so fh becomes ready */
1161 write (respipe [1], &respipe, 1);
1162
1163 self->req = 0;
1164 worker_clear (self);
1165
1166 UNLOCK (reslock);
1167 }
1168
1169 quit:
1170 LOCK (wrklock);
1171 worker_free (self);
1172 UNLOCK (wrklock);
1173
1174 return 0;
1175 }
1176
1177 /*****************************************************************************/
1178
1179 static void atfork_prepare (void)
1180 {
1181 LOCK (wrklock);
1182 LOCK (reqlock);
1183 LOCK (reslock);
1184 #if !HAVE_PREADWRITE
1185 LOCK (preadwritelock);
1186 #endif
1187 #if !HAVE_READDIR_R
1188 LOCK (readdirlock);
1189 #endif
1190 }
1191
1192 static void atfork_parent (void)
1193 {
1194 #if !HAVE_READDIR_R
1195 UNLOCK (readdirlock);
1196 #endif
1197 #if !HAVE_PREADWRITE
1198 UNLOCK (preadwritelock);
1199 #endif
1200 UNLOCK (reslock);
1201 UNLOCK (reqlock);
1202 UNLOCK (wrklock);
1203 }
1204
1205 static void atfork_child (void)
1206 {
1207 aio_req prv;
1208
1209 while (prv = reqq_shift (&req_queue))
1210 req_free (prv);
1211
1212 while (prv = reqq_shift (&res_queue))
1213 req_free (prv);
1214
1215 while (wrk_first.next != &wrk_first)
1216 {
1217 worker *wrk = wrk_first.next;
1218
1219 if (wrk->req)
1220 req_free (wrk->req);
1221
1222 worker_clear (wrk);
1223 worker_free (wrk);
1224 }
1225
1226 started = 0;
1227 idle = 0;
1228 nreqs = 0;
1229 nready = 0;
1230 npending = 0;
1231
1232 close (respipe [0]);
1233 close (respipe [1]);
1234 create_pipe ();
1235
1236 atfork_parent ();
1237 }
1238
1239 #define dREQ \
1240 aio_req req; \
1241 int req_pri = next_pri; \
1242 next_pri = DEFAULT_PRI + PRI_BIAS; \
1243 \
1244 if (SvOK (callback) && !SvROK (callback)) \
1245 croak ("callback must be undef or of reference type"); \
1246 \
1247 Newz (0, req, 1, aio_cb); \
1248 if (!req) \
1249 croak ("out of memory during aio_req allocation"); \
1250 \
1251 req->callback = newSVsv (callback); \
1252 req->pri = req_pri
1253
1254 #define REQ_SEND \
1255 req_send (req); \
1256 \
1257 if (GIMME_V != G_VOID) \
1258 XPUSHs (req_sv (req, AIO_REQ_KLASS));
1259
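/* dREQ and REQ_SEND bracket nearly every PPCODE: section below: dREQ
 * allocates a zeroed aio_cb, checks and copies the "callback" argument
 * and consumes the currently requested priority, while REQ_SEND queues
 * the request and, unless the XSUB was called in void context, pushes
 * the corresponding IO::AIO::REQ object onto the stack. */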
1260 MODULE = IO::AIO PACKAGE = IO::AIO
1261
1262 PROTOTYPES: ENABLE
1263
1264 BOOT:
1265 {
1266 HV *stash = gv_stashpv ("IO::AIO", 1);
1267
1268 newCONSTSUB (stash, "EXDEV", newSViv (EXDEV));
1269 newCONSTSUB (stash, "O_RDONLY", newSViv (O_RDONLY));
1270 newCONSTSUB (stash, "O_WRONLY", newSViv (O_WRONLY));
1271 newCONSTSUB (stash, "O_CREAT", newSViv (O_CREAT));
1272 newCONSTSUB (stash, "O_TRUNC", newSViv (O_TRUNC));
1273 newCONSTSUB (stash, "S_IFIFO", newSViv (S_IFIFO));
1274
1275 create_pipe ();
1276 pthread_atfork (atfork_prepare, atfork_parent, atfork_child);
1277 }
1278
1279 void
1280 max_poll_reqs (int nreqs)
1281 PROTOTYPE: $
1282 CODE:
1283 max_poll_reqs = nreqs;
1284
1285 void
1286 max_poll_time (double nseconds)
1287 PROTOTYPE: $
1288 CODE:
1289 max_poll_time = nseconds * AIO_TICKS;
1290
1291 void
1292 min_parallel (int nthreads)
1293 PROTOTYPE: $
1294
1295 void
1296 max_parallel (int nthreads)
1297 PROTOTYPE: $
1298
1299 void
1300 max_idle (int nthreads)
1301 PROTOTYPE: $
1302 CODE:
1303 set_max_idle (nthreads);
1304
1305 int
1306 max_outstanding (int maxreqs)
1307 PROTOTYPE: $
1308 CODE:
1309 RETVAL = max_outstanding;
1310 max_outstanding = maxreqs;
1311 OUTPUT:
1312 RETVAL
1313
1314 void
1315 aio_open (pathname,flags,mode,callback=&PL_sv_undef)
1316 SV8 * pathname
1317 int flags
1318 int mode
1319 SV * callback
1320 PROTOTYPE: $$$;$
1321 PPCODE:
1322 {
1323 dREQ;
1324
1325 req->type = REQ_OPEN;
1326 req->sv1 = newSVsv (pathname);
1327 req->ptr1 = SvPVbyte_nolen (req->sv1);
1328 req->int1 = flags;
1329 req->mode = mode;
1330
1331 REQ_SEND;
1332 }
1333
1334 void
1335 aio_close (fh,callback=&PL_sv_undef)
1336 SV * fh
1337 SV * callback
1338 PROTOTYPE: $;$
1339 ALIAS:
1340 aio_close = REQ_CLOSE
1341 aio_fsync = REQ_FSYNC
1342 aio_fdatasync = REQ_FDATASYNC
1343 PPCODE:
1344 {
1345 dREQ;
1346
1347 req->type = ix;
1348 req->fh = newSVsv (fh);
1349 req->int1 = PerlIO_fileno (IoIFP (sv_2io (fh)));
1350
1351 REQ_SEND;
1352 }
1353
1354 void
1355 aio_read (fh,offset,length,data,dataoffset,callback=&PL_sv_undef)
1356 SV * fh
1357 UV offset
1358 UV length
1359 SV8 * data
1360 UV dataoffset
1361 SV * callback
1362 ALIAS:
1363 aio_read = REQ_READ
1364 aio_write = REQ_WRITE
1365 PROTOTYPE: $$$$$;$
1366 PPCODE:
1367 {
1368 STRLEN svlen;
1369 char *svptr = SvPVbyte (data, svlen);
1370
1371 SvUPGRADE (data, SVt_PV);
1372 SvPOK_on (data);
1373
1374 if (dataoffset < 0)
1375 dataoffset += svlen;
1376
1377 if (dataoffset < 0 || dataoffset > svlen)
1378 croak ("data offset outside of string");
1379
1380 if (ix == REQ_WRITE)
1381 {
1382 /* write: check length and adjust. */
1383 if (length < 0 || length + dataoffset > svlen)
1384 length = svlen - dataoffset;
1385 }
1386 else
1387 {
1388 /* read: grow scalar as necessary */
1389 svptr = SvGROW (data, length + dataoffset);
1390 }
1391
1392 if (length < 0)
1393 croak ("length must not be negative");
1394
1395 {
1396 dREQ;
1397
1398 req->type = ix;
1399 req->fh = newSVsv (fh);
1400 req->int1 = PerlIO_fileno (ix == REQ_READ ? IoIFP (sv_2io (fh))
1401 : IoOFP (sv_2io (fh)));
1402 req->offs = offset;
1403 req->size = length;
1404 req->sv1 = SvREFCNT_inc (data);
1405 req->ptr1 = (char *)svptr + dataoffset;
1406 req->stroffset = dataoffset;
1407
1408 if (!SvREADONLY (data))
1409 {
1410 SvREADONLY_on (data);
1411 req->flags |= FLAG_SV1_RO_OFF;
1412 }
1413
1414 REQ_SEND;
1415 }
1416 }
1417
1418 void
1419 aio_readlink (path,callback=&PL_sv_undef)
1420 SV8 * path
1421 SV * callback
1422 PROTOTYPE: $;$
1423 PPCODE:
1424 {
1425 SV *data;
1426 dREQ;
1427
1428 data = newSV (NAME_MAX);
1429 SvPOK_on (data);
1430
1431 req->type = REQ_READLINK;
1432 req->fh = newSVsv (path);
1433 req->ptr2 = SvPVbyte_nolen (req->fh);
1434 req->sv1 = data;
1435 req->ptr1 = SvPVbyte_nolen (data);
1436
1437 REQ_SEND;
1438 }
1439
1440 void
1441 aio_sendfile (out_fh,in_fh,in_offset,length,callback=&PL_sv_undef)
1442 SV * out_fh
1443 SV * in_fh
1444 UV in_offset
1445 UV length
1446 SV * callback
1447 PROTOTYPE: $$$$;$
1448 PPCODE:
1449 {
1450 dREQ;
1451
1452 req->type = REQ_SENDFILE;
1453 req->fh = newSVsv (out_fh);
1454 req->int1 = PerlIO_fileno (IoIFP (sv_2io (out_fh)));
1455 req->sv2 = newSVsv (in_fh);
1456 req->int2 = PerlIO_fileno (IoIFP (sv_2io (in_fh)));
1457 req->offs = in_offset;
1458 req->size = length;
1459
1460 REQ_SEND;
1461 }
1462
1463 void
1464 aio_readahead (fh,offset,length,callback=&PL_sv_undef)
1465 SV * fh
1466 UV offset
1467 IV length
1468 SV * callback
1469 PROTOTYPE: $$$;$
1470 PPCODE:
1471 {
1472 dREQ;
1473
1474 req->type = REQ_READAHEAD;
1475 req->fh = newSVsv (fh);
1476 req->int1 = PerlIO_fileno (IoIFP (sv_2io (fh)));
1477 req->offs = offset;
1478 req->size = length;
1479
1480 REQ_SEND;
1481 }
1482
1483 void
1484 aio_stat (fh_or_path,callback=&PL_sv_undef)
1485 SV8 * fh_or_path
1486 SV * callback
1487 ALIAS:
1488 aio_stat = REQ_STAT
1489 aio_lstat = REQ_LSTAT
1490 PPCODE:
1491 {
1492 dREQ;
1493
1494 req->ptr2 = malloc (sizeof (Stat_t));
1495 if (!req->ptr2)
1496 {
1497 req_free (req);
1498 croak ("out of memory during aio_stat statdata allocation");
1499 }
1500
1501 req->flags |= FLAG_PTR2_FREE;
1502
1503 if (SvPOK (fh_or_path))
1504 {
1505 req->type = ix;
1506 req->sv1 = newSVsv (fh_or_path);
1507 req->ptr1 = SvPVbyte_nolen (req->sv1);
1508 }
1509 else
1510 {
1511 req->type = REQ_FSTAT;
1512 req->fh = newSVsv (fh_or_path);
1513 req->int1 = PerlIO_fileno (IoIFP (sv_2io (fh_or_path)));
1514 }
1515
1516 REQ_SEND;
1517 }
1518
1519 void
1520 aio_unlink (pathname,callback=&PL_sv_undef)
1521 SV8 * pathname
1522 SV * callback
1523 ALIAS:
1524 aio_unlink = REQ_UNLINK
1525 aio_rmdir = REQ_RMDIR
1526 aio_readdir = REQ_READDIR
1527 PPCODE:
1528 {
1529 dREQ;
1530
1531 req->type = ix;
1532 req->sv1 = newSVsv (pathname);
1533 req->ptr1 = SvPVbyte_nolen (req->sv1);
1534
1535 REQ_SEND;
1536 }
1537
1538 void
1539 aio_link (oldpath,newpath,callback=&PL_sv_undef)
1540 SV8 * oldpath
1541 SV8 * newpath
1542 SV * callback
1543 ALIAS:
1544 aio_link = REQ_LINK
1545 aio_symlink = REQ_SYMLINK
1546 aio_rename = REQ_RENAME
1547 PPCODE:
1548 {
1549 dREQ;
1550
1551 req->type = ix;
1552 req->fh = newSVsv (oldpath);
1553 req->ptr2 = SvPVbyte_nolen (req->fh);
1554 req->sv1 = newSVsv (newpath);
1555 req->ptr1 = SvPVbyte_nolen (req->sv1);
1556
1557 REQ_SEND;
1558 }
1559
1560 void
1561 aio_mknod (pathname,mode,dev,callback=&PL_sv_undef)
1562 SV8 * pathname
1563 SV * callback
1564 UV mode
1565 UV dev
1566 PPCODE:
1567 {
1568 dREQ;
1569
1570 req->type = REQ_MKNOD;
1571 req->sv1 = newSVsv (pathname);
1572 req->ptr1 = SvPVbyte_nolen (req->sv1);
1573 req->mode = (mode_t)mode;
1574 req->offs = dev;
1575
1576 REQ_SEND;
1577 }
1578
1579 void
1580 aio_busy (delay,callback=&PL_sv_undef)
1581 double delay
1582 SV * callback
1583 PPCODE:
1584 {
1585 dREQ;
1586
1587 req->type = REQ_BUSY;
1588 req->int1 = delay < 0. ? 0 : delay;
1589 req->int2 = delay < 0. ? 0 : 1000. * (delay - req->int1);
1590
1591 REQ_SEND;
1592 }
1593
1594 void
1595 aio_group (callback=&PL_sv_undef)
1596 SV * callback
1597 PROTOTYPE: ;$
1598 PPCODE:
1599 {
1600 dREQ;
1601
1602 req->type = REQ_GROUP;
1603
1604 req_send (req);
1605 XPUSHs (req_sv (req, AIO_GRP_KLASS));
1606 }
1607
1608 void
1609 aio_nop (callback=&PL_sv_undef)
1610 SV * callback
1611 PPCODE:
1612 {
1613 dREQ;
1614
1615 req->type = REQ_NOP;
1616
1617 REQ_SEND;
1618 }
1619
1620 int
1621 aioreq_pri (int pri = 0)
1622 PROTOTYPE: ;$
1623 CODE:
1624 RETVAL = next_pri - PRI_BIAS;
1625 if (items > 0)
1626 {
1627 if (pri < PRI_MIN) pri = PRI_MIN;
1628 if (pri > PRI_MAX) pri = PRI_MAX;
1629 next_pri = pri + PRI_BIAS;
1630 }
1631 OUTPUT:
1632 RETVAL
1633
1634 void
1635 aioreq_nice (int nice = 0)
1636 CODE:
1637 nice = next_pri - nice;
1638 if (nice < PRI_MIN) nice = PRI_MIN;
1639 if (nice > PRI_MAX) nice = PRI_MAX;
1640 next_pri = nice + PRI_BIAS;
1641
1642 void
1643 flush ()
1644 PROTOTYPE:
1645 CODE:
1646 while (nreqs)
1647 {
1648 poll_wait ();
1649 poll_cb ();
1650 }
1651
1652 int
1653 poll()
1654 PROTOTYPE:
1655 CODE:
1656 if (nreqs)
1657 {
1658 poll_wait ();
1659 RETVAL = poll_cb ();
1660 }
1661 else
1662 RETVAL = 0;
1663 OUTPUT:
1664 RETVAL
1665
1666 int
1667 poll_fileno()
1668 PROTOTYPE:
1669 CODE:
1670 RETVAL = respipe [0];
1671 OUTPUT:
1672 RETVAL
1673
1674 int
1675 poll_cb(...)
1676 PROTOTYPE:
1677 CODE:
1678 RETVAL = poll_cb ();
1679 OUTPUT:
1680 RETVAL
1681
1682 void
1683 poll_wait()
1684 PROTOTYPE:
1685 CODE:
1686 if (nreqs)
1687 poll_wait ();
1688
1689 int
1690 nreqs()
1691 PROTOTYPE:
1692 CODE:
1693 RETVAL = nreqs;
1694 OUTPUT:
1695 RETVAL
1696
1697 int
1698 nready()
1699 PROTOTYPE:
1700 CODE:
1701 RETVAL = get_nready ();
1702 OUTPUT:
1703 RETVAL
1704
1705 int
1706 npending()
1707 PROTOTYPE:
1708 CODE:
1709 RETVAL = get_npending ();
1710 OUTPUT:
1711 RETVAL
1712
1713 int
1714 nthreads()
1715 PROTOTYPE:
1716 CODE:
1717 if (WORDACCESS_UNSAFE) LOCK (wrklock);
1718 RETVAL = started;
1719 if (WORDACCESS_UNSAFE) UNLOCK (wrklock);
1720 OUTPUT:
1721 RETVAL
1722
1723 PROTOTYPES: DISABLE
1724
1725 MODULE = IO::AIO PACKAGE = IO::AIO::REQ
1726
1727 void
1728 cancel (aio_req_ornot req)
1729 CODE:
1730 req_cancel (req);
1731
1732 void
1733 cb (aio_req_ornot req, SV *callback=&PL_sv_undef)
1734 CODE:
1735 SvREFCNT_dec (req->callback);
1736 req->callback = newSVsv (callback);
1737
1738 MODULE = IO::AIO PACKAGE = IO::AIO::GRP
1739
1740 void
1741 add (aio_req grp, ...)
1742 PPCODE:
1743 {
1744 int i;
1745 aio_req req;
1746
1747 if (grp->int1 == 2)
1748 croak ("cannot add requests to IO::AIO::GRP after the group finished");
1749
1750 for (i = 1; i < items; ++i )
1751 {
1752 if (GIMME_V != G_VOID)
1753 XPUSHs (sv_2mortal (newSVsv (ST (i))));
1754
1755 req = SvAIO_REQ (ST (i));
1756
1757 if (req)
1758 {
1759 ++grp->size;
1760 req->grp = grp;
1761
1762 req->grp_prev = 0;
1763 req->grp_next = grp->grp_first;
1764
1765 if (grp->grp_first)
1766 grp->grp_first->grp_prev = req;
1767
1768 grp->grp_first = req;
1769 }
1770 }
1771 }
1772
1773 void
1774 cancel_subs (aio_req_ornot req)
1775 CODE:
1776 req_cancel_subs (req);
1777
1778 void
1779 result (aio_req grp, ...)
1780 CODE:
1781 {
1782 int i;
1783 AV *av;
1784
1785 grp->errorno = errno;
1786
1787 av = newAV ();
1788
1789 for (i = 1; i < items; ++i )
1790 av_push (av, newSVsv (ST (i)));
1791
1792 SvREFCNT_dec (grp->sv1);
1793 grp->sv1 = (SV *)av;
1794 }
1795
1796 void
1797 errno (aio_req grp, int errorno = errno)
1798 CODE:
1799 grp->errorno = errorno;
1800
1801 void
1802 limit (aio_req grp, int limit)
1803 CODE:
1804 grp->int2 = limit;
1805 aio_grp_feed (grp);
1806
1807 void
1808 feed (aio_req grp, SV *callback=&PL_sv_undef)
1809 CODE:
1810 {
1811 SvREFCNT_dec (grp->sv2);
1812 grp->sv2 = newSVsv (callback);
1813
1814 if (grp->int2 <= 0)
1815 grp->int2 = 2;
1816
1817 aio_grp_feed (grp);
1818 }
1819