/cvs/IO-AIO/AIO.xs
Revision: 1.86
Committed: Mon Oct 30 23:30:00 2006 UTC (17 years, 6 months ago) by root
Branch: MAIN
Changes since 1.85: +160 -126 lines
Log Message:
*** empty log message ***

File Contents

# Content
1 /* solaris */
2 #define _POSIX_PTHREAD_SEMANTICS 1
3
4 #if __linux && !defined(_GNU_SOURCE)
5 # define _GNU_SOURCE
6 #endif
7
8 /* just in case */
9 #define _REENTRANT 1
10
11 #include <errno.h>
12
13 #include "EXTERN.h"
14 #include "perl.h"
15 #include "XSUB.h"
16
17 #include "autoconf/config.h"
18
19 #include <pthread.h>
20
21 #include <stddef.h>
22 #include <errno.h>
23 #include <sys/time.h>
24 #include <sys/select.h>
25 #include <sys/types.h>
26 #include <sys/stat.h>
27 #include <limits.h>
28 #include <unistd.h>
29 #include <fcntl.h>
30 #include <signal.h>
31 #include <sched.h>
32
33 #if HAVE_SENDFILE
34 # if __linux
35 # include <sys/sendfile.h>
36 # elif __freebsd
37 # include <sys/socket.h>
38 # include <sys/uio.h>
39 # elif __hpux
40 # include <sys/socket.h>
41 # elif __solaris /* not yet */
42 # include <sys/sendfile.h>
43 # else
44 # error sendfile support requested but not available
45 # endif
46 #endif
47
48 /* number of seconds after which idle threads exit */
49 #define IDLE_TIMEOUT 10
50
51 /* used to size struct dirent buffers; AIX doesn't provide NAME_MAX */
52 #ifndef NAME_MAX
53 # define NAME_MAX 4096
54 #endif
55
56 #ifndef PTHREAD_STACK_MIN
57 /* care for broken platforms, e.g. windows */
58 # define PTHREAD_STACK_MIN 16384
59 #endif
60
61 #if __ia64
62 # define STACKSIZE 65536
63 #elif __i386 || __x86_64 /* 16k is unreasonably high :( */
64 # define STACKSIZE PTHREAD_STACK_MIN
65 #else
66 # define STACKSIZE 16384
67 #endif
68
69 /* whether word reads are potentially non-atomic.
70 * this is conservative; most arches this runs on
71 * likely have atomic word reads/writes.
72 */
73 #ifndef WORDACCESS_UNSAFE
74 # if __i386 || __x86_64
75 # define WORDACCESS_UNSAFE 0
76 # else
77 # define WORDACCESS_UNSAFE 1
78 # endif
79 #endif
80
81 /* buffer size for various temporary buffers */
82 #define AIO_BUFSIZE 65536
83
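/* dBUF declares and allocates a per-request scratch buffer; the pointer is
 * also remembered in self->dbuf (under wrklock) so worker_clear () can free
 * it when the worker thread is torn down. */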
84 #define dBUF \
85 char *aio_buf; \
86 LOCK (wrklock); \
87 self->dbuf = aio_buf = malloc (AIO_BUFSIZE); \
88 UNLOCK (wrklock); \
89 if (!aio_buf) \
90 return -1;
91
92 enum {
93 REQ_QUIT,
94 REQ_OPEN, REQ_CLOSE,
95 REQ_READ, REQ_WRITE, REQ_READAHEAD,
96 REQ_SENDFILE,
97 REQ_STAT, REQ_LSTAT, REQ_FSTAT,
98 REQ_FSYNC, REQ_FDATASYNC,
99 REQ_UNLINK, REQ_RMDIR, REQ_RENAME,
100 REQ_MKNOD, REQ_READDIR,
101 REQ_LINK, REQ_SYMLINK, REQ_READLINK,
102 REQ_GROUP, REQ_NOP,
103 REQ_BUSY,
104 };
105
106 #define AIO_REQ_KLASS "IO::AIO::REQ"
107 #define AIO_GRP_KLASS "IO::AIO::GRP"
108
109 typedef struct aio_cb
110 {
111 struct aio_cb *volatile next;
112
113 SV *callback, *fh;
114 SV *sv1, *sv2;
115 void *ptr1, *ptr2;
116 Stat_t *statdata;
117 off_t offs;
118 size_t size;
119 ssize_t result;
120
121 STRLEN stroffset;
122 int type;
123 int int1, int2;
124 int errorno;
125 mode_t mode; /* open */
126
127 unsigned char flags;
128 unsigned char pri;
129
130 SV *self; /* the perl counterpart of this request, if any */
131 struct aio_cb *grp, *grp_prev, *grp_next, *grp_first;
132 } aio_cb;
133
134 enum {
135 FLAG_CANCELLED = 0x01,
136 FLAG_DATA_RO_OFF = 0x80, /* data was set readonly */
137 };
138
139 typedef aio_cb *aio_req;
140 typedef aio_cb *aio_req_ornot;
141
142 enum {
143 PRI_MIN = -4,
144 PRI_MAX = 4,
145
146 DEFAULT_PRI = 0,
147 PRI_BIAS = -PRI_MIN,
148 NUM_PRI = PRI_MAX + PRI_BIAS + 1,
149 };
150
151 #define AIO_TICKS ((1000000 + 1023) >> 10)
152
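/* one tick is 1024 microseconds, i.e. AIO_TICKS (~977) ticks per second;
 * using a power-of-two fraction lets tvdiff () below and the time check in
 * poll_cb () get by with shifts instead of divisions. */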
153 static unsigned int max_poll_time = 0;
154 static unsigned int max_poll_reqs = 0;
155
156 /* calculate time difference in ~1/AIO_TICKS of a second */
157 static int tvdiff (struct timeval *tv1, struct timeval *tv2)
158 {
159 return (tv2->tv_sec - tv1->tv_sec ) * AIO_TICKS
160 + ((tv2->tv_usec - tv1->tv_usec) >> 10);
161 }
162
163 static int next_pri = DEFAULT_PRI + PRI_BIAS;
164
165 static unsigned int started, idle, wanted;
166
167 #if __linux && defined (PTHREAD_ADAPTIVE_MUTEX_INITIALIZER_NP)
168 # define AIO_MUTEX_INIT PTHREAD_ADAPTIVE_MUTEX_INITIALIZER_NP
169 #else
170 # define AIO_MUTEX_INIT PTHREAD_MUTEX_INITIALIZER
171 #endif
172
173 #define LOCK(mutex) pthread_mutex_lock (&(mutex))
174 #define UNLOCK(mutex) pthread_mutex_unlock (&(mutex))
175
176 /* worker threads management */
177 static pthread_mutex_t wrklock = AIO_MUTEX_INIT;
178
179 typedef struct worker {
180 /* locked by wrklock */
181 struct worker *prev, *next;
182
183 pthread_t tid;
184
185 /* locked by reslock, reqlock or wrklock */
186 aio_req req; /* currently processed request */
187 void *dbuf;
188 DIR *dirp;
189 } worker;
190
191 static worker wrk_first = { &wrk_first, &wrk_first, 0 };
192
193 static void worker_clear (worker *wrk)
194 {
195 if (wrk->dirp)
196 {
197 closedir (wrk->dirp);
198 wrk->dirp = 0;
199 }
200
201 if (wrk->dbuf)
202 {
203 free (wrk->dbuf);
204 wrk->dbuf = 0;
205 }
206 }
207
208 static void worker_free (worker *wrk)
209 {
210 wrk->next->prev = wrk->prev;
211 wrk->prev->next = wrk->next;
212
213 free (wrk);
214 }
215
216 static volatile unsigned int nreqs, nready, npending;
217 static volatile unsigned int max_idle = 4;
218 static volatile unsigned int max_outstanding = 0xffffffff;
219 static int respipe [2];
220
221 static pthread_mutex_t reslock = AIO_MUTEX_INIT;
222 static pthread_mutex_t reqlock = AIO_MUTEX_INIT;
223 static pthread_cond_t reqwait = PTHREAD_COND_INITIALIZER;
224
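/* on architectures where plain word reads may be torn, the counters below
 * are read under the mutex that protects them; elsewhere a direct read of
 * the volatile variable is good enough. */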
225 #if WORDACCESS_UNSAFE
226
227 static unsigned int get_nready ()
228 {
229 unsigned int retval;
230
231 LOCK (reqlock);
232 retval = nready;
233 UNLOCK (reqlock);
234
235 return retval;
236 }
237
238 static unsigned int get_npending ()
239 {
240 unsigned int retval;
241
242 LOCK (reslock);
243 retval = npending;
244 UNLOCK (reslock);
245
246 return retval;
247 }
248
249 static unsigned int get_nthreads ()
250 {
251 unsigned int retval;
252
253 LOCK (wrklock);
254 retval = started;
255 UNLOCK (wrklock);
256
257 return retval;
258 }
259
260 #else
261
262 # define get_nready() nready
263 # define get_npending() npending
264 # define get_nthreads() started
265
266 #endif
267
268 /*
269 * a somewhat faster data structure might be nice, but
270 * with 8 priorities this actually needs <20 insns
271 * per shift, the most expensive operation.
272 */
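/* reqq is an array of FIFO lists, one per priority: reqq_push appends at the
 * tail of its priority's list, reqq_shift scans from the highest priority
 * downwards and pops the head of the first non-empty list. */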
273 typedef struct {
274 aio_req qs[NUM_PRI], qe[NUM_PRI]; /* qstart, qend */
275 int size;
276 } reqq;
277
278 static reqq req_queue;
279 static reqq res_queue;
280
281 int reqq_push (reqq *q, aio_req req)
282 {
283 int pri = req->pri;
284 req->next = 0;
285
286 if (q->qe[pri])
287 {
288 q->qe[pri]->next = req;
289 q->qe[pri] = req;
290 }
291 else
292 q->qe[pri] = q->qs[pri] = req;
293
294 return q->size++;
295 }
296
297 aio_req reqq_shift (reqq *q)
298 {
299 int pri;
300
301 if (!q->size)
302 return 0;
303
304 --q->size;
305
306 for (pri = NUM_PRI; pri--; )
307 {
308 aio_req req = q->qs[pri];
309
310 if (req)
311 {
312 if (!(q->qs[pri] = req->next))
313 q->qe[pri] = 0;
314
315 return req;
316 }
317 }
318
319 abort ();
320 }
321
322 static int poll_cb ();
323 static void req_invoke (aio_req req);
324 static void req_free (aio_req req);
325 static void req_cancel (aio_req req);
326
327 /* must be called at most once */
328 static SV *req_sv (aio_req req, const char *klass)
329 {
330 if (!req->self)
331 {
332 req->self = (SV *)newHV ();
333 sv_magic (req->self, 0, PERL_MAGIC_ext, (char *)req, 0);
334 }
335
336 return sv_2mortal (sv_bless (newRV_inc (req->self), gv_stashpv (klass, 1)));
337 }
338
339 static aio_req SvAIO_REQ (SV *sv)
340 {
341 MAGIC *mg;
342
343 if (!sv_derived_from (sv, AIO_REQ_KLASS) || !SvROK (sv))
344 croak ("object of class " AIO_REQ_KLASS " expected");
345
346 mg = mg_find (SvRV (sv), PERL_MAGIC_ext);
347
348 return mg ? (aio_req)mg->mg_ptr : 0;
349 }
350
351 static void aio_grp_feed (aio_req grp)
352 {
353 while (grp->size < grp->int2 && !(grp->flags & FLAG_CANCELLED))
354 {
355 int old_len = grp->size;
356
357 if (grp->sv2 && SvOK (grp->sv2))
358 {
359 dSP;
360
361 ENTER;
362 SAVETMPS;
363 PUSHMARK (SP);
364 XPUSHs (req_sv (grp, AIO_GRP_KLASS));
365 PUTBACK;
366 call_sv (grp->sv2, G_VOID | G_EVAL | G_KEEPERR);
367 SPAGAIN;
368 FREETMPS;
369 LEAVE;
370 }
371
372 /* stop if no progress has been made */
373 if (old_len == grp->size)
374 {
375 SvREFCNT_dec (grp->sv2);
376 grp->sv2 = 0;
377 break;
378 }
379 }
380 }
381
382 static void aio_grp_dec (aio_req grp)
383 {
384 --grp->size;
385
386 /* call feeder, if applicable */
387 aio_grp_feed (grp);
388
389 /* finish, if done */
390 if (!grp->size && grp->int1)
391 {
392 req_invoke (grp);
393 req_free (grp);
394 }
395 }
396
397 static void req_invoke (aio_req req)
398 {
399 dSP;
400
401 if (req->flags & FLAG_DATA_RO_OFF)
402 SvREADONLY_off (req->sv1);
403
404 if (req->statdata)
405 {
406 PL_laststype = req->type == REQ_LSTAT ? OP_LSTAT : OP_STAT;
407 PL_laststatval = req->result;
408 PL_statcache = *(req->statdata);
409 }
410
411 if (!(req->flags & FLAG_CANCELLED) && SvOK (req->callback))
412 {
413 ENTER;
414 SAVETMPS;
415 PUSHMARK (SP);
416 EXTEND (SP, 1);
417
418 switch (req->type)
419 {
420 case REQ_READDIR:
421 {
422 SV *rv = &PL_sv_undef;
423
424 if (req->result >= 0)
425 {
426 int i;
427 char *buf = req->ptr2;
428 AV *av = newAV ();
429
430 av_extend (av, req->result - 1);
431
432 for (i = 0; i < req->result; ++i)
433 {
434 SV *sv = newSVpv (buf, 0);
435
436 av_store (av, i, sv);
437 buf += SvCUR (sv) + 1;
438 }
439
440 rv = sv_2mortal (newRV_noinc ((SV *)av));
441 }
442
443 PUSHs (rv);
444 }
445 break;
446
447 case REQ_OPEN:
448 {
449 /* convert fd to fh */
450 SV *fh;
451
452 PUSHs (sv_2mortal (newSViv (req->result)));
453 PUTBACK;
454 call_pv ("IO::AIO::_fd2fh", G_SCALAR | G_EVAL);
455 SPAGAIN;
456
457 fh = SvREFCNT_inc (POPs);
458
459 PUSHMARK (SP);
460 XPUSHs (sv_2mortal (fh));
461 }
462 break;
463
464 case REQ_GROUP:
465 req->int1 = 2; /* mark group as finished */
466
467 if (req->sv1)
468 {
469 int i;
470 AV *av = (AV *)req->sv1;
471
472 EXTEND (SP, AvFILL (av) + 1);
473 for (i = 0; i <= AvFILL (av); ++i)
474 PUSHs (*av_fetch (av, i, 0));
475 }
476 break;
477
478 case REQ_NOP:
479 case REQ_BUSY:
480 break;
481
482 case REQ_READLINK:
483 if (req->result > 0)
484 {
485 SvCUR_set (req->sv1, req->result);
486 *SvEND (req->sv1) = 0;
487 PUSHs (req->sv1);
488 }
489 break;
490
491 case REQ_READ:
492 SvCUR_set (req->sv1, req->stroffset + (req->result > 0 ? req->result : 0));
493 *SvEND (req->sv1) = 0;
494 /* fall through */
495 default:
496 PUSHs (sv_2mortal (newSViv (req->result)));
497 break;
498 }
499
500 errno = req->errorno;
501
502 PUTBACK;
503 call_sv (req->callback, G_VOID | G_EVAL);
504 SPAGAIN;
505
506 FREETMPS;
507 LEAVE;
508 }
509
510 if (req->grp)
511 {
512 aio_req grp = req->grp;
513
514 /* unlink request */
515 if (req->grp_next) req->grp_next->grp_prev = req->grp_prev;
516 if (req->grp_prev) req->grp_prev->grp_next = req->grp_next;
517
518 if (grp->grp_first == req)
519 grp->grp_first = req->grp_next;
520
521 aio_grp_dec (grp);
522 }
523
524 if (SvTRUE (ERRSV))
525 {
526 req_free (req);
527 croak (0);
528 }
529 }
530
531 static void req_free (aio_req req)
532 {
533 if (req->self)
534 {
535 sv_unmagic (req->self, PERL_MAGIC_ext);
536 SvREFCNT_dec (req->self);
537 }
538
539 SvREFCNT_dec (req->fh);
540 SvREFCNT_dec (req->sv1);
541 SvREFCNT_dec (req->sv2);
542 SvREFCNT_dec (req->callback);
543 Safefree (req->statdata);
544
545 if (req->type == REQ_READDIR)
546 free (req->ptr2);
547
548 Safefree (req);
549 }
550
551 static void req_cancel_subs (aio_req grp)
552 {
553 aio_req sub;
554
555 if (grp->type != REQ_GROUP)
556 return;
557
558 SvREFCNT_dec (grp->sv2);
559 grp->sv2 = 0;
560
561 for (sub = grp->grp_first; sub; sub = sub->grp_next)
562 req_cancel (sub);
563 }
564
565 static void req_cancel (aio_req req)
566 {
567 req->flags |= FLAG_CANCELLED;
568
569 req_cancel_subs (req);
570 }
571
572 static void *aio_proc(void *arg);
573
574 static void start_thread (void)
575 {
576 sigset_t fullsigset, oldsigset;
577 pthread_attr_t attr;
578
579 worker *wrk = calloc (1, sizeof (worker));
580
581 if (!wrk)
582 croak ("unable to allocate worker thread data");
583
584 pthread_attr_init (&attr);
585 pthread_attr_setstacksize (&attr, STACKSIZE);
586 pthread_attr_setdetachstate (&attr, PTHREAD_CREATE_DETACHED);
587 #ifdef PTHREAD_SCOPE_PROCESS
588 pthread_attr_setscope (&attr, PTHREAD_SCOPE_PROCESS);
589 #endif
590
591 sigfillset (&fullsigset);
592
593 LOCK (wrklock);
594 sigprocmask (SIG_SETMASK, &fullsigset, &oldsigset);
595
596 if (pthread_create (&wrk->tid, &attr, aio_proc, (void *)wrk) == 0)
597 {
598 wrk->prev = &wrk_first;
599 wrk->next = wrk_first.next;
600 wrk_first.next->prev = wrk;
601 wrk_first.next = wrk;
602 ++started;
603 }
604 else
605 free (wrk);
606
607 sigprocmask (SIG_SETMASK, &oldsigset, 0);
608 UNLOCK (wrklock);
609 }
610
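/* start another worker only if we are below the configured (wanted) number
 * of threads and the requests already queued cannot be covered by the
 * existing threads plus the results still pending delivery. */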
611 static void maybe_start_thread ()
612 {
613 if (get_nthreads () >= wanted)
614 return;
615
616 /* todo: maybe use idle here, but might be less exact */
617 if (0 <= (int)get_nthreads () + (int)get_npending () - (int)nreqs)
618 return;
619
620 start_thread ();
621 }
622
623 static void req_send (aio_req req)
624 {
625 ++nreqs;
626
627 LOCK (reqlock);
628 ++nready;
629 reqq_push (&req_queue, req);
630 pthread_cond_signal (&reqwait);
631 UNLOCK (reqlock);
632
633 maybe_start_thread ();
634 }
635
636 static void end_thread (void)
637 {
638 aio_req req;
639
640 Newz (0, req, 1, aio_cb);
641
642 req->type = REQ_QUIT;
643 req->pri = PRI_MAX + PRI_BIAS;
644
645 LOCK (reqlock);
646 reqq_push (&req_queue, req);
647 pthread_cond_signal (&reqwait);
648 UNLOCK (reqlock);
649
650 LOCK (wrklock);
651 --started;
652 UNLOCK (wrklock);
653 }
654
655 static void set_max_idle (int nthreads)
656 {
657 if (WORDACCESS_UNSAFE) LOCK (reqlock);
658 max_idle = nthreads <= 0 ? 1 : nthreads;
659 if (WORDACCESS_UNSAFE) UNLOCK (reqlock);
660 }
661
662 static void min_parallel (int nthreads)
663 {
664 if (wanted < nthreads)
665 wanted = nthreads;
666 }
667
668 static void max_parallel (int nthreads)
669 {
670 if (wanted > nthreads)
671 wanted = nthreads;
672
673 while (started > wanted)
674 end_thread ();
675 }
676
677 static void poll_wait ()
678 {
679 fd_set rfd;
680
681 while (nreqs)
682 {
683 int size;
684 if (WORDACCESS_UNSAFE) LOCK (reslock);
685 size = res_queue.size;
686 if (WORDACCESS_UNSAFE) UNLOCK (reslock);
687
688 if (size)
689 return;
690
691 maybe_start_thread ();
692
693 FD_ZERO(&rfd);
694 FD_SET(respipe [0], &rfd);
695
696 select (respipe [0] + 1, &rfd, 0, 0, 0);
697 }
698 }
699
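/* poll_cb drains the result queue and invokes the perl callbacks of finished
 * requests. the inner loop stops after max_poll_reqs requests or once
 * max_poll_time has elapsed; the outer loop keeps blocking in poll_wait ()
 * while more than max_outstanding requests are still in flight. */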
700 static int poll_cb ()
701 {
702 dSP;
703 int count = 0;
704 int maxreqs = max_poll_reqs;
705 int do_croak = 0;
706 struct timeval tv_start, tv_now;
707 aio_req req;
708
709 if (max_poll_time)
710 gettimeofday (&tv_start, 0);
711
712 for (;;)
713 {
714 for (;;)
715 {
716 maybe_start_thread ();
717
718 LOCK (reslock);
719 req = reqq_shift (&res_queue);
720
721 if (req)
722 {
723 --npending;
724
725 if (!res_queue.size)
726 {
727 /* read any signals sent by the worker threads */
728 char buf [32];
729 while (read (respipe [0], buf, 32) == 32)
730 ;
731 }
732 }
733
734 UNLOCK (reslock);
735
736 if (!req)
737 break;
738
739 --nreqs;
740
741 if (req->type == REQ_GROUP && req->size)
742 {
743 req->int1 = 1; /* mark request as delayed */
744 continue;
745 }
746 else
747 {
748 req_invoke (req);
749
750 count++;
751 }
752
753 req_free (req);
754
755 if (maxreqs && !--maxreqs)
756 break;
757
758 if (max_poll_time)
759 {
760 gettimeofday (&tv_now, 0);
761
762 if (tvdiff (&tv_start, &tv_now) >= max_poll_time)
763 break;
764 }
765 }
766
767 if (nreqs <= max_outstanding)
768 break;
769
770 poll_wait ();
771
772 ++maxreqs;
773 }
774
775 return count;
776 }
777
778 static void create_pipe ()
779 {
780 if (pipe (respipe))
781 croak ("unable to initialize result pipe");
782
783 if (fcntl (respipe [0], F_SETFL, O_NONBLOCK))
784 croak ("cannot set result pipe to nonblocking mode");
785
786 if (fcntl (respipe [1], F_SETFL, O_NONBLOCK))
787 croak ("cannot set result pipe to nonblocking mode");
788 }
789
790 /*****************************************************************************/
791 /* work around various missing functions */
792
793 #if !HAVE_PREADWRITE
794 # define pread aio_pread
795 # define pwrite aio_pwrite
796
797 /*
798 * make our pread/pwrite safe against themselves, but not against
799 * normal read/write by using a mutex. slows down execution a lot,
800 * but that's your problem, not mine.
801 */
802 static pthread_mutex_t preadwritelock = PTHREAD_MUTEX_INITIALIZER;
803
804 static ssize_t pread (int fd, void *buf, size_t count, off_t offset)
805 {
806 ssize_t res;
807 off_t ooffset;
808
809 LOCK (preadwritelock);
810 ooffset = lseek (fd, 0, SEEK_CUR);
811 lseek (fd, offset, SEEK_SET);
812 res = read (fd, buf, count);
813 lseek (fd, ooffset, SEEK_SET);
814 UNLOCK (preadwritelock);
815
816 return res;
817 }
818
819 static ssize_t pwrite (int fd, void *buf, size_t count, off_t offset)
820 {
821 ssize_t res;
822 off_t ooffset;
823
824 LOCK (preadwritelock);
825 ooffset = lseek (fd, 0, SEEK_CUR);
826 lseek (fd, offset, SEEK_SET);
827 res = write (fd, buf, count);
828 lseek (fd, ooffset, SEEK_SET); /* restore the original file offset, as pread does above */
829 UNLOCK (preadwritelock);
830
831 return res;
832 }
833 #endif
834
835 #if !HAVE_FDATASYNC
836 # define fdatasync fsync
837 #endif
838
839 #if !HAVE_READAHEAD
840 # define readahead(fd,offset,count) aio_readahead (fd, offset, count, self)
841
842 static ssize_t aio_readahead (int fd, off_t offset, size_t count, worker *self)
843 {
844 dBUF;
845
846 while (count > 0)
847 {
848 size_t len = count < AIO_BUFSIZE ? count : AIO_BUFSIZE;
849
850 pread (fd, aio_buf, len, offset);
851 offset += len;
852 count -= len;
853 }
854
855 errno = 0; return 0; /* the emulation always reports success */
856 }
857
858 #endif
859
860 #if !HAVE_READDIR_R
861 # define readdir_r aio_readdir_r
862
863 static pthread_mutex_t readdirlock = PTHREAD_MUTEX_INITIALIZER;
864
865 static int readdir_r (DIR *dirp, struct dirent *ent, struct dirent **res)
866 {
867 struct dirent *e;
868 int errorno;
869
870 LOCK (readdirlock);
871
872 e = readdir (dirp);
873 errorno = errno;
874
875 if (e)
876 {
877 *res = ent;
878 strcpy (ent->d_name, e->d_name);
879 }
880 else
881 *res = 0;
882
883 UNLOCK (readdirlock);
884
885 errno = errorno;
886 return e ? 0 : -1;
887 }
888 #endif
889
890 /* sendfile always needs emulation */
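/* sendfile_ first tries the platform's native sendfile variant; when that is
 * unavailable or refuses the descriptors (ENOSYS, EINVAL, ENOTSOCK), it
 * falls back to a pread/write copy loop over the dBUF scratch buffer. */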
891 static ssize_t sendfile_ (int ofd, int ifd, off_t offset, size_t count, worker *self)
892 {
893 ssize_t res;
894
895 if (!count)
896 return 0;
897
898 #if HAVE_SENDFILE
899 # if __linux
900 res = sendfile (ofd, ifd, &offset, count);
901
902 # elif __freebsd
903 /*
904 * Of course, the freebsd sendfile is a dire hack with no thoughts
905 * wasted on making it similar to other I/O functions.
906 */
907 {
908 off_t sbytes;
909 res = sendfile (ifd, ofd, offset, count, 0, &sbytes, 0);
910
911 if (res < 0 && sbytes)
912 /* maybe only on EAGAIN: as usual, the manpage leaves you guessing */
913 res = sbytes;
914 }
915
916 # elif __hpux
917 res = sendfile (ofd, ifd, offset, count, 0, 0);
918
919 # elif __solaris
920 {
921 struct sendfilevec vec;
922 size_t sbytes;
923
924 vec.sfv_fd = ifd;
925 vec.sfv_flag = 0;
926 vec.sfv_off = offset;
927 vec.sfv_len = count;
928
929 res = sendfilev (ofd, &vec, 1, &sbytes);
930
931 if (res < 0 && sbytes)
932 res = sbytes;
933 }
934
935 # endif
936 #else
937 res = -1;
938 errno = ENOSYS;
939 #endif
940
941 if (res < 0
942 && (errno == ENOSYS || errno == EINVAL || errno == ENOTSOCK
943 #if __solaris
944 || errno == EAFNOSUPPORT || errno == EPROTOTYPE
945 #endif
946 )
947 )
948 {
949 /* emulate sendfile. this is a major pain in the ass */
950 dBUF;
951
952 res = 0;
953
954 while (count)
955 {
956 ssize_t cnt;
957
958 cnt = pread (ifd, aio_buf, count > AIO_BUFSIZE ? AIO_BUFSIZE : count, offset);
959
960 if (cnt <= 0)
961 {
962 if (cnt && !res) res = -1;
963 break;
964 }
965
966 cnt = write (ofd, aio_buf, cnt);
967
968 if (cnt <= 0)
969 {
970 if (cnt && !res) res = -1;
971 break;
972 }
973
974 offset += cnt;
975 res += cnt;
976 count -= cnt;
977 }
978 }
979
980 return res;
981 }
982
983 /* read a full directory */
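/* the entry names (minus "." and "..") are packed back to back,
 * NUL-separated, into a single buffer kept in req->ptr2, and req->result is
 * set to the number of entries (or -1 on error). */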
984 static void scandir_ (aio_req req, worker *self)
985 {
986 DIR *dirp;
987 union
988 {
989 struct dirent d;
990 char b [offsetof (struct dirent, d_name) + NAME_MAX + 1];
991 } *u;
992 struct dirent *entp;
993 char *name, *names;
994 int memlen = 4096;
995 int memofs = 0;
996 int res = 0;
997 int errorno;
998
999 LOCK (wrklock);
1000 self->dirp = dirp = opendir (req->ptr1);
1001 self->dbuf = u = malloc (sizeof (*u));
1002 req->ptr2 = names = malloc (memlen);
1003 UNLOCK (wrklock);
1004
1005 if (dirp && u && names)
1006 for (;;)
1007 {
1008 errno = 0;
1009 readdir_r (dirp, &u->d, &entp);
1010
1011 if (!entp)
1012 break;
1013
1014 name = entp->d_name;
1015
1016 if (name [0] != '.' || (name [1] && (name [1] != '.' || name [2])))
1017 {
1018 int len = strlen (name) + 1;
1019
1020 res++;
1021
1022 while (memofs + len > memlen)
1023 {
1024 memlen *= 2;
1025 LOCK (wrklock);
1026 req->ptr2 = names = realloc (names, memlen);
1027 UNLOCK (wrklock);
1028
1029 if (!names)
1030 break;
1031 }
1032
1033 memcpy (names + memofs, name, len);
1034 memofs += len;
1035 }
1036 }
1037
1038 if (errno)
1039 res = -1;
1040
1041 req->result = res;
1042 }
1043
1044 /*****************************************************************************/
1045
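/* aio_proc is the body of each worker thread: it pops requests from
 * req_queue, executes the matching syscall, then pushes the request onto
 * res_queue and wakes the perl side by writing one byte to respipe (only
 * when the result queue was empty). idle threads exit after IDLE_TIMEOUT
 * seconds once more than max_idle of them are waiting. */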
1046 static void *aio_proc (void *thr_arg)
1047 {
1048 aio_req req;
1049 struct timespec ts;
1050 worker *self = (worker *)thr_arg;
1051
1052 /* try to distribute timeouts somewhat evenly */
1053 ts.tv_nsec = (((unsigned long)self + (unsigned long)ts.tv_sec) & 1023UL)
1054 * (1000000000UL / 1024UL);
1055
1056 for (;;)
1057 {
1058 ts.tv_sec = time (0) + IDLE_TIMEOUT;
1059
1060 LOCK (reqlock);
1061
1062 for (;;)
1063 {
1064 self->req = req = reqq_shift (&req_queue);
1065
1066 if (req)
1067 break;
1068
1069 ++idle;
1070
1071 if (pthread_cond_timedwait (&reqwait, &reqlock, &ts)
1072 == ETIMEDOUT)
1073 {
1074 if (idle > max_idle)
1075 {
1076 --idle;
1077 UNLOCK (reqlock);
1078 LOCK (wrklock);
1079 --started;
1080 UNLOCK (wrklock);
1081 goto quit;
1082 }
1083
1084 /* we are allowed to idle, so do so without any timeout */
1085 pthread_cond_wait (&reqwait, &reqlock);
1086 ts.tv_sec = time (0) + IDLE_TIMEOUT;
1087 }
1088
1089 --idle;
1090 }
1091
1092 --nready;
1093
1094 UNLOCK (reqlock);
1095
1096 errno = 0; /* strictly unnecessary */
1097
1098 if (!(req->flags & FLAG_CANCELLED))
1099 switch (req->type)
1100 {
1101 case REQ_READ: req->result = pread (req->int1, req->ptr1, req->size, req->offs); break;
1102 case REQ_WRITE: req->result = pwrite (req->int1, req->ptr1, req->size, req->offs); break;
1103
1104 case REQ_READAHEAD: req->result = readahead (req->int1, req->offs, req->size); break;
1105 case REQ_SENDFILE: req->result = sendfile_ (req->int1, req->int2, req->offs, req->size, self); break;
1106
1107 case REQ_STAT: req->result = stat (req->ptr1, req->statdata); break;
1108 case REQ_LSTAT: req->result = lstat (req->ptr1, req->statdata); break;
1109 case REQ_FSTAT: req->result = fstat (req->int1, req->statdata); break;
1110
1111 case REQ_OPEN: req->result = open (req->ptr1, req->int1, req->mode); break;
1112 case REQ_CLOSE: req->result = close (req->int1); break;
1113 case REQ_UNLINK: req->result = unlink (req->ptr1); break;
1114 case REQ_RMDIR: req->result = rmdir (req->ptr1); break;
1115 case REQ_RENAME: req->result = rename (req->ptr2, req->ptr1); break;
1116 case REQ_LINK: req->result = link (req->ptr2, req->ptr1); break;
1117 case REQ_SYMLINK: req->result = symlink (req->ptr2, req->ptr1); break;
1118 case REQ_MKNOD: req->result = mknod (req->ptr2, req->mode, (dev_t)req->offs); break;
1119 case REQ_READLINK: req->result = readlink (req->ptr2, req->ptr1, NAME_MAX); break;
1120
1121 case REQ_FDATASYNC: req->result = fdatasync (req->int1); break;
1122 case REQ_FSYNC: req->result = fsync (req->int1); break;
1123 case REQ_READDIR: scandir_ (req, self); break;
1124
1125 case REQ_BUSY:
1126 {
1127 struct timeval tv;
1128
1129 tv.tv_sec = req->int1;
1130 tv.tv_usec = req->int2;
1131
1132 req->result = select (0, 0, 0, 0, &tv);
1133 }
1134
1135 case REQ_GROUP:
1136 case REQ_NOP:
1137 break;
1138
1139 case REQ_QUIT:
1140 goto quit;
1141
1142 default:
1143 req->result = ENOSYS;
1144 break;
1145 }
1146
1147 req->errorno = errno;
1148
1149 LOCK (reslock);
1150
1151 ++npending;
1152
1153 if (!reqq_push (&res_queue, req))
1154 /* write a dummy byte to the pipe so fh becomes ready */
1155 write (respipe [1], &respipe, 1);
1156
1157 self->req = 0;
1158 worker_clear (self);
1159
1160 UNLOCK (reslock);
1161 }
1162
1163 quit:
1164 LOCK (wrklock);
1165 worker_free (self);
1166 UNLOCK (wrklock);
1167
1168 return 0;
1169 }
1170
1171 /*****************************************************************************/
1172
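/* fork handling: the prepare/parent hooks simply take and release every
 * mutex around the fork; the child cannot rely on the worker threads still
 * existing, so it discards all queued requests and worker records, resets
 * the counters and recreates the result pipe. */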
1173 static void atfork_prepare (void)
1174 {
1175 LOCK (wrklock);
1176 LOCK (reqlock);
1177 LOCK (reslock);
1178 #if !HAVE_PREADWRITE
1179 LOCK (preadwritelock);
1180 #endif
1181 #if !HAVE_READDIR_R
1182 LOCK (readdirlock);
1183 #endif
1184 }
1185
1186 static void atfork_parent (void)
1187 {
1188 #if !HAVE_READDIR_R
1189 UNLOCK (readdirlock);
1190 #endif
1191 #if !HAVE_PREADWRITE
1192 UNLOCK (preadwritelock);
1193 #endif
1194 UNLOCK (reslock);
1195 UNLOCK (reqlock);
1196 UNLOCK (wrklock);
1197 }
1198
1199 static void atfork_child (void)
1200 {
1201 aio_req prv;
1202
1203 while (prv = reqq_shift (&req_queue))
1204 req_free (prv);
1205
1206 while (prv = reqq_shift (&res_queue))
1207 req_free (prv);
1208
1209 while (wrk_first.next != &wrk_first)
1210 {
1211 worker *wrk = wrk_first.next;
1212
1213 if (wrk->req)
1214 req_free (wrk->req);
1215
1216 worker_clear (wrk);
1217 worker_free (wrk);
1218 }
1219
1220 started = 0;
1221 idle = 0;
1222 nreqs = 0;
1223 nready = 0;
1224 npending = 0;
1225
1226 close (respipe [0]);
1227 close (respipe [1]);
1228 create_pipe ();
1229
1230 atfork_parent ();
1231 }
1232
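/* dREQ and REQ_SEND are shared by the XS request constructors below: dREQ
 * allocates a zeroed aio_cb, copies the callback and consumes the one-shot
 * priority set via aioreq_pri; REQ_SEND queues the request and, unless
 * called in void context, pushes the IO::AIO::REQ object onto the stack. */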
1233 #define dREQ \
1234 aio_req req; \
1235 int req_pri = next_pri; \
1236 next_pri = DEFAULT_PRI + PRI_BIAS; \
1237 \
1238 if (SvOK (callback) && !SvROK (callback)) \
1239 croak ("callback must be undef or of reference type"); \
1240 \
1241 Newz (0, req, 1, aio_cb); \
1242 if (!req) \
1243 croak ("out of memory during aio_req allocation"); \
1244 \
1245 req->callback = newSVsv (callback); \
1246 req->pri = req_pri
1247
1248 #define REQ_SEND \
1249 req_send (req); \
1250 \
1251 if (GIMME_V != G_VOID) \
1252 XPUSHs (req_sv (req, AIO_REQ_KLASS));
1253
1254 MODULE = IO::AIO PACKAGE = IO::AIO
1255
1256 PROTOTYPES: ENABLE
1257
1258 BOOT:
1259 {
1260 HV *stash = gv_stashpv ("IO::AIO", 1);
1261
1262 newCONSTSUB (stash, "EXDEV", newSViv (EXDEV));
1263 newCONSTSUB (stash, "O_RDONLY", newSViv (O_RDONLY));
1264 newCONSTSUB (stash, "O_WRONLY", newSViv (O_WRONLY));
1265 newCONSTSUB (stash, "O_CREAT", newSViv (O_CREAT));
1266 newCONSTSUB (stash, "O_TRUNC", newSViv (O_TRUNC));
1267 newCONSTSUB (stash, "S_IFIFO", newSViv (S_IFIFO));
1268
1269 create_pipe ();
1270 pthread_atfork (atfork_prepare, atfork_parent, atfork_child);
1271 }
1272
1273 void
1274 max_poll_reqs (int nreqs)
1275 PROTOTYPE: $
1276 CODE:
1277 max_poll_reqs = nreqs;
1278
1279 void
1280 max_poll_time (double nseconds)
1281 PROTOTYPE: $
1282 CODE:
1283 max_poll_time = nseconds * AIO_TICKS;
1284
1285 void
1286 min_parallel (int nthreads)
1287 PROTOTYPE: $
1288
1289 void
1290 max_parallel (int nthreads)
1291 PROTOTYPE: $
1292
1293 void
1294 max_idle (int nthreads)
1295 PROTOTYPE: $
1296 CODE:
1297 set_max_idle (nthreads);
1298
1299 int
1300 max_outstanding (int maxreqs)
1301 PROTOTYPE: $
1302 CODE:
1303 RETVAL = max_outstanding;
1304 max_outstanding = maxreqs;
1305 OUTPUT:
1306 RETVAL
1307
1308 void
1309 aio_open (pathname,flags,mode,callback=&PL_sv_undef)
1310 SV * pathname
1311 int flags
1312 int mode
1313 SV * callback
1314 PROTOTYPE: $$$;$
1315 PPCODE:
1316 {
1317 dREQ;
1318
1319 req->type = REQ_OPEN;
1320 req->sv1 = newSVsv (pathname);
1321 req->ptr1 = SvPVbyte_nolen (pathname);
1322 req->int1 = flags;
1323 req->mode = mode;
1324
1325 REQ_SEND;
1326 }
1327
1328 void
1329 aio_close (fh,callback=&PL_sv_undef)
1330 SV * fh
1331 SV * callback
1332 PROTOTYPE: $;$
1333 ALIAS:
1334 aio_close = REQ_CLOSE
1335 aio_fsync = REQ_FSYNC
1336 aio_fdatasync = REQ_FDATASYNC
1337 PPCODE:
1338 {
1339 dREQ;
1340
1341 req->type = ix;
1342 req->fh = newSVsv (fh);
1343 req->int1 = PerlIO_fileno (IoIFP (sv_2io (fh)));
1344
1345 REQ_SEND;
1346 }
1347
1348 void
1349 aio_read (fh,offset,length,data,dataoffset,callback=&PL_sv_undef)
1350 SV * fh
1351 UV offset
1352 UV length
1353 SV * data
1354 UV dataoffset
1355 SV * callback
1356 ALIAS:
1357 aio_read = REQ_READ
1358 aio_write = REQ_WRITE
1359 PROTOTYPE: $$$$$;$
1360 PPCODE:
1361 {
1362 STRLEN svlen;
1363 char *svptr = SvPVbyte (data, svlen);
1364
1365 SvUPGRADE (data, SVt_PV);
1366 SvPOK_on (data);
1367
1368 if (dataoffset < 0)
1369 dataoffset += svlen;
1370
1371 if (dataoffset < 0 || dataoffset > svlen)
1372 croak ("data offset outside of string");
1373
1374 if (ix == REQ_WRITE)
1375 {
1376 /* write: check length and adjust. */
1377 if (length < 0 || length + dataoffset > svlen)
1378 length = svlen - dataoffset;
1379 }
1380 else
1381 {
1382 /* read: grow scalar as necessary */
1383 svptr = SvGROW (data, length + dataoffset);
1384 }
1385
1386 if (length < 0)
1387 croak ("length must not be negative");
1388
1389 {
1390 dREQ;
1391
1392 req->type = ix;
1393 req->fh = newSVsv (fh);
1394 req->int1 = PerlIO_fileno (ix == REQ_READ ? IoIFP (sv_2io (fh))
1395 : IoOFP (sv_2io (fh)));
1396 req->offs = offset;
1397 req->size = length;
1398 req->sv1 = SvREFCNT_inc (data);
1399 req->ptr1 = (char *)svptr + dataoffset;
1400 req->stroffset = dataoffset;
1401
1402 if (!SvREADONLY (data))
1403 {
1404 SvREADONLY_on (data);
1405 req->flags |= FLAG_DATA_RO_OFF;
1406 }
1407
1408 REQ_SEND;
1409 }
1410 }
1411
1412 void
1413 aio_readlink (path,callback=&PL_sv_undef)
1414 SV * path
1415 SV * callback
1416 PROTOTYPE: $;$
1417 PPCODE:
1418 {
1419 SV *data;
1420 dREQ;
1421
1422 data = newSV (NAME_MAX);
1423 SvPOK_on (data);
1424
1425 req->type = REQ_READLINK;
1426 req->fh = newSVsv (path);
1427 req->ptr2 = SvPVbyte_nolen (path);
1428 req->sv1 = data;
1429 req->ptr1 = SvPVbyte_nolen (data);
1430
1431 REQ_SEND;
1432 }
1433
1434 void
1435 aio_sendfile (out_fh,in_fh,in_offset,length,callback=&PL_sv_undef)
1436 SV * out_fh
1437 SV * in_fh
1438 UV in_offset
1439 UV length
1440 SV * callback
1441 PROTOTYPE: $$$$;$
1442 PPCODE:
1443 {
1444 dREQ;
1445
1446 req->type = REQ_SENDFILE;
1447 req->fh = newSVsv (out_fh);
1448 req->int1 = PerlIO_fileno (IoIFP (sv_2io (out_fh)));
1449 req->sv2 = newSVsv (in_fh);
1450 req->int2 = PerlIO_fileno (IoIFP (sv_2io (in_fh)));
1451 req->offs = in_offset;
1452 req->size = length;
1453
1454 REQ_SEND;
1455 }
1456
1457 void
1458 aio_readahead (fh,offset,length,callback=&PL_sv_undef)
1459 SV * fh
1460 UV offset
1461 IV length
1462 SV * callback
1463 PROTOTYPE: $$$;$
1464 PPCODE:
1465 {
1466 dREQ;
1467
1468 req->type = REQ_READAHEAD;
1469 req->fh = newSVsv (fh);
1470 req->int1 = PerlIO_fileno (IoIFP (sv_2io (fh)));
1471 req->offs = offset;
1472 req->size = length;
1473
1474 REQ_SEND;
1475 }
1476
1477 void
1478 aio_stat (fh_or_path,callback=&PL_sv_undef)
1479 SV * fh_or_path
1480 SV * callback
1481 ALIAS:
1482 aio_stat = REQ_STAT
1483 aio_lstat = REQ_LSTAT
1484 PPCODE:
1485 {
1486 dREQ;
1487
1488 New (0, req->statdata, 1, Stat_t);
1489 if (!req->statdata)
1490 {
1491 req_free (req);
1492 croak ("out of memory during aio_req->statdata allocation");
1493 }
1494
1495 if (SvPOK (fh_or_path))
1496 {
1497 req->type = ix;
1498 req->sv1 = newSVsv (fh_or_path);
1499 req->ptr1 = SvPVbyte_nolen (fh_or_path);
1500 }
1501 else
1502 {
1503 req->type = REQ_FSTAT;
1504 req->fh = newSVsv (fh_or_path);
1505 req->int1 = PerlIO_fileno (IoIFP (sv_2io (fh_or_path)));
1506 }
1507
1508 REQ_SEND;
1509 }
1510
1511 void
1512 aio_unlink (pathname,callback=&PL_sv_undef)
1513 SV * pathname
1514 SV * callback
1515 ALIAS:
1516 aio_unlink = REQ_UNLINK
1517 aio_rmdir = REQ_RMDIR
1518 aio_readdir = REQ_READDIR
1519 PPCODE:
1520 {
1521 dREQ;
1522
1523 req->type = ix;
1524 req->sv1 = newSVsv (pathname);
1525 req->ptr1 = SvPVbyte_nolen (pathname);
1526
1527 REQ_SEND;
1528 }
1529
1530 void
1531 aio_link (oldpath,newpath,callback=&PL_sv_undef)
1532 SV * oldpath
1533 SV * newpath
1534 SV * callback
1535 ALIAS:
1536 aio_link = REQ_LINK
1537 aio_symlink = REQ_SYMLINK
1538 aio_rename = REQ_RENAME
1539 PPCODE:
1540 {
1541 dREQ;
1542
1543 req->type = ix;
1544 req->fh = newSVsv (oldpath);
1545 req->ptr2 = SvPVbyte_nolen (req->fh);
1546 req->sv1 = newSVsv (newpath);
1547 req->ptr1 = SvPVbyte_nolen (newpath);
1548
1549 REQ_SEND;
1550 }
1551
1552 void
1553 aio_mknod (pathname,mode,dev,callback=&PL_sv_undef)
1554 SV * pathname
1555 SV * callback
1556 UV mode
1557 UV dev
1558 PPCODE:
1559 {
1560 dREQ;
1561
1562 req->type = REQ_MKNOD;
1563 req->sv1 = newSVsv (pathname);
1564 req->ptr1 = SvPVbyte_nolen (pathname);
1565 req->mode = (mode_t)mode;
1566 req->offs = dev;
1567
1568 REQ_SEND;
1569 }
1570
1571 void
1572 aio_busy (delay,callback=&PL_sv_undef)
1573 double delay
1574 SV * callback
1575 PPCODE:
1576 {
1577 dREQ;
1578
1579 req->type = REQ_BUSY;
1580 req->int1 = delay < 0. ? 0 : delay;
1581 req->int2 = delay < 0. ? 0 : 1000. * (delay - req->int1);
1582
1583 REQ_SEND;
1584 }
1585
1586 void
1587 aio_group (callback=&PL_sv_undef)
1588 SV * callback
1589 PROTOTYPE: ;$
1590 PPCODE:
1591 {
1592 dREQ;
1593
1594 req->type = REQ_GROUP;
1595
1596 req_send (req);
1597 XPUSHs (req_sv (req, AIO_GRP_KLASS));
1598 }
1599
1600 void
1601 aio_nop (callback=&PL_sv_undef)
1602 SV * callback
1603 PPCODE:
1604 {
1605 dREQ;
1606
1607 req->type = REQ_NOP;
1608
1609 REQ_SEND;
1610 }
1611
1612 int
1613 aioreq_pri (int pri = 0)
1614 PROTOTYPE: ;$
1615 CODE:
1616 RETVAL = next_pri - PRI_BIAS;
1617 if (items > 0)
1618 {
1619 if (pri < PRI_MIN) pri = PRI_MIN;
1620 if (pri > PRI_MAX) pri = PRI_MAX;
1621 next_pri = pri + PRI_BIAS;
1622 }
1623 OUTPUT:
1624 RETVAL
1625
1626 void
1627 aioreq_nice (int nice = 0)
1628 CODE:
1629 nice = next_pri - nice;
1630 if (nice < PRI_MIN) nice = PRI_MIN;
1631 if (nice > PRI_MAX) nice = PRI_MAX;
1632 next_pri = nice + PRI_BIAS;
1633
1634 void
1635 flush ()
1636 PROTOTYPE:
1637 CODE:
1638 while (nreqs)
1639 {
1640 poll_wait ();
1641 poll_cb (0);
1642 }
1643
1644 void
1645 poll()
1646 PROTOTYPE:
1647 CODE:
1648 if (nreqs)
1649 {
1650 poll_wait ();
1651 poll_cb (0);
1652 }
1653
1654 int
1655 poll_fileno()
1656 PROTOTYPE:
1657 CODE:
1658 RETVAL = respipe [0];
1659 OUTPUT:
1660 RETVAL
1661
1662 int
1663 poll_cb(...)
1664 PROTOTYPE:
1665 CODE:
1666 RETVAL = poll_cb ();
1667 OUTPUT:
1668 RETVAL
1669
1670 void
1671 poll_wait()
1672 PROTOTYPE:
1673 CODE:
1674 if (nreqs)
1675 poll_wait ();
1676
1677 int
1678 nreqs()
1679 PROTOTYPE:
1680 CODE:
1681 RETVAL = nreqs;
1682 OUTPUT:
1683 RETVAL
1684
1685 int
1686 nready()
1687 PROTOTYPE:
1688 CODE:
1689 RETVAL = get_nready ();
1690 OUTPUT:
1691 RETVAL
1692
1693 int
1694 npending()
1695 PROTOTYPE:
1696 CODE:
1697 RETVAL = get_npending ();
1698 OUTPUT:
1699 RETVAL
1700
1701 int
1702 nthreads()
1703 PROTOTYPE:
1704 CODE:
1705 if (WORDACCESS_UNSAFE) LOCK (wrklock);
1706 RETVAL = started;
1707 if (WORDACCESS_UNSAFE) UNLOCK (wrklock);
1708 OUTPUT:
1709 RETVAL
1710
1711 PROTOTYPES: DISABLE
1712
1713 MODULE = IO::AIO PACKAGE = IO::AIO::REQ
1714
1715 void
1716 cancel (aio_req_ornot req)
1717 CODE:
1718 req_cancel (req);
1719
1720 void
1721 cb (aio_req_ornot req, SV *callback=&PL_sv_undef)
1722 CODE:
1723 SvREFCNT_dec (req->callback);
1724 req->callback = newSVsv (callback);
1725
1726 MODULE = IO::AIO PACKAGE = IO::AIO::GRP
1727
1728 void
1729 add (aio_req grp, ...)
1730 PPCODE:
1731 {
1732 int i;
1733 aio_req req;
1734
1735 if (grp->int1 == 2)
1736 croak ("cannot add requests to IO::AIO::GRP after the group finished");
1737
1738 for (i = 1; i < items; ++i )
1739 {
1740 if (GIMME_V != G_VOID)
1741 XPUSHs (sv_2mortal (newSVsv (ST (i))));
1742
1743 req = SvAIO_REQ (ST (i));
1744
1745 if (req)
1746 {
1747 ++grp->size;
1748 req->grp = grp;
1749
1750 req->grp_prev = 0;
1751 req->grp_next = grp->grp_first;
1752
1753 if (grp->grp_first)
1754 grp->grp_first->grp_prev = req;
1755
1756 grp->grp_first = req;
1757 }
1758 }
1759 }
1760
1761 void
1762 cancel_subs (aio_req_ornot req)
1763 CODE:
1764 req_cancel_subs (req);
1765
1766 void
1767 result (aio_req grp, ...)
1768 CODE:
1769 {
1770 int i;
1771 AV *av;
1772
1773 grp->errorno = errno;
1774
1775 av = newAV ();
1776
1777 for (i = 1; i < items; ++i )
1778 av_push (av, newSVsv (ST (i)));
1779
1780 SvREFCNT_dec (grp->sv1);
1781 grp->sv1 = (SV *)av;
1782 }
1783
1784 void
1785 errno (aio_req grp, int errorno = errno)
1786 CODE:
1787 grp->errorno = errorno;
1788
1789 void
1790 limit (aio_req grp, int limit)
1791 CODE:
1792 grp->int2 = limit;
1793 aio_grp_feed (grp);
1794
1795 void
1796 feed (aio_req grp, SV *callback=&PL_sv_undef)
1797 CODE:
1798 {
1799 SvREFCNT_dec (grp->sv2);
1800 grp->sv2 = newSVsv (callback);
1801
1802 if (grp->int2 <= 0)
1803 grp->int2 = 2;
1804
1805 aio_grp_feed (grp);
1806 }
1807