/cvs/IO-AIO/AIO.xs
Revision: 1.98
Committed: Wed May 9 06:45:12 2007 UTC by root
Branch: MAIN
Changes since 1.97: +15 -73 lines
Log Message:
*** empty log message ***

File Contents

1 #include "xthread.h"
2
3 #include <errno.h>
4
5 #include "EXTERN.h"
6 #include "perl.h"
7 #include "XSUB.h"
8
9 #include "autoconf/config.h"
10
11 #include <stddef.h>
12 #include <stdlib.h>
13 #include <errno.h>
14 #include <sys/time.h>
15 #include <sys/select.h>
16 #include <sys/types.h>
17 #include <sys/stat.h>
18 #include <limits.h>
19 #include <unistd.h>
20 #include <fcntl.h>
21 #include <signal.h>
22 #include <sched.h>
23
24 #if HAVE_SENDFILE
25 # if __linux
26 # include <sys/sendfile.h>
27 # elif __freebsd
28 # include <sys/socket.h>
29 # include <sys/uio.h>
30 # elif __hpux
31 # include <sys/socket.h>
32 # elif __solaris /* not yet */
33 # include <sys/sendfile.h>
34 # else
35 # error sendfile support requested but not available
36 # endif
37 #endif
38
39 /* number of seconds after which idle threads exit */
40 #define IDLE_TIMEOUT 10
41
42 /* used to size struct dirent buffers, AIX doesn't provide NAME_MAX */
43 #ifndef NAME_MAX
44 # define NAME_MAX 4096
45 #endif
46
47 /* buffer size for various temporary buffers */
48 #define AIO_BUFSIZE 65536
49
50 #define dBUF \
51 char *aio_buf; \
52 LOCK (wrklock); \
53 self->dbuf = aio_buf = malloc (AIO_BUFSIZE); \
54 UNLOCK (wrklock); \
55 if (!aio_buf) \
56 return -1;
57
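/* Usage sketch (illustrative only, fill_from_fd is a made-up helper): dBUF is
 * meant for worker-side code that receives a "worker *self" argument. It
 * allocates a scratch buffer, records it in self->dbuf so worker_clear () can
 * release it once the request is done, and bails out with -1 on malloc failure:
 *
 *   static ssize_t fill_from_fd (int fd, worker *self)
 *   {
 *     dBUF;   - declares char *aio_buf and points it at AIO_BUFSIZE fresh bytes
 *     return pread (fd, aio_buf, AIO_BUFSIZE, 0);
 *   }
 */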
58 typedef SV SV8; /* byte-sv, used for argument-checking */
59
60 enum {
61 REQ_QUIT,
62 REQ_OPEN, REQ_CLOSE,
63 REQ_READ, REQ_WRITE, REQ_READAHEAD,
64 REQ_SENDFILE,
65 REQ_STAT, REQ_LSTAT, REQ_FSTAT,
66 REQ_FSYNC, REQ_FDATASYNC,
67 REQ_UNLINK, REQ_RMDIR, REQ_MKDIR, REQ_RENAME,
68 REQ_MKNOD, REQ_READDIR,
69 REQ_LINK, REQ_SYMLINK, REQ_READLINK,
70 REQ_GROUP, REQ_NOP,
71 REQ_BUSY,
72 };
73
74 #define AIO_REQ_KLASS "IO::AIO::REQ"
75 #define AIO_GRP_KLASS "IO::AIO::GRP"
76
77 typedef struct aio_cb
78 {
79 struct aio_cb *volatile next;
80
81 SV *callback, *fh;
82 SV *sv1, *sv2;
83 void *ptr1, *ptr2;
84 off_t offs;
85 size_t size;
86 ssize_t result;
87
88 STRLEN stroffset;
89 int type;
90 int int1, int2;
91 int errorno;
92 mode_t mode; /* open */
93
94 unsigned char flags;
95 unsigned char pri;
96
97 SV *self; /* the perl counterpart of this request, if any */
98 struct aio_cb *grp, *grp_prev, *grp_next, *grp_first;
99 } aio_cb;
100
101 enum {
102 FLAG_CANCELLED = 0x01, /* request was cancelled */
103 FLAG_SV1_RO_OFF = 0x40, /* data was set readonly */
104 FLAG_PTR2_FREE = 0x80, /* need to free(ptr2) */
105 };
106
107 typedef aio_cb *aio_req;
108 typedef aio_cb *aio_req_ornot;
109
110 enum {
111 PRI_MIN = -4,
112 PRI_MAX = 4,
113
114 DEFAULT_PRI = 0,
115 PRI_BIAS = -PRI_MIN,
116 NUM_PRI = PRI_MAX + PRI_BIAS + 1,
117 };
118
119 #define AIO_TICKS ((1000000 + 1023) >> 10)
120
121 static unsigned int max_poll_time = 0;
122 static unsigned int max_poll_reqs = 0;
123
124 /* calculate time difference in ~1/AIO_TICKS of a second */
125 static int tvdiff (struct timeval *tv1, struct timeval *tv2)
126 {
127 return (tv2->tv_sec - tv1->tv_sec ) * AIO_TICKS
128 + ((tv2->tv_usec - tv1->tv_usec) >> 10);
129 }
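/* Worked example (illustrative): AIO_TICKS == (1000000 + 1023) >> 10 == 977,
 * so one tick is roughly 1/1024 of a second. IO::AIO::max_poll_time (0.01)
 * below stores 0.01 * 977 == 9 ticks, and for two timevals taken 10 ms apart
 * tvdiff () returns 0 * 977 + (10000 >> 10) == 9, so poll_cb () stops on time. */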
130
131 static thread_t main_tid;
132 static int main_sig;
133 static int block_sig_level;
134
135 void block_sig ()
136 {
137 sigset_t ss;
138
139 if (block_sig_level++)
140 return;
141
142 if (!main_sig)
143 return;
144
145 sigemptyset (&ss);
146 sigaddset (&ss, main_sig);
147 pthread_sigmask (SIG_BLOCK, &ss, 0);
148 }
149
150 void unblock_sig ()
151 {
152 sigset_t ss;
153
154 if (--block_sig_level)
155 return;
156
157 if (!main_sig)
158 return;
159
160 sigemptyset (&ss);
161 sigaddset (&ss, main_sig);
162 pthread_sigmask (SIG_UNBLOCK, &ss, 0);
163 }
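/* Note: block_sig ()/unblock_sig () nest via block_sig_level and are no-ops
 * until IO::AIO::setsig has installed main_sig; they block that signal in the
 * calling thread so signal-driven polling cannot interrupt the enclosed
 * critical sections (request submission and callback invocation). */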
164
165 static int next_pri = DEFAULT_PRI + PRI_BIAS;
166
167 static unsigned int started, idle, wanted;
168
169 /* worker threads management */
170 static mutex_t wrklock = MUTEX_INIT;
171
172 typedef struct worker {
173 /* locked by wrklock */
174 struct worker *prev, *next;
175
176 thread_t tid;
177
178 /* locked by reslock, reqlock or wrklock */
179 aio_req req; /* currently processed request */
180 void *dbuf;
181 DIR *dirp;
182 } worker;
183
184 static worker wrk_first = { &wrk_first, &wrk_first, 0 };
185
186 static void worker_clear (worker *wrk)
187 {
188 if (wrk->dirp)
189 {
190 closedir (wrk->dirp);
191 wrk->dirp = 0;
192 }
193
194 if (wrk->dbuf)
195 {
196 free (wrk->dbuf);
197 wrk->dbuf = 0;
198 }
199 }
200
201 static void worker_free (worker *wrk)
202 {
203 wrk->next->prev = wrk->prev;
204 wrk->prev->next = wrk->next;
205
206 free (wrk);
207 }
208
209 static volatile unsigned int nreqs, nready, npending;
210 static volatile unsigned int max_idle = 4;
211 static volatile unsigned int max_outstanding = 0xffffffff;
212 static int respipe [2];
213
214 static mutex_t reslock = MUTEX_INIT;
215 static mutex_t reqlock = MUTEX_INIT;
216 static cond_t reqwait = COND_INIT;
217
218 #if WORDACCESS_UNSAFE
219
220 static unsigned int get_nready ()
221 {
222 unsigned int retval;
223
224 LOCK (reqlock);
225 retval = nready;
226 UNLOCK (reqlock);
227
228 return retval;
229 }
230
231 static unsigned int get_npending ()
232 {
233 unsigned int retval;
234
235 LOCK (reslock);
236 retval = npending;
237 UNLOCK (reslock);
238
239 return retval;
240 }
241
242 static unsigned int get_nthreads ()
243 {
244 unsigned int retval;
245
246 LOCK (wrklock);
247 retval = started;
248 UNLOCK (wrklock);
249
250 return retval;
251 }
252
253 #else
254
255 # define get_nready() nready
256 # define get_npending() npending
257 # define get_nthreads() started
258
259 #endif
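/* On platforms where plain word reads are not atomic (WORDACCESS_UNSAFE), the
 * counters above have to be sampled under the lock that protects them; on the
 * rest they are read directly without locking. */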
260
261 /*
262 * a somewhat faster data structure might be nice, but
263 * with 9 priorities this actually needs <20 insns
264 * per shift, the most expensive operation.
265 */
266 typedef struct {
267 aio_req qs[NUM_PRI], qe[NUM_PRI]; /* qstart, qend */
268 int size;
269 } reqq;
270
271 static reqq req_queue;
272 static reqq res_queue;
273
274 int reqq_push (reqq *q, aio_req req)
275 {
276 int pri = req->pri;
277 req->next = 0;
278
279 if (q->qe[pri])
280 {
281 q->qe[pri]->next = req;
282 q->qe[pri] = req;
283 }
284 else
285 q->qe[pri] = q->qs[pri] = req;
286
287 return q->size++;
288 }
289
290 aio_req reqq_shift (reqq *q)
291 {
292 int pri;
293
294 if (!q->size)
295 return 0;
296
297 --q->size;
298
299 for (pri = NUM_PRI; pri--; )
300 {
301 aio_req req = q->qs[pri];
302
303 if (req)
304 {
305 if (!(q->qs[pri] = req->next))
306 q->qe[pri] = 0;
307
308 return req;
309 }
310 }
311
312 abort ();
313 }
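/* Behaviour sketch (req_lo/req_hi are hypothetical requests): pri is the
 * already-biased priority in 0 .. NUM_PRI-1, and reqq_shift () scans the bands
 * from the highest down, so a later high-priority request overtakes earlier
 * low-priority ones:
 *
 *   req_lo->pri = 0;              - PRI_MIN + PRI_BIAS
 *   req_hi->pri = NUM_PRI - 1;    - PRI_MAX + PRI_BIAS
 *   reqq_push (&req_queue, req_lo);
 *   reqq_push (&req_queue, req_hi);
 *   assert (reqq_shift (&req_queue) == req_hi);
 */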
314
315 static int poll_cb ();
316 static int req_invoke (aio_req req);
317 static void req_free (aio_req req);
318 static void req_cancel (aio_req req);
319
320 /* must be called at most once */
321 static SV *req_sv (aio_req req, const char *klass)
322 {
323 if (!req->self)
324 {
325 req->self = (SV *)newHV ();
326 sv_magic (req->self, 0, PERL_MAGIC_ext, (char *)req, 0);
327 }
328
329 return sv_2mortal (sv_bless (newRV_inc (req->self), gv_stashpv (klass, 1)));
330 }
331
332 static aio_req SvAIO_REQ (SV *sv)
333 {
334 MAGIC *mg;
335
336 if (!sv_derived_from (sv, AIO_REQ_KLASS) || !SvROK (sv))
337 croak ("object of class " AIO_REQ_KLASS " expected");
338
339 mg = mg_find (SvRV (sv), PERL_MAGIC_ext);
340
341 return mg ? (aio_req)mg->mg_ptr : 0;
342 }
343
344 static void aio_grp_feed (aio_req grp)
345 {
346 block_sig ();
347
348 while (grp->size < grp->int2 && !(grp->flags & FLAG_CANCELLED))
349 {
350 int old_len = grp->size;
351
352 if (grp->sv2 && SvOK (grp->sv2))
353 {
354 dSP;
355
356 ENTER;
357 SAVETMPS;
358 PUSHMARK (SP);
359 XPUSHs (req_sv (grp, AIO_GRP_KLASS));
360 PUTBACK;
361 call_sv (grp->sv2, G_VOID | G_EVAL | G_KEEPERR);
362 SPAGAIN;
363 FREETMPS;
364 LEAVE;
365 }
366
367 /* stop if no progress has been made */
368 if (old_len == grp->size)
369 {
370 SvREFCNT_dec (grp->sv2);
371 grp->sv2 = 0;
372 break;
373 }
374 }
375
376 unblock_sig ();
377 }
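/* The feeder mechanism in brief: limit () stores the target queue length in
 * grp->int2 and feed () stores the feeder callback in grp->sv2; whenever a
 * member request finishes, aio_grp_feed () keeps calling the feeder until the
 * group again has int2 outstanding members or the feeder stops adding any, in
 * which case the feeder is dropped. */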
378
379 static void aio_grp_dec (aio_req grp)
380 {
381 --grp->size;
382
383 /* call feeder, if applicable */
384 aio_grp_feed (grp);
385
386 /* finish, if done */
387 if (!grp->size && grp->int1)
388 {
389 block_sig ();
390
391 if (!req_invoke (grp))
392 {
393 req_free (grp);
394 unblock_sig ();
395 croak (0);
396 }
397
398 req_free (grp);
399 unblock_sig ();
400 }
401 }
402
403 static int req_invoke (aio_req req)
404 {
405 dSP;
406
407 if (req->flags & FLAG_SV1_RO_OFF)
408 SvREADONLY_off (req->sv1);
409
410 if (!(req->flags & FLAG_CANCELLED) && SvOK (req->callback))
411 {
412 ENTER;
413 SAVETMPS;
414 PUSHMARK (SP);
415 EXTEND (SP, 1);
416
417 switch (req->type)
418 {
419 case REQ_READDIR:
420 {
421 SV *rv = &PL_sv_undef;
422
423 if (req->result >= 0)
424 {
425 int i;
426 char *buf = req->ptr2;
427 AV *av = newAV ();
428
429 av_extend (av, req->result - 1);
430
431 for (i = 0; i < req->result; ++i)
432 {
433 SV *sv = newSVpv (buf, 0);
434
435 av_store (av, i, sv);
436 buf += SvCUR (sv) + 1;
437 }
438
439 rv = sv_2mortal (newRV_noinc ((SV *)av));
440 }
441
442 PUSHs (rv);
443 }
444 break;
445
446 case REQ_OPEN:
447 {
448 /* convert fd to fh */
449 SV *fh;
450
451 PUSHs (sv_2mortal (newSViv (req->result)));
452 PUTBACK;
453 call_pv ("IO::AIO::_fd2fh", G_SCALAR | G_EVAL);
454 SPAGAIN;
455
456 fh = POPs;
457 PUSHMARK (SP);
458 XPUSHs (fh);
459 }
460 break;
461
462 case REQ_GROUP:
463 req->int1 = 2; /* mark group as finished */
464
465 if (req->sv1)
466 {
467 int i;
468 AV *av = (AV *)req->sv1;
469
470 EXTEND (SP, AvFILL (av) + 1);
471 for (i = 0; i <= AvFILL (av); ++i)
472 PUSHs (*av_fetch (av, i, 0));
473 }
474 break;
475
476 case REQ_NOP:
477 case REQ_BUSY:
478 break;
479
480 case REQ_READLINK:
481 if (req->result > 0)
482 {
483 SvCUR_set (req->sv1, req->result);
484 *SvEND (req->sv1) = 0;
485 PUSHs (req->sv1);
486 }
487 break;
488
489 case REQ_STAT:
490 case REQ_LSTAT:
491 case REQ_FSTAT:
492 PL_laststype = req->type == REQ_LSTAT ? OP_LSTAT : OP_STAT;
493 PL_laststatval = req->result;
494 PL_statcache = *(Stat_t *)(req->ptr2);
495 PUSHs (sv_2mortal (newSViv (req->result)));
496 break;
497
498 case REQ_READ:
499 SvCUR_set (req->sv1, req->stroffset + (req->result > 0 ? req->result : 0));
500 *SvEND (req->sv1) = 0;
501 PUSHs (sv_2mortal (newSViv (req->result)));
502 break;
503
504 default:
505 PUSHs (sv_2mortal (newSViv (req->result)));
506 break;
507 }
508
509 errno = req->errorno;
510
511 PUTBACK;
512 call_sv (req->callback, G_VOID | G_EVAL);
513 SPAGAIN;
514
515 FREETMPS;
516 LEAVE;
517 }
518
519 if (req->grp)
520 {
521 aio_req grp = req->grp;
522
523 /* unlink request */
524 if (req->grp_next) req->grp_next->grp_prev = req->grp_prev;
525 if (req->grp_prev) req->grp_prev->grp_next = req->grp_next;
526
527 if (grp->grp_first == req)
528 grp->grp_first = req->grp_next;
529
530 aio_grp_dec (grp);
531 }
532
533 return !SvTRUE (ERRSV);
534 }
535
536 static void req_free (aio_req req)
537 {
538 if (req->self)
539 {
540 sv_unmagic (req->self, PERL_MAGIC_ext);
541 SvREFCNT_dec (req->self);
542 }
543
544 SvREFCNT_dec (req->fh);
545 SvREFCNT_dec (req->sv1);
546 SvREFCNT_dec (req->sv2);
547 SvREFCNT_dec (req->callback);
548
549 if (req->flags & FLAG_PTR2_FREE)
550 free (req->ptr2);
551
552 Safefree (req);
553 }
554
555 static void req_cancel_subs (aio_req grp)
556 {
557 aio_req sub;
558
559 if (grp->type != REQ_GROUP)
560 return;
561
562 SvREFCNT_dec (grp->sv2);
563 grp->sv2 = 0;
564
565 for (sub = grp->grp_first; sub; sub = sub->grp_next)
566 req_cancel (sub);
567 }
568
569 static void req_cancel (aio_req req)
570 {
571 req->flags |= FLAG_CANCELLED;
572
573 req_cancel_subs (req);
574 }
575
576 static void *aio_proc(void *arg);
577
578 static void start_thread (void)
579 {
580 worker *wrk = calloc (1, sizeof (worker));
581
582 if (!wrk)
583 croak ("unable to allocate worker thread data");
584
585 LOCK (wrklock);
586
587 if (thread_create (&wrk->tid, aio_proc, (void *)wrk))
588 {
589 wrk->prev = &wrk_first;
590 wrk->next = wrk_first.next;
591 wrk_first.next->prev = wrk;
592 wrk_first.next = wrk;
593 ++started;
594 }
595 else
596 free (wrk);
597
598 UNLOCK (wrklock);
599 }
600
601 static void maybe_start_thread ()
602 {
603 if (get_nthreads () >= wanted)
604 return;
605
606 /* todo: maybe use idle here, but might be less exact */
607 if (0 <= (int)get_nthreads () + (int)get_npending () - (int)nreqs)
608 return;
609
610 start_thread ();
611 }
612
613 static void req_send (aio_req req)
614 {
615 block_sig ();
616
617 ++nreqs;
618
619 LOCK (reqlock);
620 ++nready;
621 reqq_push (&req_queue, req);
622 COND_SIGNAL (reqwait);
623 UNLOCK (reqlock);
624
625 unblock_sig ();
626
627 maybe_start_thread ();
628 }
629
630 static void end_thread (void)
631 {
632 aio_req req;
633
634 Newz (0, req, 1, aio_cb);
635
636 req->type = REQ_QUIT;
637 req->pri = PRI_MAX + PRI_BIAS;
638
639 LOCK (reqlock);
640 reqq_push (&req_queue, req);
641 COND_SIGNAL (reqwait);
642 UNLOCK (reqlock);
643
644 LOCK (wrklock);
645 --started;
646 UNLOCK (wrklock);
647 }
648
649 static void set_max_idle (int nthreads)
650 {
651 if (WORDACCESS_UNSAFE) LOCK (reqlock);
652 max_idle = nthreads <= 0 ? 1 : nthreads;
653 if (WORDACCESS_UNSAFE) UNLOCK (reqlock);
654 }
655
656 static void min_parallel (int nthreads)
657 {
658 if (wanted < nthreads)
659 wanted = nthreads;
660 }
661
662 static void max_parallel (int nthreads)
663 {
664 if (wanted > nthreads)
665 wanted = nthreads;
666
667 while (started > wanted)
668 end_thread ();
669 }
670
671 static void poll_wait ()
672 {
673 fd_set rfd;
674
675 while (nreqs)
676 {
677 int size;
678 if (WORDACCESS_UNSAFE) LOCK (reslock);
679 size = res_queue.size;
680 if (WORDACCESS_UNSAFE) UNLOCK (reslock);
681
682 if (size)
683 return;
684
685 maybe_start_thread ();
686
687 FD_ZERO(&rfd);
688 FD_SET(respipe [0], &rfd);
689
690 select (respipe [0] + 1, &rfd, 0, 0, 0);
691 }
692 }
693
694 static int poll_cb ()
695 {
696 dSP;
697 int count = 0;
698 int maxreqs = max_poll_reqs;
699 int do_croak = 0;
700 struct timeval tv_start, tv_now;
701 aio_req req;
702
703 if (max_poll_time)
704 gettimeofday (&tv_start, 0);
705
706 block_sig ();
707
708 for (;;)
709 {
710 for (;;)
711 {
712 maybe_start_thread ();
713
714 LOCK (reslock);
715 req = reqq_shift (&res_queue);
716
717 if (req)
718 {
719 --npending;
720
721 if (!res_queue.size)
722 {
723 /* drain the wakeup bytes written by the worker threads */
724 char buf [4];
725 while (read (respipe [0], buf, 4) == 4)
726 ;
727 }
728 }
729
730 UNLOCK (reslock);
731
732 if (!req)
733 break;
734
735 --nreqs;
736
737 if (req->type == REQ_GROUP && req->size)
738 {
739 req->int1 = 1; /* mark request as delayed */
740 continue;
741 }
742 else
743 {
744 if (!req_invoke (req))
745 {
746 req_free (req);
747 unblock_sig ();
748 croak (0);
749 }
750
751 count++;
752 }
753
754 req_free (req);
755
756 if (maxreqs && !--maxreqs)
757 break;
758
759 if (max_poll_time)
760 {
761 gettimeofday (&tv_now, 0);
762
763 if (tvdiff (&tv_start, &tv_now) >= max_poll_time)
764 break;
765 }
766 }
767
768 if (nreqs <= max_outstanding)
769 break;
770
771 poll_wait ();
772
773 ++maxreqs;
774 }
775
776 unblock_sig ();
777 return count;
778 }
779
780 static void create_pipe ()
781 {
782 if (pipe (respipe))
783 croak ("unable to initialize result pipe");
784
785 if (fcntl (respipe [0], F_SETFL, O_NONBLOCK))
786 croak ("cannot set result pipe to nonblocking mode");
787
788 if (fcntl (respipe [1], F_SETFL, O_NONBLOCK))
789 croak ("cannot set result pipe to nonblocking mode");
790 }
791
792 /*****************************************************************************/
793 /* work around various missing functions */
794
795 #if !HAVE_PREADWRITE
796 # define pread aio_pread
797 # define pwrite aio_pwrite
798
799 /*
800 * make our pread/pwrite safe against themselves, but not against
801 * normal read/write by using a mutex. slows down execution a lot,
802 * but that's your problem, not mine.
803 */
804 static mutex_t preadwritelock = MUTEX_INIT;
805
806 static ssize_t pread (int fd, void *buf, size_t count, off_t offset)
807 {
808 ssize_t res;
809 off_t ooffset;
810
811 LOCK (preadwritelock);
812 ooffset = lseek (fd, 0, SEEK_CUR);
813 lseek (fd, offset, SEEK_SET);
814 res = read (fd, buf, count);
815 lseek (fd, ooffset, SEEK_SET);
816 UNLOCK (preadwritelock);
817
818 return res;
819 }
820
821 static ssize_t pwrite (int fd, void *buf, size_t count, off_t offset)
822 {
823 ssize_t res;
824 off_t ooffset;
825
826 LOCK (preadwritelock);
827 ooffset = lseek (fd, 0, SEEK_CUR);
828 lseek (fd, offset, SEEK_SET);
829 res = write (fd, buf, count);
830 lseek (fd, ooffset, SEEK_SET); /* restore the original file offset */
831 UNLOCK (preadwritelock);
832
833 return res;
834 }
835 #endif
836
837 #if !HAVE_FDATASYNC
838 # define fdatasync fsync
839 #endif
840
841 #if !HAVE_READAHEAD
842 # define readahead(fd,offset,count) aio_readahead (fd, offset, count, self)
843
844 static ssize_t aio_readahead (int fd, off_t offset, size_t count, worker *self)
845 {
846 dBUF;
847
848 while (count > 0)
849 {
850 size_t len = count < AIO_BUFSIZE ? count : AIO_BUFSIZE;
851
852 pread (fd, aio_buf, len, offset);
853 offset += len;
854 count -= len;
855 }
856
857 errno = 0; return count; /* declared ssize_t, so return a value */
858 }
859
860 #endif
861
862 #if !HAVE_READDIR_R
863 # define readdir_r aio_readdir_r
864
865 static mutex_t readdirlock = MUTEX_INIT;
866
867 static int readdir_r (DIR *dirp, struct dirent *ent, struct dirent **res)
868 {
869 struct dirent *e;
870 int errorno;
871
872 LOCK (readdirlock);
873
874 e = readdir (dirp);
875 errorno = errno;
876
877 if (e)
878 {
879 *res = ent;
880 strcpy (ent->d_name, e->d_name);
881 }
882 else
883 *res = 0;
884
885 UNLOCK (readdirlock);
886
887 errno = errorno;
888 return e ? 0 : -1;
889 }
890 #endif
891
892 /* sendfile always needs emulation */
893 static ssize_t sendfile_ (int ofd, int ifd, off_t offset, size_t count, worker *self)
894 {
895 ssize_t res;
896
897 if (!count)
898 return 0;
899
900 #if HAVE_SENDFILE
901 # if __linux
902 res = sendfile (ofd, ifd, &offset, count);
903
904 # elif __freebsd
905 /*
906 * Of course, the freebsd sendfile is a dire hack with no thoughts
907 * wasted on making it similar to other I/O functions.
908 */
909 {
910 off_t sbytes;
911 res = sendfile (ifd, ofd, offset, count, 0, &sbytes, 0);
912
913 if (res < 0 && sbytes)
914 /* maybe only on EAGAIN: as usual, the manpage leaves you guessing */
915 res = sbytes;
916 }
917
918 # elif __hpux
919 res = sendfile (ofd, ifd, offset, count, 0, 0);
920
921 # elif __solaris
922 {
923 struct sendfilevec vec;
924 size_t sbytes;
925
926 vec.sfv_fd = ifd;
927 vec.sfv_flag = 0;
928 vec.sfv_off = offset;
929 vec.sfv_len = count;
930
931 res = sendfilev (ofd, &vec, 1, &sbytes);
932
933 if (res < 0 && sbytes)
934 res = sbytes;
935 }
936
937 # endif
938 #else
939 res = -1;
940 errno = ENOSYS;
941 #endif
942
943 if (res < 0
944 && (errno == ENOSYS || errno == EINVAL || errno == ENOTSOCK
945 #if __solaris
946 || errno == EAFNOSUPPORT || errno == EPROTOTYPE
947 #endif
948 )
949 )
950 {
951 /* emulate sendfile. this is a major pain in the ass */
952 dBUF;
953
954 res = 0;
955
956 while (count)
957 {
958 ssize_t cnt;
959
960 cnt = pread (ifd, aio_buf, count > AIO_BUFSIZE ? AIO_BUFSIZE : count, offset);
961
962 if (cnt <= 0)
963 {
964 if (cnt && !res) res = -1;
965 break;
966 }
967
968 cnt = write (ofd, aio_buf, cnt);
969
970 if (cnt <= 0)
971 {
972 if (cnt && !res) res = -1;
973 break;
974 }
975
976 offset += cnt;
977 res += cnt;
978 count -= cnt;
979 }
980 }
981
982 return res;
983 }
984
985 /* read a full directory */
986 static void scandir_ (aio_req req, worker *self)
987 {
988 DIR *dirp;
989 union
990 {
991 struct dirent d;
992 char b [offsetof (struct dirent, d_name) + NAME_MAX + 1];
993 } *u;
994 struct dirent *entp;
995 char *name, *names;
996 int memlen = 4096;
997 int memofs = 0;
998 int res = 0;
999 int errorno;
1000
1001 LOCK (wrklock);
1002 self->dirp = dirp = opendir (req->ptr1);
1003 self->dbuf = u = malloc (sizeof (*u));
1004 req->flags |= FLAG_PTR2_FREE;
1005 req->ptr2 = names = malloc (memlen);
1006 UNLOCK (wrklock);
1007
1008 if (dirp && u && names)
1009 for (;;)
1010 {
1011 errno = 0;
1012 readdir_r (dirp, &u->d, &entp);
1013
1014 if (!entp)
1015 break;
1016
1017 name = entp->d_name;
1018
1019 if (name [0] != '.' || (name [1] && (name [1] != '.' || name [2])))
1020 {
1021 int len = strlen (name) + 1;
1022
1023 res++;
1024
1025 while (memofs + len > memlen)
1026 {
1027 memlen *= 2;
1028 LOCK (wrklock);
1029 req->ptr2 = names = realloc (names, memlen);
1030 UNLOCK (wrklock);
1031
1032 if (!names)
1033 break;
1034 }
1035
1036 memcpy (names + memofs, name, len);
1037 memofs += len;
1038 }
1039 }
1040
1041 if (errno)
1042 res = -1;
1043
1044 req->result = res;
1045 }
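/* Result layout (as consumed by req_invoke ()'s REQ_READDIR branch): ptr2
 * holds the entry names packed back to back, each '\0'-terminated, with "."
 * and ".." skipped, and req->result is the number of names. For two entries
 * "foo" and "barbaz" this gives:
 *
 *   ptr2:   f o o \0 b a r b a z \0
 *   result: 2
 */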
1046
1047 /*****************************************************************************/
1048
1049 static void *aio_proc (void *thr_arg)
1050 {
1051 aio_req req;
1052 struct timespec ts;
1053 worker *self = (worker *)thr_arg;
1054
1055 /* try to distribute timeouts somewhat evenly */
1056 ts.tv_nsec = (((unsigned long)self + (unsigned long)ts.tv_sec) & 1023UL)
1057 * (1000000000UL / 1024UL);
1058
1059 for (;;)
1060 {
1061 ts.tv_sec = time (0) + IDLE_TIMEOUT;
1062
1063 LOCK (reqlock);
1064
1065 for (;;)
1066 {
1067 self->req = req = reqq_shift (&req_queue);
1068
1069 if (req)
1070 break;
1071
1072 ++idle;
1073
1074 if (COND_TIMEDWAIT (reqwait, reqlock, ts)
1075 == ETIMEDOUT)
1076 {
1077 if (idle > max_idle)
1078 {
1079 --idle;
1080 UNLOCK (reqlock);
1081 LOCK (wrklock);
1082 --started;
1083 UNLOCK (wrklock);
1084 goto quit;
1085 }
1086
1087 /* we are allowed to idle, so do so without any timeout */
1088 COND_WAIT (reqwait, reqlock);
1089 ts.tv_sec = time (0) + IDLE_TIMEOUT;
1090 }
1091
1092 --idle;
1093 }
1094
1095 --nready;
1096
1097 UNLOCK (reqlock);
1098
1099 errno = 0; /* strictly unnecessary */
1100
1101 if (!(req->flags & FLAG_CANCELLED))
1102 switch (req->type)
1103 {
1104 case REQ_READ: req->result = pread (req->int1, req->ptr1, req->size, req->offs); break;
1105 case REQ_WRITE: req->result = pwrite (req->int1, req->ptr1, req->size, req->offs); break;
1106
1107 case REQ_READAHEAD: req->result = readahead (req->int1, req->offs, req->size); break;
1108 case REQ_SENDFILE: req->result = sendfile_ (req->int1, req->int2, req->offs, req->size, self); break;
1109
1110 case REQ_STAT: req->result = stat (req->ptr1, (Stat_t *)req->ptr2); break;
1111 case REQ_LSTAT: req->result = lstat (req->ptr1, (Stat_t *)req->ptr2); break;
1112 case REQ_FSTAT: req->result = fstat (req->int1, (Stat_t *)req->ptr2); break;
1113
1114 case REQ_OPEN: req->result = open (req->ptr1, req->int1, req->mode); break;
1115 case REQ_CLOSE: req->result = close (req->int1); break;
1116 case REQ_UNLINK: req->result = unlink (req->ptr1); break;
1117 case REQ_RMDIR: req->result = rmdir (req->ptr1); break;
1118 case REQ_MKDIR: req->result = mkdir (req->ptr1, req->mode); break;
1119 case REQ_RENAME: req->result = rename (req->ptr2, req->ptr1); break;
1120 case REQ_LINK: req->result = link (req->ptr2, req->ptr1); break;
1121 case REQ_SYMLINK: req->result = symlink (req->ptr2, req->ptr1); break;
1122 case REQ_MKNOD: req->result = mknod (req->ptr2, req->mode, (dev_t)req->offs); break;
1123 case REQ_READLINK: req->result = readlink (req->ptr2, req->ptr1, NAME_MAX); break;
1124
1125 case REQ_FDATASYNC: req->result = fdatasync (req->int1); break;
1126 case REQ_FSYNC: req->result = fsync (req->int1); break;
1127 case REQ_READDIR: scandir_ (req, self); break;
1128
1129 case REQ_BUSY:
1130 {
1131 struct timeval tv;
1132
1133 tv.tv_sec = req->int1;
1134 tv.tv_usec = req->int2;
1135
1136 req->result = select (0, 0, 0, 0, &tv);
1137 }
1138
1139 case REQ_GROUP:
1140 case REQ_NOP:
1141 break;
1142
1143 case REQ_QUIT:
1144 goto quit;
1145
1146 default:
1147 req->result = ENOSYS;
1148 break;
1149 }
1150
1151 req->errorno = errno;
1152
1153 LOCK (reslock);
1154
1155 ++npending;
1156
1157 if (!reqq_push (&res_queue, req))
1158 {
1159 /* write a dummy byte to the pipe so fh becomes ready */
1160 write (respipe [1], &respipe, 1);
1161
1162 /* optionally signal the main thread asynchronously */
1163 if (main_sig)
1164 pthread_kill (main_tid, main_sig);
1165 }
1166
1167 self->req = 0;
1168 worker_clear (self);
1169
1170 UNLOCK (reslock);
1171 }
1172
1173 quit:
1174 LOCK (wrklock);
1175 worker_free (self);
1176 UNLOCK (wrklock);
1177
1178 return 0;
1179 }
1180
1181 /*****************************************************************************/
1182
1183 static void atfork_prepare (void)
1184 {
1185 LOCK (wrklock);
1186 LOCK (reqlock);
1187 LOCK (reslock);
1188 #if !HAVE_PREADWRITE
1189 LOCK (preadwritelock);
1190 #endif
1191 #if !HAVE_READDIR_R
1192 LOCK (readdirlock);
1193 #endif
1194 }
1195
1196 static void atfork_parent (void)
1197 {
1198 #if !HAVE_READDIR_R
1199 UNLOCK (readdirlock);
1200 #endif
1201 #if !HAVE_PREADWRITE
1202 UNLOCK (preadwritelock);
1203 #endif
1204 UNLOCK (reslock);
1205 UNLOCK (reqlock);
1206 UNLOCK (wrklock);
1207 }
1208
1209 static void atfork_child (void)
1210 {
1211 aio_req prv;
1212
1213 while (prv = reqq_shift (&req_queue))
1214 req_free (prv);
1215
1216 while (prv = reqq_shift (&res_queue))
1217 req_free (prv);
1218
1219 while (wrk_first.next != &wrk_first)
1220 {
1221 worker *wrk = wrk_first.next;
1222
1223 if (wrk->req)
1224 req_free (wrk->req);
1225
1226 worker_clear (wrk);
1227 worker_free (wrk);
1228 }
1229
1230 started = 0;
1231 idle = 0;
1232 nreqs = 0;
1233 nready = 0;
1234 npending = 0;
1235
1236 close (respipe [0]);
1237 close (respipe [1]);
1238 create_pipe ();
1239
1240 atfork_parent ();
1241 }
1242
1243 #define dREQ \
1244 aio_req req; \
1245 int req_pri = next_pri; \
1246 next_pri = DEFAULT_PRI + PRI_BIAS; \
1247 \
1248 if (SvOK (callback) && !SvROK (callback)) \
1249 croak ("callback must be undef or of reference type"); \
1250 \
1251 Newz (0, req, 1, aio_cb); \
1252 if (!req) \
1253 croak ("out of memory during aio_req allocation"); \
1254 \
1255 req->callback = newSVsv (callback); \
1256 req->pri = req_pri
1257
1258 #define REQ_SEND \
1259 req_send (req); \
1260 \
1261 if (GIMME_V != G_VOID) \
1262 XPUSHs (req_sv (req, AIO_REQ_KLASS));
1263
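/* Pattern sketch (illustrative): each aio_* XS body below has the same shape -
 * dREQ allocates a request carrying the current priority and a copy of the
 * callback, the body fills in the type and its arguments, and REQ_SEND queues
 * the request and, in non-void context, pushes the IO::AIO::REQ object:
 *
 *   {
 *     dREQ;
 *     req->type = REQ_NOP;
 *     REQ_SEND;
 *   }
 */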
1264 MODULE = IO::AIO PACKAGE = IO::AIO
1265
1266 PROTOTYPES: ENABLE
1267
1268 BOOT:
1269 {
1270 HV *stash = gv_stashpv ("IO::AIO", 1);
1271
1272 newCONSTSUB (stash, "EXDEV", newSViv (EXDEV));
1273 newCONSTSUB (stash, "O_RDONLY", newSViv (O_RDONLY));
1274 newCONSTSUB (stash, "O_WRONLY", newSViv (O_WRONLY));
1275 newCONSTSUB (stash, "O_CREAT", newSViv (O_CREAT));
1276 newCONSTSUB (stash, "O_TRUNC", newSViv (O_TRUNC));
1277 newCONSTSUB (stash, "S_IFIFO", newSViv (S_IFIFO));
1278 newCONSTSUB (stash, "SIGIO", newSViv (SIGIO));
1279
1280 create_pipe ();
1281 ATFORK (atfork_prepare, atfork_parent, atfork_child);
1282 }
1283
1284 void
1285 max_poll_reqs (int nreqs)
1286 PROTOTYPE: $
1287 CODE:
1288 max_poll_reqs = nreqs;
1289
1290 void
1291 max_poll_time (double nseconds)
1292 PROTOTYPE: $
1293 CODE:
1294 max_poll_time = nseconds * AIO_TICKS;
1295
1296 void
1297 min_parallel (int nthreads)
1298 PROTOTYPE: $
1299
1300 void
1301 max_parallel (int nthreads)
1302 PROTOTYPE: $
1303
1304 void
1305 max_idle (int nthreads)
1306 PROTOTYPE: $
1307 CODE:
1308 set_max_idle (nthreads);
1309
1310 int
1311 max_outstanding (int maxreqs)
1312 PROTOTYPE: $
1313 CODE:
1314 RETVAL = max_outstanding;
1315 max_outstanding = maxreqs;
1316 OUTPUT:
1317 RETVAL
1318
1319 void
1320 aio_open (pathname,flags,mode,callback=&PL_sv_undef)
1321 SV8 * pathname
1322 int flags
1323 int mode
1324 SV * callback
1325 PROTOTYPE: $$$;$
1326 PPCODE:
1327 {
1328 dREQ;
1329
1330 req->type = REQ_OPEN;
1331 req->sv1 = newSVsv (pathname);
1332 req->ptr1 = SvPVbyte_nolen (req->sv1);
1333 req->int1 = flags;
1334 req->mode = mode;
1335
1336 REQ_SEND;
1337 }
1338
1339 void
1340 aio_close (fh,callback=&PL_sv_undef)
1341 SV * fh
1342 SV * callback
1343 PROTOTYPE: $;$
1344 ALIAS:
1345 aio_close = REQ_CLOSE
1346 aio_fsync = REQ_FSYNC
1347 aio_fdatasync = REQ_FDATASYNC
1348 PPCODE:
1349 {
1350 dREQ;
1351
1352 req->type = ix;
1353 req->fh = newSVsv (fh);
1354 req->int1 = PerlIO_fileno (IoIFP (sv_2io (fh)));
1355
1356 REQ_SEND;
1357 }
1358
1359 void
1360 aio_read (fh,offset,length,data,dataoffset,callback=&PL_sv_undef)
1361 SV * fh
1362 UV offset
1363 UV length
1364 SV8 * data
1365 UV dataoffset
1366 SV * callback
1367 ALIAS:
1368 aio_read = REQ_READ
1369 aio_write = REQ_WRITE
1370 PROTOTYPE: $$$$$;$
1371 PPCODE:
1372 {
1373 STRLEN svlen;
1374 char *svptr = SvPVbyte (data, svlen);
1375
1376 SvUPGRADE (data, SVt_PV);
1377 SvPOK_on (data);
1378
1379 if (dataoffset < 0)
1380 dataoffset += svlen;
1381
1382 if (dataoffset < 0 || dataoffset > svlen)
1383 croak ("data offset outside of string");
1384
1385 if (ix == REQ_WRITE)
1386 {
1387 /* write: check length and adjust. */
1388 if (length < 0 || length + dataoffset > svlen)
1389 length = svlen - dataoffset;
1390 }
1391 else
1392 {
1393 /* read: grow scalar as necessary */
1394 svptr = SvGROW (data, length + dataoffset + 1);
1395 }
1396
1397 if (length < 0)
1398 croak ("length must not be negative");
1399
1400 {
1401 dREQ;
1402
1403 req->type = ix;
1404 req->fh = newSVsv (fh);
1405 req->int1 = PerlIO_fileno (ix == REQ_READ ? IoIFP (sv_2io (fh))
1406 : IoOFP (sv_2io (fh)));
1407 req->offs = offset;
1408 req->size = length;
1409 req->sv1 = SvREFCNT_inc (data);
1410 req->ptr1 = (char *)svptr + dataoffset;
1411 req->stroffset = dataoffset;
1412
1413 if (!SvREADONLY (data))
1414 {
1415 SvREADONLY_on (data);
1416 req->flags |= FLAG_SV1_RO_OFF;
1417 }
1418
1419 REQ_SEND;
1420 }
1421 }
1422
1423 void
1424 aio_readlink (path,callback=&PL_sv_undef)
1425 SV8 * path
1426 SV * callback
1427 PROTOTYPE: $;$
1428 PPCODE:
1429 {
1430 SV *data;
1431 dREQ;
1432
1433 data = newSV (NAME_MAX);
1434 SvPOK_on (data);
1435
1436 req->type = REQ_READLINK;
1437 req->fh = newSVsv (path);
1438 req->ptr2 = SvPVbyte_nolen (req->fh);
1439 req->sv1 = data;
1440 req->ptr1 = SvPVbyte_nolen (data);
1441
1442 REQ_SEND;
1443 }
1444
1445 void
1446 aio_sendfile (out_fh,in_fh,in_offset,length,callback=&PL_sv_undef)
1447 SV * out_fh
1448 SV * in_fh
1449 UV in_offset
1450 UV length
1451 SV * callback
1452 PROTOTYPE: $$$$;$
1453 PPCODE:
1454 {
1455 dREQ;
1456
1457 req->type = REQ_SENDFILE;
1458 req->fh = newSVsv (out_fh);
1459 req->int1 = PerlIO_fileno (IoIFP (sv_2io (out_fh)));
1460 req->sv2 = newSVsv (in_fh);
1461 req->int2 = PerlIO_fileno (IoIFP (sv_2io (in_fh)));
1462 req->offs = in_offset;
1463 req->size = length;
1464
1465 REQ_SEND;
1466 }
1467
1468 void
1469 aio_readahead (fh,offset,length,callback=&PL_sv_undef)
1470 SV * fh
1471 UV offset
1472 IV length
1473 SV * callback
1474 PROTOTYPE: $$$;$
1475 PPCODE:
1476 {
1477 dREQ;
1478
1479 req->type = REQ_READAHEAD;
1480 req->fh = newSVsv (fh);
1481 req->int1 = PerlIO_fileno (IoIFP (sv_2io (fh)));
1482 req->offs = offset;
1483 req->size = length;
1484
1485 REQ_SEND;
1486 }
1487
1488 void
1489 aio_stat (fh_or_path,callback=&PL_sv_undef)
1490 SV8 * fh_or_path
1491 SV * callback
1492 ALIAS:
1493 aio_stat = REQ_STAT
1494 aio_lstat = REQ_LSTAT
1495 PPCODE:
1496 {
1497 dREQ;
1498
1499 req->ptr2 = malloc (sizeof (Stat_t));
1500 if (!req->ptr2)
1501 {
1502 req_free (req);
1503 croak ("out of memory during aio_stat statdata allocation");
1504 }
1505
1506 req->flags |= FLAG_PTR2_FREE;
1507
1508 if (SvPOK (fh_or_path))
1509 {
1510 req->type = ix;
1511 req->sv1 = newSVsv (fh_or_path);
1512 req->ptr1 = SvPVbyte_nolen (req->sv1);
1513 }
1514 else
1515 {
1516 req->type = REQ_FSTAT;
1517 req->fh = newSVsv (fh_or_path);
1518 req->int1 = PerlIO_fileno (IoIFP (sv_2io (fh_or_path)));
1519 }
1520
1521 REQ_SEND;
1522 }
1523
1524 void
1525 aio_unlink (pathname,callback=&PL_sv_undef)
1526 SV8 * pathname
1527 SV * callback
1528 ALIAS:
1529 aio_unlink = REQ_UNLINK
1530 aio_rmdir = REQ_RMDIR
1531 aio_readdir = REQ_READDIR
1532 PPCODE:
1533 {
1534 dREQ;
1535
1536 req->type = ix;
1537 req->sv1 = newSVsv (pathname);
1538 req->ptr1 = SvPVbyte_nolen (req->sv1);
1539
1540 REQ_SEND;
1541 }
1542
1543 void
1544 aio_mkdir (pathname,mode,callback=&PL_sv_undef)
1545 SV8 * pathname
1546 UV mode
1547 SV * callback
1548 PPCODE:
1549 {
1550 dREQ;
1551
1552 req->type = REQ_MKDIR;
1553 req->sv1 = newSVsv (pathname);
1554 req->ptr1 = SvPVbyte_nolen (req->sv1);
1555 req->mode = mode;
1556
1557 REQ_SEND;
1558 }
1559
1560 void
1561 aio_link (oldpath,newpath,callback=&PL_sv_undef)
1562 SV8 * oldpath
1563 SV8 * newpath
1564 SV * callback
1565 ALIAS:
1566 aio_link = REQ_LINK
1567 aio_symlink = REQ_SYMLINK
1568 aio_rename = REQ_RENAME
1569 PPCODE:
1570 {
1571 dREQ;
1572
1573 req->type = ix;
1574 req->fh = newSVsv (oldpath);
1575 req->ptr2 = SvPVbyte_nolen (req->fh);
1576 req->sv1 = newSVsv (newpath);
1577 req->ptr1 = SvPVbyte_nolen (req->sv1);
1578
1579 REQ_SEND;
1580 }
1581
1582 void
1583 aio_mknod (pathname,mode,dev,callback=&PL_sv_undef)
1584 SV8 * pathname
1585 UV mode
1586 UV dev
1587 SV * callback
1588 PPCODE:
1589 {
1590 dREQ;
1591
1592 req->type = REQ_MKNOD;
1593 req->sv1 = newSVsv (pathname);
1594 req->ptr1 = SvPVbyte_nolen (req->sv1);
1595 req->mode = (mode_t)mode;
1596 req->offs = dev;
1597
1598 REQ_SEND;
1599 }
1600
1601 void
1602 aio_busy (delay,callback=&PL_sv_undef)
1603 double delay
1604 SV * callback
1605 PPCODE:
1606 {
1607 dREQ;
1608
1609 req->type = REQ_BUSY;
1610 req->int1 = delay < 0. ? 0 : delay;
1611 req->int2 = delay < 0. ? 0 : 1000000. * (delay - req->int1); /* fractional seconds as microseconds for tv_usec */
1612
1613 REQ_SEND;
1614 }
1615
1616 void
1617 aio_group (callback=&PL_sv_undef)
1618 SV * callback
1619 PROTOTYPE: ;$
1620 PPCODE:
1621 {
1622 dREQ;
1623
1624 req->type = REQ_GROUP;
1625
1626 req_send (req);
1627 XPUSHs (req_sv (req, AIO_GRP_KLASS));
1628 }
1629
1630 void
1631 aio_nop (callback=&PL_sv_undef)
1632 SV * callback
1633 PPCODE:
1634 {
1635 dREQ;
1636
1637 req->type = REQ_NOP;
1638
1639 REQ_SEND;
1640 }
1641
1642 int
1643 aioreq_pri (int pri = 0)
1644 PROTOTYPE: ;$
1645 CODE:
1646 RETVAL = next_pri - PRI_BIAS;
1647 if (items > 0)
1648 {
1649 if (pri < PRI_MIN) pri = PRI_MIN;
1650 if (pri > PRI_MAX) pri = PRI_MAX;
1651 next_pri = pri + PRI_BIAS;
1652 }
1653 OUTPUT:
1654 RETVAL
1655
1656 void
1657 aioreq_nice (int nice = 0)
1658 CODE:
1659 nice = next_pri - nice;
1660 if (nice < PRI_MIN) nice = PRI_MIN;
1661 if (nice > PRI_MAX) nice = PRI_MAX;
1662 next_pri = nice + PRI_BIAS;
1663
1664 void
1665 flush ()
1666 PROTOTYPE:
1667 CODE:
1668 while (nreqs)
1669 {
1670 poll_wait ();
1671 poll_cb ();
1672 }
1673
1674 int
1675 poll()
1676 PROTOTYPE:
1677 CODE:
1678 poll_wait ();
1679 RETVAL = poll_cb ();
1680 OUTPUT:
1681 RETVAL
1682
1683 int
1684 poll_fileno()
1685 PROTOTYPE:
1686 CODE:
1687 RETVAL = respipe [0];
1688 OUTPUT:
1689 RETVAL
1690
1691 int
1692 poll_cb(...)
1693 PROTOTYPE:
1694 CODE:
1695 RETVAL = poll_cb ();
1696 OUTPUT:
1697 RETVAL
1698
1699 void
1700 poll_wait()
1701 PROTOTYPE:
1702 CODE:
1703 poll_wait ();
1704
1705 void
1706 setsig (int signum = SIGIO)
1707 PROTOTYPE: ;$
1708 CODE:
1709 {
1710 if (block_sig_level)
1711 croak ("cannot call IO::AIO::setsig from within aio_block/callback");
1712
1713 LOCK (reslock);
1714 main_tid = pthread_self ();
1715 main_sig = signum;
1716 UNLOCK (reslock);
1717
1718 if (main_sig && npending)
1719 pthread_kill (main_tid, main_sig);
1720 }
1721
1722 void
1723 aio_block (SV *cb)
1724 PROTOTYPE: &
1725 PPCODE:
1726 {
1727 int count;
1728
1729 block_sig ();
1730 PUSHMARK (SP);
1731 PUTBACK;
1732 count = call_sv (cb, GIMME_V | G_NOARGS | G_EVAL);
1733 SPAGAIN;
1734 unblock_sig ();
1735
1736 if (SvTRUE (ERRSV))
1737 croak (0);
1738
1739 XSRETURN (count);
1740 }
1741
1742 int
1743 nreqs()
1744 PROTOTYPE:
1745 CODE:
1746 RETVAL = nreqs;
1747 OUTPUT:
1748 RETVAL
1749
1750 int
1751 nready()
1752 PROTOTYPE:
1753 CODE:
1754 RETVAL = get_nready ();
1755 OUTPUT:
1756 RETVAL
1757
1758 int
1759 npending()
1760 PROTOTYPE:
1761 CODE:
1762 RETVAL = get_npending ();
1763 OUTPUT:
1764 RETVAL
1765
1766 int
1767 nthreads()
1768 PROTOTYPE:
1769 CODE:
1770 if (WORDACCESS_UNSAFE) LOCK (wrklock);
1771 RETVAL = started;
1772 if (WORDACCESS_UNSAFE) UNLOCK (wrklock);
1773 OUTPUT:
1774 RETVAL
1775
1776 PROTOTYPES: DISABLE
1777
1778 MODULE = IO::AIO PACKAGE = IO::AIO::REQ
1779
1780 void
1781 cancel (aio_req_ornot req)
1782 CODE:
1783 req_cancel (req);
1784
1785 void
1786 cb (aio_req_ornot req, SV *callback=&PL_sv_undef)
1787 CODE:
1788 SvREFCNT_dec (req->callback);
1789 req->callback = newSVsv (callback);
1790
1791 MODULE = IO::AIO PACKAGE = IO::AIO::GRP
1792
1793 void
1794 add (aio_req grp, ...)
1795 PPCODE:
1796 {
1797 int i;
1798 aio_req req;
1799
1800 if (main_sig && !block_sig_level)
1801 croak ("aio_group->add called outside aio_block/callback context while IO::AIO::setsig is in use");
1802
1803 if (grp->int1 == 2)
1804 croak ("cannot add requests to IO::AIO::GRP after the group finished");
1805
1806 for (i = 1; i < items; ++i )
1807 {
1808 if (GIMME_V != G_VOID)
1809 XPUSHs (sv_2mortal (newSVsv (ST (i))));
1810
1811 req = SvAIO_REQ (ST (i));
1812
1813 if (req)
1814 {
1815 ++grp->size;
1816 req->grp = grp;
1817
1818 req->grp_prev = 0;
1819 req->grp_next = grp->grp_first;
1820
1821 if (grp->grp_first)
1822 grp->grp_first->grp_prev = req;
1823
1824 grp->grp_first = req;
1825 }
1826 }
1827 }
1828
1829 void
1830 cancel_subs (aio_req_ornot req)
1831 CODE:
1832 req_cancel_subs (req);
1833
1834 void
1835 result (aio_req grp, ...)
1836 CODE:
1837 {
1838 int i;
1839 AV *av;
1840
1841 grp->errorno = errno;
1842
1843 av = newAV ();
1844
1845 for (i = 1; i < items; ++i )
1846 av_push (av, newSVsv (ST (i)));
1847
1848 SvREFCNT_dec (grp->sv1);
1849 grp->sv1 = (SV *)av;
1850 }
1851
1852 void
1853 errno (aio_req grp, int errorno = errno)
1854 CODE:
1855 grp->errorno = errorno;
1856
1857 void
1858 limit (aio_req grp, int limit)
1859 CODE:
1860 grp->int2 = limit;
1861 aio_grp_feed (grp);
1862
1863 void
1864 feed (aio_req grp, SV *callback=&PL_sv_undef)
1865 CODE:
1866 {
1867 SvREFCNT_dec (grp->sv2);
1868 grp->sv2 = newSVsv (callback);
1869
1870 if (grp->int2 <= 0)
1871 grp->int2 = 2;
1872
1873 aio_grp_feed (grp);
1874 }
1875