/cvs/IO-AIO/AIO.xs
Revision: 1.97
Committed: Sun Jan 7 22:59:57 2007 UTC by root
Branch: MAIN
CVS Tags: rel-2_32, rel-2_33
Changes since 1.96: +20 -2 lines
Log Message:
*** empty log message ***

File Contents

1 /* solaris */
2 #define _POSIX_PTHREAD_SEMANTICS 1
3
4 #if __linux && !defined(_GNU_SOURCE)
5 # define _GNU_SOURCE
6 #endif
7
8 /* just in case */
9 #define _REENTRANT 1
10
11 #include <errno.h>
12
13 #include "EXTERN.h"
14 #include "perl.h"
15 #include "XSUB.h"
16
17 #include "autoconf/config.h"
18
19 #include <pthread.h>
20
21 #include <stddef.h>
22 #include <stdlib.h>
23 #include <errno.h>
24 #include <sys/time.h>
25 #include <sys/select.h>
26 #include <sys/types.h>
27 #include <sys/stat.h>
28 #include <limits.h>
29 #include <unistd.h>
30 #include <fcntl.h>
31 #include <signal.h>
32 #include <sched.h>
33
34 #if HAVE_SENDFILE
35 # if __linux
36 # include <sys/sendfile.h>
37 # elif __freebsd
38 # include <sys/socket.h>
39 # include <sys/uio.h>
40 # elif __hpux
41 # include <sys/socket.h>
42 # elif __solaris /* not yet */
43 # include <sys/sendfile.h>
44 # else
45 # error sendfile support requested but not available
46 # endif
47 #endif
48
49 /* number of seconds after which idle threads exit */
50 #define IDLE_TIMEOUT 10
51
53 /* used when sizing struct dirent; AIX doesn't provide NAME_MAX */
53 #ifndef NAME_MAX
54 # define NAME_MAX 4096
55 #endif
56
57 #ifndef PTHREAD_STACK_MIN
58 /* care for broken platforms, e.g. windows */
59 # define PTHREAD_STACK_MIN 16384
60 #endif
61
62 #if __ia64
63 # define STACKSIZE 65536
64 #elif __i386 || __x86_64 /* 16k is unreasonably high :( */
65 # define STACKSIZE PTHREAD_STACK_MIN
66 #else
67 # define STACKSIZE 16384
68 #endif
69
70 /* whether word reads are potentially non-atomic.
71 * this is conservative; most arches this runs
72 * on likely have atomic word reads/writes.
73 */
74 #ifndef WORDACCESS_UNSAFE
75 # if __i386 || __x86_64
76 # define WORDACCESS_UNSAFE 0
77 # else
78 # define WORDACCESS_UNSAFE 1
79 # endif
80 #endif
81
82 /* buffer size for various temporary buffers */
83 #define AIO_BUFSIZE 65536
84
85 #define dBUF \
86 char *aio_buf; \
87 LOCK (wrklock); \
88 self->dbuf = aio_buf = malloc (AIO_BUFSIZE); \
89 UNLOCK (wrklock); \
90 if (!aio_buf) \
91 return -1;
92
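/* dBUF also records the buffer in self->dbuf (under wrklock) so that
 * worker_clear (), which runs after every request and from the fork
 * handler, frees it; the request handlers never free aio_buf themselves. */
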
93 typedef SV SV8; /* byte-sv, used for argument-checking */
94
95 enum {
96 REQ_QUIT,
97 REQ_OPEN, REQ_CLOSE,
98 REQ_READ, REQ_WRITE, REQ_READAHEAD,
99 REQ_SENDFILE,
100 REQ_STAT, REQ_LSTAT, REQ_FSTAT,
101 REQ_FSYNC, REQ_FDATASYNC,
102 REQ_UNLINK, REQ_RMDIR, REQ_MKDIR, REQ_RENAME,
103 REQ_MKNOD, REQ_READDIR,
104 REQ_LINK, REQ_SYMLINK, REQ_READLINK,
105 REQ_GROUP, REQ_NOP,
106 REQ_BUSY,
107 };
108
109 #define AIO_REQ_KLASS "IO::AIO::REQ"
110 #define AIO_GRP_KLASS "IO::AIO::GRP"
111
112 typedef struct aio_cb
113 {
114 struct aio_cb *volatile next;
115
116 SV *callback, *fh;
117 SV *sv1, *sv2;
118 void *ptr1, *ptr2;
119 off_t offs;
120 size_t size;
121 ssize_t result;
122
123 STRLEN stroffset;
124 int type;
125 int int1, int2;
126 int errorno;
127 mode_t mode; /* open */
128
129 unsigned char flags;
130 unsigned char pri;
131
132 SV *self; /* the perl counterpart of this request, if any */
133 struct aio_cb *grp, *grp_prev, *grp_next, *grp_first;
134 } aio_cb;
135
136 enum {
137 FLAG_CANCELLED = 0x01, /* request was cancelled */
138 FLAG_SV1_RO_OFF = 0x40, /* data was set readonly */
139 FLAG_PTR2_FREE = 0x80, /* need to free(ptr2) */
140 };
141
142 typedef aio_cb *aio_req;
143 typedef aio_cb *aio_req_ornot;
144
145 enum {
146 PRI_MIN = -4,
147 PRI_MAX = 4,
148
149 DEFAULT_PRI = 0,
150 PRI_BIAS = -PRI_MIN,
151 NUM_PRI = PRI_MAX + PRI_BIAS + 1,
152 };
153
154 #define AIO_TICKS ((1000000 + 1023) >> 10)
155
156 static unsigned int max_poll_time = 0;
157 static unsigned int max_poll_reqs = 0;
158
159 /* calculate time difference in ~1/AIO_TICKS of a second */
160 static int tvdiff (struct timeval *tv1, struct timeval *tv2)
161 {
162 return (tv2->tv_sec - tv1->tv_sec ) * AIO_TICKS
163 + ((tv2->tv_usec - tv1->tv_usec) >> 10);
164 }
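
/* for reference: AIO_TICKS = (1000000 + 1023) >> 10 = 977, i.e. one tick
 * is ~1.024 ms and there are ~977 ticks per second. max_poll_time () at
 * the end of this file stores nseconds * AIO_TICKS, so the comparison in
 * poll_cb () works entirely in this unit. */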
165
166 static pthread_t main_tid;
167 static int main_sig;
168 static int block_sig_level;
169
170 void block_sig ()
171 {
172 sigset_t ss;
173
174 if (block_sig_level++)
175 return;
176
177 if (!main_sig)
178 return;
179
180 sigemptyset (&ss);
181 sigaddset (&ss, main_sig);
182 pthread_sigmask (SIG_BLOCK, &ss, 0);
183 }
184
185 void unblock_sig ()
186 {
187 sigset_t ss;
188
189 if (--block_sig_level)
190 return;
191
192 if (!main_sig)
193 return;
194
195 sigemptyset (&ss);
196 sigaddset (&ss, main_sig);
197 pthread_sigmask (SIG_UNBLOCK, &ss, 0);
198 }
199
200 static int next_pri = DEFAULT_PRI + PRI_BIAS;
201
202 static unsigned int started, idle, wanted;
203
204 #if __linux && defined (PTHREAD_ADAPTIVE_MUTEX_INITIALIZER_NP)
205 # define AIO_MUTEX_INIT PTHREAD_ADAPTIVE_MUTEX_INITIALIZER_NP
206 #else
207 # define AIO_MUTEX_INIT PTHREAD_MUTEX_INITIALIZER
208 #endif
209
210 #define LOCK(mutex) pthread_mutex_lock (&(mutex))
211 #define UNLOCK(mutex) pthread_mutex_unlock (&(mutex))
212
213 /* worker thread management */
214 static pthread_mutex_t wrklock = AIO_MUTEX_INIT;
215
216 typedef struct worker {
217 /* locked by wrklock */
218 struct worker *prev, *next;
219
220 pthread_t tid;
221
222 /* locked by reslock, reqlock or wrklock */
223 aio_req req; /* currently processed request */
224 void *dbuf;
225 DIR *dirp;
226 } worker;
227
228 static worker wrk_first = { &wrk_first, &wrk_first, 0 };
229
230 static void worker_clear (worker *wrk)
231 {
232 if (wrk->dirp)
233 {
234 closedir (wrk->dirp);
235 wrk->dirp = 0;
236 }
237
238 if (wrk->dbuf)
239 {
240 free (wrk->dbuf);
241 wrk->dbuf = 0;
242 }
243 }
244
245 static void worker_free (worker *wrk)
246 {
247 wrk->next->prev = wrk->prev;
248 wrk->prev->next = wrk->next;
249
250 free (wrk);
251 }
252
253 static volatile unsigned int nreqs, nready, npending;
254 static volatile unsigned int max_idle = 4;
255 static volatile unsigned int max_outstanding = 0xffffffff;
256 static int respipe [2];
257
258 static pthread_mutex_t reslock = AIO_MUTEX_INIT;
259 static pthread_mutex_t reqlock = AIO_MUTEX_INIT;
260 static pthread_cond_t reqwait = PTHREAD_COND_INITIALIZER;
261
262 #if WORDACCESS_UNSAFE
263
264 static unsigned int get_nready ()
265 {
266 unsigned int retval;
267
268 LOCK (reqlock);
269 retval = nready;
270 UNLOCK (reqlock);
271
272 return retval;
273 }
274
275 static unsigned int get_npending ()
276 {
277 unsigned int retval;
278
279 LOCK (reslock);
280 retval = npending;
281 UNLOCK (reslock);
282
283 return retval;
284 }
285
286 static unsigned int get_nthreads ()
287 {
288 unsigned int retval;
289
290 LOCK (wrklock);
291 retval = started;
292 UNLOCK (wrklock);
293
294 return retval;
295 }
296
297 #else
298
299 # define get_nready() nready
300 # define get_npending() npending
301 # define get_nthreads() started
302
303 #endif
304
305 /*
306 * a somewhat faster data structure might be nice, but
307 * with 8 priorities this actually needs <20 insns
308 * per shift, the most expensive operation.
309 */
310 typedef struct {
311 aio_req qs[NUM_PRI], qe[NUM_PRI]; /* qstart, qend */
312 int size;
313 } reqq;
314
315 static reqq req_queue;
316 static reqq res_queue;
317
318 int reqq_push (reqq *q, aio_req req)
319 {
320 int pri = req->pri;
321 req->next = 0;
322
323 if (q->qe[pri])
324 {
325 q->qe[pri]->next = req;
326 q->qe[pri] = req;
327 }
328 else
329 q->qe[pri] = q->qs[pri] = req;
330
331 return q->size++;
332 }
333
334 aio_req reqq_shift (reqq *q)
335 {
336 int pri;
337
338 if (!q->size)
339 return 0;
340
341 --q->size;
342
343 for (pri = NUM_PRI; pri--; )
344 {
345 aio_req req = q->qs[pri];
346
347 if (req)
348 {
349 if (!(q->qs[pri] = req->next))
350 q->qe[pri] = 0;
351
352 return req;
353 }
354 }
355
356 abort ();
357 }
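
#if 0
/* illustrative sketch, not part of the module: how reqq orders requests.
 * pri is already biased into 0 .. NUM_PRI - 1; higher values are shifted
 * first, equal priorities come out in FIFO order. enabling this would
 * additionally require <assert.h>. */
static void reqq_example (void)
{
  static aio_cb lo_a, lo_b, hi;
  reqq q = { { 0 } };

  lo_a.pri = lo_b.pri = DEFAULT_PRI + PRI_BIAS;
  hi.pri   = PRI_MAX + PRI_BIAS;

  reqq_push (&q, &lo_a);
  reqq_push (&q, &lo_b);
  reqq_push (&q, &hi);

  assert (reqq_shift (&q) == &hi);   /* highest priority first */
  assert (reqq_shift (&q) == &lo_a); /* FIFO within a priority */
  assert (reqq_shift (&q) == &lo_b);
  assert (reqq_shift (&q) == 0);     /* queue is now empty */
}
#endif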
358
359 static int poll_cb ();
360 static int req_invoke (aio_req req);
361 static void req_free (aio_req req);
362 static void req_cancel (aio_req req);
363
364 /* must be called at most once */
365 static SV *req_sv (aio_req req, const char *klass)
366 {
367 if (!req->self)
368 {
369 req->self = (SV *)newHV ();
370 sv_magic (req->self, 0, PERL_MAGIC_ext, (char *)req, 0);
371 }
372
373 return sv_2mortal (sv_bless (newRV_inc (req->self), gv_stashpv (klass, 1)));
374 }
375
376 static aio_req SvAIO_REQ (SV *sv)
377 {
378 MAGIC *mg;
379
380 if (!sv_derived_from (sv, AIO_REQ_KLASS) || !SvROK (sv))
381 croak ("object of class " AIO_REQ_KLASS " expected");
382
383 mg = mg_find (SvRV (sv), PERL_MAGIC_ext);
384
385 return mg ? (aio_req)mg->mg_ptr : 0;
386 }
387
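/* group request bookkeeping used by the helpers below and by the
 * IO::AIO::GRP methods at the end of this file:
 * grp->size - number of unfinished subrequests,
 * grp->int2 - feeder limit (set by the "limit" method; "feed" defaults it to 2),
 * grp->int1 - nonzero once the group has left the result queue (1 = delayed
 *             because subrequests are still pending, 2 = finished, no
 *             further requests may be added),
 * grp->sv2  - feeder callback, grp->sv1 - result list set by "result". */
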
388 static void aio_grp_feed (aio_req grp)
389 {
390 block_sig ();
391
392 while (grp->size < grp->int2 && !(grp->flags & FLAG_CANCELLED))
393 {
394 int old_len = grp->size;
395
396 if (grp->sv2 && SvOK (grp->sv2))
397 {
398 dSP;
399
400 ENTER;
401 SAVETMPS;
402 PUSHMARK (SP);
403 XPUSHs (req_sv (grp, AIO_GRP_KLASS));
404 PUTBACK;
405 call_sv (grp->sv2, G_VOID | G_EVAL | G_KEEPERR);
406 SPAGAIN;
407 FREETMPS;
408 LEAVE;
409 }
410
411 /* stop if no progress has been made */
412 if (old_len == grp->size)
413 {
414 SvREFCNT_dec (grp->sv2);
415 grp->sv2 = 0;
416 break;
417 }
418 }
419
420 unblock_sig ();
421 }
422
423 static void aio_grp_dec (aio_req grp)
424 {
425 --grp->size;
426
427 /* call feeder, if applicable */
428 aio_grp_feed (grp);
429
430 /* finish, if done */
431 if (!grp->size && grp->int1)
432 {
433 block_sig ();
434
435 if (!req_invoke (grp))
436 {
437 req_free (grp);
438 unblock_sig ();
439 croak (0);
440 }
441
442 req_free (grp);
443 unblock_sig ();
444 }
445 }
446
447 static int req_invoke (aio_req req)
448 {
449 dSP;
450
451 if (req->flags & FLAG_SV1_RO_OFF)
452 SvREADONLY_off (req->sv1);
453
454 if (!(req->flags & FLAG_CANCELLED) && SvOK (req->callback))
455 {
456 ENTER;
457 SAVETMPS;
458 PUSHMARK (SP);
459 EXTEND (SP, 1);
460
461 switch (req->type)
462 {
463 case REQ_READDIR:
464 {
465 SV *rv = &PL_sv_undef;
466
467 if (req->result >= 0)
468 {
469 int i;
470 char *buf = req->ptr2;
471 AV *av = newAV ();
472
473 av_extend (av, req->result - 1);
474
475 for (i = 0; i < req->result; ++i)
476 {
477 SV *sv = newSVpv (buf, 0);
478
479 av_store (av, i, sv);
480 buf += SvCUR (sv) + 1;
481 }
482
483 rv = sv_2mortal (newRV_noinc ((SV *)av));
484 }
485
486 PUSHs (rv);
487 }
488 break;
489
490 case REQ_OPEN:
491 {
492 /* convert fd to fh */
493 SV *fh;
494
495 PUSHs (sv_2mortal (newSViv (req->result)));
496 PUTBACK;
497 call_pv ("IO::AIO::_fd2fh", G_SCALAR | G_EVAL);
498 SPAGAIN;
499
500 fh = POPs;
501 PUSHMARK (SP);
502 XPUSHs (fh);
503 }
504 break;
505
506 case REQ_GROUP:
507 req->int1 = 2; /* mark group as finished */
508
509 if (req->sv1)
510 {
511 int i;
512 AV *av = (AV *)req->sv1;
513
514 EXTEND (SP, AvFILL (av) + 1);
515 for (i = 0; i <= AvFILL (av); ++i)
516 PUSHs (*av_fetch (av, i, 0));
517 }
518 break;
519
520 case REQ_NOP:
521 case REQ_BUSY:
522 break;
523
524 case REQ_READLINK:
525 if (req->result > 0)
526 {
527 SvCUR_set (req->sv1, req->result);
528 *SvEND (req->sv1) = 0;
529 PUSHs (req->sv1);
530 }
531 break;
532
533 case REQ_STAT:
534 case REQ_LSTAT:
535 case REQ_FSTAT:
536 PL_laststype = req->type == REQ_LSTAT ? OP_LSTAT : OP_STAT;
537 PL_laststatval = req->result;
538 PL_statcache = *(Stat_t *)(req->ptr2);
539 PUSHs (sv_2mortal (newSViv (req->result)));
540 break;
541
542 case REQ_READ:
543 SvCUR_set (req->sv1, req->stroffset + (req->result > 0 ? req->result : 0));
544 *SvEND (req->sv1) = 0;
545 PUSHs (sv_2mortal (newSViv (req->result)));
546 break;
547
548 default:
549 PUSHs (sv_2mortal (newSViv (req->result)));
550 break;
551 }
552
553 errno = req->errorno;
554
555 PUTBACK;
556 call_sv (req->callback, G_VOID | G_EVAL);
557 SPAGAIN;
558
559 FREETMPS;
560 LEAVE;
561 }
562
563 if (req->grp)
564 {
565 aio_req grp = req->grp;
566
567 /* unlink request */
568 if (req->grp_next) req->grp_next->grp_prev = req->grp_prev;
569 if (req->grp_prev) req->grp_prev->grp_next = req->grp_next;
570
571 if (grp->grp_first == req)
572 grp->grp_first = req->grp_next;
573
574 aio_grp_dec (grp);
575 }
576
577 return !SvTRUE (ERRSV);
578 }
579
580 static void req_free (aio_req req)
581 {
582 if (req->self)
583 {
584 sv_unmagic (req->self, PERL_MAGIC_ext);
585 SvREFCNT_dec (req->self);
586 }
587
588 SvREFCNT_dec (req->fh);
589 SvREFCNT_dec (req->sv1);
590 SvREFCNT_dec (req->sv2);
591 SvREFCNT_dec (req->callback);
592
593 if (req->flags & FLAG_PTR2_FREE)
594 free (req->ptr2);
595
596 Safefree (req);
597 }
598
599 static void req_cancel_subs (aio_req grp)
600 {
601 aio_req sub;
602
603 if (grp->type != REQ_GROUP)
604 return;
605
606 SvREFCNT_dec (grp->sv2);
607 grp->sv2 = 0;
608
609 for (sub = grp->grp_first; sub; sub = sub->grp_next)
610 req_cancel (sub);
611 }
612
613 static void req_cancel (aio_req req)
614 {
615 req->flags |= FLAG_CANCELLED;
616
617 req_cancel_subs (req);
618 }
619
620 static void *aio_proc(void *arg);
621
622 static void start_thread (void)
623 {
624 sigset_t fullsigset, oldsigset;
625 pthread_attr_t attr;
626
627 worker *wrk = calloc (1, sizeof (worker));
628
629 if (!wrk)
630 croak ("unable to allocate worker thread data");
631
632 pthread_attr_init (&attr);
633 pthread_attr_setstacksize (&attr, STACKSIZE);
634 pthread_attr_setdetachstate (&attr, PTHREAD_CREATE_DETACHED);
635 #ifdef PTHREAD_SCOPE_PROCESS
636 pthread_attr_setscope (&attr, PTHREAD_SCOPE_PROCESS);
637 #endif
638
639 sigfillset (&fullsigset);
640
641 LOCK (wrklock);
642 pthread_sigmask (SIG_SETMASK, &fullsigset, &oldsigset);
643
644 if (pthread_create (&wrk->tid, &attr, aio_proc, (void *)wrk) == 0)
645 {
646 wrk->prev = &wrk_first;
647 wrk->next = wrk_first.next;
648 wrk_first.next->prev = wrk;
649 wrk_first.next = wrk;
650 ++started;
651 }
652 else
653 free (wrk);
654
655 pthread_sigmask (SIG_SETMASK, &oldsigset, 0);
656 UNLOCK (wrklock);
657 }
658
659 static void maybe_start_thread ()
660 {
661 if (get_nthreads () >= wanted)
662 return;
663
664 /* todo: maybe use idle here, but might be less exact */
665 if (0 <= (int)get_nthreads () + (int)get_npending () - (int)nreqs)
666 return;
667
668 start_thread ();
669 }
670
671 static void req_send (aio_req req)
672 {
673 block_sig ();
674
675 ++nreqs;
676
677 LOCK (reqlock);
678 ++nready;
679 reqq_push (&req_queue, req);
680 pthread_cond_signal (&reqwait);
681 UNLOCK (reqlock);
682
683 unblock_sig ();
684
685 maybe_start_thread ();
686 }
687
688 static void end_thread (void)
689 {
690 aio_req req;
691
692 Newz (0, req, 1, aio_cb);
693
694 req->type = REQ_QUIT;
695 req->pri = PRI_MAX + PRI_BIAS;
696
697 LOCK (reqlock);
698 reqq_push (&req_queue, req);
699 pthread_cond_signal (&reqwait);
700 UNLOCK (reqlock);
701
702 LOCK (wrklock);
703 --started;
704 UNLOCK (wrklock);
705 }
706
707 static void set_max_idle (int nthreads)
708 {
709 if (WORDACCESS_UNSAFE) LOCK (reqlock);
710 max_idle = nthreads <= 0 ? 1 : nthreads;
711 if (WORDACCESS_UNSAFE) UNLOCK (reqlock);
712 }
713
714 static void min_parallel (int nthreads)
715 {
716 if (wanted < nthreads)
717 wanted = nthreads;
718 }
719
720 static void max_parallel (int nthreads)
721 {
722 if (wanted > nthreads)
723 wanted = nthreads;
724
725 while (started > wanted)
726 end_thread ();
727 }
728
729 static void poll_wait ()
730 {
731 fd_set rfd;
732
733 while (nreqs)
734 {
735 int size;
736 if (WORDACCESS_UNSAFE) LOCK (reslock);
737 size = res_queue.size;
738 if (WORDACCESS_UNSAFE) UNLOCK (reslock);
739
740 if (size)
741 return;
742
743 maybe_start_thread ();
744
745 FD_ZERO(&rfd);
746 FD_SET(respipe [0], &rfd);
747
748 select (respipe [0] + 1, &rfd, 0, 0, 0);
749 }
750 }
751
752 static int poll_cb ()
753 {
754 dSP;
755 int count = 0;
756 int maxreqs = max_poll_reqs;
757 int do_croak = 0;
758 struct timeval tv_start, tv_now;
759 aio_req req;
760
761 if (max_poll_time)
762 gettimeofday (&tv_start, 0);
763
764 block_sig ();
765
766 for (;;)
767 {
768 for (;;)
769 {
770 maybe_start_thread ();
771
772 LOCK (reslock);
773 req = reqq_shift (&res_queue);
774
775 if (req)
776 {
777 --npending;
778
779 if (!res_queue.size)
780 {
781 /* drain the wakeup bytes written by the worker threads */
782 char buf [4];
783 while (read (respipe [0], buf, 4) == 4)
784 ;
785 }
786 }
787
788 UNLOCK (reslock);
789
790 if (!req)
791 break;
792
793 --nreqs;
794
795 if (req->type == REQ_GROUP && req->size)
796 {
797 req->int1 = 1; /* mark request as delayed */
798 continue;
799 }
800 else
801 {
802 if (!req_invoke (req))
803 {
804 req_free (req);
805 unblock_sig ();
806 croak (0);
807 }
808
809 count++;
810 }
811
812 req_free (req);
813
814 if (maxreqs && !--maxreqs)
815 break;
816
817 if (max_poll_time)
818 {
819 gettimeofday (&tv_now, 0);
820
821 if (tvdiff (&tv_start, &tv_now) >= max_poll_time)
822 break;
823 }
824 }
825
826 if (nreqs <= max_outstanding)
827 break;
828
829 poll_wait ();
830
831 ++maxreqs;
832 }
833
834 unblock_sig ();
835 return count;
836 }
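
/* the outer loop above also enforces max_outstanding: while more than
 * max_outstanding requests are in flight, poll_cb () keeps calling
 * poll_wait () and allows itself one more result per iteration until
 * nreqs drops back to the limit. */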
837
838 static void create_pipe ()
839 {
840 if (pipe (respipe))
841 croak ("unable to initialize result pipe");
842
843 if (fcntl (respipe [0], F_SETFL, O_NONBLOCK))
844 croak ("cannot set result pipe to nonblocking mode");
845
846 if (fcntl (respipe [1], F_SETFL, O_NONBLOCK))
847 croak ("cannot set result pipe to nonblocking mode");
848 }
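
/* the pipe above implements the usual self-pipe wakeup: the worker that
 * pushes the first result into the empty res_queue writes one dummy byte
 * to respipe [1] (see aio_proc), making respipe [0] readable. the fd is
 * exported via poll_fileno () so event loops can select/poll on it, and
 * poll_cb () drains it again once the result queue runs empty. */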
849
850 /*****************************************************************************/
851 /* work around various missing functions */
852
853 #if !HAVE_PREADWRITE
854 # define pread aio_pread
855 # define pwrite aio_pwrite
856
857 /*
858 * make our pread/pwrite safe against themselves, but not against
859 * normal read/write by using a mutex. slows down execution a lot,
860 * but that's your problem, not mine.
861 */
862 static pthread_mutex_t preadwritelock = PTHREAD_MUTEX_INITIALIZER;
863
864 static ssize_t pread (int fd, void *buf, size_t count, off_t offset)
865 {
866 ssize_t res;
867 off_t ooffset;
868
869 LOCK (preadwritelock);
870 ooffset = lseek (fd, 0, SEEK_CUR);
871 lseek (fd, offset, SEEK_SET);
872 res = read (fd, buf, count);
873 lseek (fd, ooffset, SEEK_SET);
874 UNLOCK (preadwritelock);
875
876 return res;
877 }
878
879 static ssize_t pwrite (int fd, void *buf, size_t count, off_t offset)
880 {
881 ssize_t res;
882 off_t ooffset;
883
884 LOCK (preadwritelock);
885 ooffset = lseek (fd, 0, SEEK_CUR);
886 lseek (fd, offset, SEEK_SET);
887 res = write (fd, buf, count);
888 lseek (fd, ooffset, SEEK_SET); /* restore the original file offset, as pread does */
889 UNLOCK (preadwritelock);
890
891 return res;
892 }
893 #endif
894
895 #if !HAVE_FDATASYNC
896 # define fdatasync fsync
897 #endif
898
899 #if !HAVE_READAHEAD
900 # define readahead(fd,offset,count) aio_readahead (fd, offset, count, self)
901
902 static ssize_t aio_readahead (int fd, off_t offset, size_t count, worker *self)
903 {
904 dBUF;
905
906 while (count > 0)
907 {
908 size_t len = count < AIO_BUFSIZE ? count : AIO_BUFSIZE;
909
910 pread (fd, aio_buf, len, offset);
911 offset += len;
912 count -= len;
913 }
914
915 errno = 0;
916 return 0; /* readahead is only a hint, so report success */
917 }
918 #endif
919
920 #if !HAVE_READDIR_R
921 # define readdir_r aio_readdir_r
922
923 static pthread_mutex_t readdirlock = PTHREAD_MUTEX_INITIALIZER;
924
925 static int readdir_r (DIR *dirp, struct dirent *ent, struct dirent **res)
926 {
927 struct dirent *e;
928 int errorno;
929
930 LOCK (readdirlock);
931
932 e = readdir (dirp);
933 errorno = errno;
934
935 if (e)
936 {
937 *res = ent;
938 strcpy (ent->d_name, e->d_name);
939 }
940 else
941 *res = 0;
942
943 UNLOCK (readdirlock);
944
945 errno = errorno;
946 return e ? 0 : -1;
947 }
948 #endif
949
950 /* sendfile always needs emulation */
951 static ssize_t sendfile_ (int ofd, int ifd, off_t offset, size_t count, worker *self)
952 {
953 ssize_t res;
954
955 if (!count)
956 return 0;
957
958 #if HAVE_SENDFILE
959 # if __linux
960 res = sendfile (ofd, ifd, &offset, count);
961
962 # elif __freebsd
963 /*
964 * Of course, the freebsd sendfile is a dire hack with no thoughts
965 * wasted on making it similar to other I/O functions.
966 */
967 {
968 off_t sbytes;
969 res = sendfile (ifd, ofd, offset, count, 0, &sbytes, 0);
970
971 if (res < 0 && sbytes)
972 /* maybe only on EAGAIN: as usual, the manpage leaves you guessing */
973 res = sbytes;
974 }
975
976 # elif __hpux
977 res = sendfile (ofd, ifd, offset, count, 0, 0);
978
979 # elif __solaris
980 {
981 struct sendfilevec vec;
982 size_t sbytes;
983
984 vec.sfv_fd = ifd;
985 vec.sfv_flag = 0;
986 vec.sfv_off = offset;
987 vec.sfv_len = count;
988
989 res = sendfilev (ofd, &vec, 1, &sbytes);
990
991 if (res < 0 && sbytes)
992 res = sbytes;
993 }
994
995 # endif
996 #else
997 res = -1;
998 errno = ENOSYS;
999 #endif
1000
1001 if (res < 0
1002 && (errno == ENOSYS || errno == EINVAL || errno == ENOTSOCK
1003 #if __solaris
1004 || errno == EAFNOSUPPORT || errno == EPROTOTYPE
1005 #endif
1006 )
1007 )
1008 {
1009 /* emulate sendfile. this is a major pain in the ass */
1010 dBUF;
1011
1012 res = 0;
1013
1014 while (count)
1015 {
1016 ssize_t cnt;
1017
1018 cnt = pread (ifd, aio_buf, count > AIO_BUFSIZE ? AIO_BUFSIZE : count, offset);
1019
1020 if (cnt <= 0)
1021 {
1022 if (cnt && !res) res = -1;
1023 break;
1024 }
1025
1026 cnt = write (ofd, aio_buf, cnt);
1027
1028 if (cnt <= 0)
1029 {
1030 if (cnt && !res) res = -1;
1031 break;
1032 }
1033
1034 offset += cnt;
1035 res += cnt;
1036 count -= cnt;
1037 }
1038 }
1039
1040 return res;
1041 }
1042
1043 /* read a full directory */
1044 static void scandir_ (aio_req req, worker *self)
1045 {
1046 DIR *dirp;
1047 union
1048 {
1049 struct dirent d;
1050 char b [offsetof (struct dirent, d_name) + NAME_MAX + 1];
1051 } *u;
1052 struct dirent *entp;
1053 char *name, *names;
1054 int memlen = 4096;
1055 int memofs = 0;
1056 int res = 0;
1057 int errorno;
1058
1059 LOCK (wrklock);
1060 self->dirp = dirp = opendir (req->ptr1);
1061 self->dbuf = u = malloc (sizeof (*u));
1062 req->flags |= FLAG_PTR2_FREE;
1063 req->ptr2 = names = malloc (memlen);
1064 UNLOCK (wrklock);
1065
1066 if (dirp && u && names)
1067 for (;;)
1068 {
1069 errno = 0;
1070 readdir_r (dirp, &u->d, &entp);
1071
1072 if (!entp)
1073 break;
1074
1075 name = entp->d_name;
1076
1077 if (name [0] != '.' || (name [1] && (name [1] != '.' || name [2])))
1078 {
1079 int len = strlen (name) + 1;
1080
1081 res++;
1082
1083 while (memofs + len > memlen)
1084 {
1085 memlen *= 2;
1086 LOCK (wrklock);
1087 req->ptr2 = names = realloc (names, memlen);
1088 UNLOCK (wrklock);
1089
1090 if (!names)
1091 break;
1092 }
1093
1094 memcpy (names + memofs, name, len);
1095 memofs += len;
1096 }
1097 }
1098
1099 if (errno)
1100 res = -1;
1101
1102 req->result = res;
1103 }
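
/* scandir_ leaves the entries as a packed sequence of NUL-terminated
 * names in req->ptr2 and the entry count in req->result; the REQ_READDIR
 * case in req_invoke () walks that buffer to build the array passed to
 * the perl callback. */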
1104
1105 /*****************************************************************************/
1106
1107 static void *aio_proc (void *thr_arg)
1108 {
1109 aio_req req;
1110 struct timespec ts;
1111 worker *self = (worker *)thr_arg;
1112
1113 /* try to distribute timeouts somewhat evenly */
1114 ts.tv_nsec = (((unsigned long)self + (unsigned long)ts.tv_sec) & 1023UL)
1115 * (1000000000UL / 1024UL);
1116
1117 for (;;)
1118 {
1119 ts.tv_sec = time (0) + IDLE_TIMEOUT;
1120
1121 LOCK (reqlock);
1122
1123 for (;;)
1124 {
1125 self->req = req = reqq_shift (&req_queue);
1126
1127 if (req)
1128 break;
1129
1130 ++idle;
1131
1132 if (pthread_cond_timedwait (&reqwait, &reqlock, &ts)
1133 == ETIMEDOUT)
1134 {
1135 if (idle > max_idle)
1136 {
1137 --idle;
1138 UNLOCK (reqlock);
1139 LOCK (wrklock);
1140 --started;
1141 UNLOCK (wrklock);
1142 goto quit;
1143 }
1144
1145 /* we are allowed to idle, so do so without any timeout */
1146 pthread_cond_wait (&reqwait, &reqlock);
1147 ts.tv_sec = time (0) + IDLE_TIMEOUT;
1148 }
1149
1150 --idle;
1151 }
1152
1153 --nready;
1154
1155 UNLOCK (reqlock);
1156
1157 errno = 0; /* strictly unnecessary */
1158
1159 if (!(req->flags & FLAG_CANCELLED))
1160 switch (req->type)
1161 {
1162 case REQ_READ: req->result = pread (req->int1, req->ptr1, req->size, req->offs); break;
1163 case REQ_WRITE: req->result = pwrite (req->int1, req->ptr1, req->size, req->offs); break;
1164
1165 case REQ_READAHEAD: req->result = readahead (req->int1, req->offs, req->size); break;
1166 case REQ_SENDFILE: req->result = sendfile_ (req->int1, req->int2, req->offs, req->size, self); break;
1167
1168 case REQ_STAT: req->result = stat (req->ptr1, (Stat_t *)req->ptr2); break;
1169 case REQ_LSTAT: req->result = lstat (req->ptr1, (Stat_t *)req->ptr2); break;
1170 case REQ_FSTAT: req->result = fstat (req->int1, (Stat_t *)req->ptr2); break;
1171
1172 case REQ_OPEN: req->result = open (req->ptr1, req->int1, req->mode); break;
1173 case REQ_CLOSE: req->result = close (req->int1); break;
1174 case REQ_UNLINK: req->result = unlink (req->ptr1); break;
1175 case REQ_RMDIR: req->result = rmdir (req->ptr1); break;
1176 case REQ_MKDIR: req->result = mkdir (req->ptr1, req->mode); break;
1177 case REQ_RENAME: req->result = rename (req->ptr2, req->ptr1); break;
1178 case REQ_LINK: req->result = link (req->ptr2, req->ptr1); break;
1179 case REQ_SYMLINK: req->result = symlink (req->ptr2, req->ptr1); break;
1180 case REQ_MKNOD: req->result = mknod (req->ptr2, req->mode, (dev_t)req->offs); break;
1181 case REQ_READLINK: req->result = readlink (req->ptr2, req->ptr1, NAME_MAX); break;
1182
1183 case REQ_FDATASYNC: req->result = fdatasync (req->int1); break;
1184 case REQ_FSYNC: req->result = fsync (req->int1); break;
1185 case REQ_READDIR: scandir_ (req, self); break;
1186
1187 case REQ_BUSY:
1188 {
1189 struct timeval tv;
1190
1191 tv.tv_sec = req->int1;
1192 tv.tv_usec = req->int2;
1193
1194 req->result = select (0, 0, 0, 0, &tv);
1195 }
1196 break;
1197 case REQ_GROUP:
1198 case REQ_NOP:
1199 break;
1200
1201 case REQ_QUIT:
1202 goto quit;
1203
1204 default:
1205 req->result = ENOSYS;
1206 break;
1207 }
1208
1209 req->errorno = errno;
1210
1211 LOCK (reslock);
1212
1213 ++npending;
1214
1215 if (!reqq_push (&res_queue, req))
1216 {
1217 /* write a dummy byte to the pipe so fh becomes ready */
1218 write (respipe [1], &respipe, 1);
1219
1220 /* optionally signal the main thread asynchronously */
1221 if (main_sig)
1222 pthread_kill (main_tid, main_sig);
1223 }
1224
1225 self->req = 0;
1226 worker_clear (self);
1227
1228 UNLOCK (reslock);
1229 }
1230
1231 quit:
1232 LOCK (wrklock);
1233 worker_free (self);
1234 UNLOCK (wrklock);
1235
1236 return 0;
1237 }
1238
1239 /*****************************************************************************/
1240
1241 static void atfork_prepare (void)
1242 {
1243 LOCK (wrklock);
1244 LOCK (reqlock);
1245 LOCK (reslock);
1246 #if !HAVE_PREADWRITE
1247 LOCK (preadwritelock);
1248 #endif
1249 #if !HAVE_READDIR_R
1250 LOCK (readdirlock);
1251 #endif
1252 }
1253
1254 static void atfork_parent (void)
1255 {
1256 #if !HAVE_READDIR_R
1257 UNLOCK (readdirlock);
1258 #endif
1259 #if !HAVE_PREADWRITE
1260 UNLOCK (preadwritelock);
1261 #endif
1262 UNLOCK (reslock);
1263 UNLOCK (reqlock);
1264 UNLOCK (wrklock);
1265 }
1266
1267 static void atfork_child (void)
1268 {
1269 aio_req prv;
1270
1271 while (prv = reqq_shift (&req_queue))
1272 req_free (prv);
1273
1274 while (prv = reqq_shift (&res_queue))
1275 req_free (prv);
1276
1277 while (wrk_first.next != &wrk_first)
1278 {
1279 worker *wrk = wrk_first.next;
1280
1281 if (wrk->req)
1282 req_free (wrk->req);
1283
1284 worker_clear (wrk);
1285 worker_free (wrk);
1286 }
1287
1288 started = 0;
1289 idle = 0;
1290 nreqs = 0;
1291 nready = 0;
1292 npending = 0;
1293
1294 close (respipe [0]);
1295 close (respipe [1]);
1296 create_pipe ();
1297
1298 atfork_parent ();
1299 }
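
/* only the forking thread survives fork (), so atfork_child drops all
 * queued requests and worker records, resets the counters and recreates
 * the result pipe rather than trying to carry in-flight state over into
 * the child. */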
1300
1301 #define dREQ \
1302 aio_req req; \
1303 int req_pri = next_pri; \
1304 next_pri = DEFAULT_PRI + PRI_BIAS; \
1305 \
1306 if (SvOK (callback) && !SvROK (callback)) \
1307 croak ("callback must be undef or of reference type"); \
1308 \
1309 Newz (0, req, 1, aio_cb); \
1310 if (!req) \
1311 croak ("out of memory during aio_req allocation"); \
1312 \
1313 req->callback = newSVsv (callback); \
1314 req->pri = req_pri
1315
1316 #define REQ_SEND \
1317 req_send (req); \
1318 \
1319 if (GIMME_V != G_VOID) \
1320 XPUSHs (req_sv (req, AIO_REQ_KLASS));
1321
1322 MODULE = IO::AIO PACKAGE = IO::AIO
1323
1324 PROTOTYPES: ENABLE
1325
1326 BOOT:
1327 {
1328 HV *stash = gv_stashpv ("IO::AIO", 1);
1329
1330 newCONSTSUB (stash, "EXDEV", newSViv (EXDEV));
1331 newCONSTSUB (stash, "O_RDONLY", newSViv (O_RDONLY));
1332 newCONSTSUB (stash, "O_WRONLY", newSViv (O_WRONLY));
1333 newCONSTSUB (stash, "O_CREAT", newSViv (O_CREAT));
1334 newCONSTSUB (stash, "O_TRUNC", newSViv (O_TRUNC));
1335 newCONSTSUB (stash, "S_IFIFO", newSViv (S_IFIFO));
1336 newCONSTSUB (stash, "SIGIO", newSViv (SIGIO));
1337
1338 create_pipe ();
1339 pthread_atfork (atfork_prepare, atfork_parent, atfork_child);
1340 }
1341
1342 void
1343 max_poll_reqs (int nreqs)
1344 PROTOTYPE: $
1345 CODE:
1346 max_poll_reqs = nreqs;
1347
1348 void
1349 max_poll_time (double nseconds)
1350 PROTOTYPE: $
1351 CODE:
1352 max_poll_time = nseconds * AIO_TICKS;
1353
1354 void
1355 min_parallel (int nthreads)
1356 PROTOTYPE: $
1357
1358 void
1359 max_parallel (int nthreads)
1360 PROTOTYPE: $
1361
1362 void
1363 max_idle (int nthreads)
1364 PROTOTYPE: $
1365 CODE:
1366 set_max_idle (nthreads);
1367
1368 int
1369 max_outstanding (int maxreqs)
1370 PROTOTYPE: $
1371 CODE:
1372 RETVAL = max_outstanding;
1373 max_outstanding = maxreqs;
1374 OUTPUT:
1375 RETVAL
1376
1377 void
1378 aio_open (pathname,flags,mode,callback=&PL_sv_undef)
1379 SV8 * pathname
1380 int flags
1381 int mode
1382 SV * callback
1383 PROTOTYPE: $$$;$
1384 PPCODE:
1385 {
1386 dREQ;
1387
1388 req->type = REQ_OPEN;
1389 req->sv1 = newSVsv (pathname);
1390 req->ptr1 = SvPVbyte_nolen (req->sv1);
1391 req->int1 = flags;
1392 req->mode = mode;
1393
1394 REQ_SEND;
1395 }
1396
1397 void
1398 aio_close (fh,callback=&PL_sv_undef)
1399 SV * fh
1400 SV * callback
1401 PROTOTYPE: $;$
1402 ALIAS:
1403 aio_close = REQ_CLOSE
1404 aio_fsync = REQ_FSYNC
1405 aio_fdatasync = REQ_FDATASYNC
1406 PPCODE:
1407 {
1408 dREQ;
1409
1410 req->type = ix;
1411 req->fh = newSVsv (fh);
1412 req->int1 = PerlIO_fileno (IoIFP (sv_2io (fh)));
1413
1414 REQ_SEND;
1415 }
1416
1417 void
1418 aio_read (fh,offset,length,data,dataoffset,callback=&PL_sv_undef)
1419 SV * fh
1420 UV offset
1421 UV length
1422 SV8 * data
1423 UV dataoffset
1424 SV * callback
1425 ALIAS:
1426 aio_read = REQ_READ
1427 aio_write = REQ_WRITE
1428 PROTOTYPE: $$$$$;$
1429 PPCODE:
1430 {
1431 STRLEN svlen;
1432 char *svptr = SvPVbyte (data, svlen);
1433
1434 SvUPGRADE (data, SVt_PV);
1435 SvPOK_on (data);
1436
1437 if (dataoffset < 0)
1438 dataoffset += svlen;
1439
1440 if (dataoffset < 0 || dataoffset > svlen)
1441 croak ("data offset outside of string");
1442
1443 if (ix == REQ_WRITE)
1444 {
1445 /* write: check length and adjust. */
1446 if (length < 0 || length + dataoffset > svlen)
1447 length = svlen - dataoffset;
1448 }
1449 else
1450 {
1451 /* read: grow scalar as necessary */
1452 svptr = SvGROW (data, length + dataoffset + 1);
1453 }
1454
1455 if (length < 0)
1456 croak ("length must not be negative");
1457
1458 {
1459 dREQ;
1460
1461 req->type = ix;
1462 req->fh = newSVsv (fh);
1463 req->int1 = PerlIO_fileno (ix == REQ_READ ? IoIFP (sv_2io (fh))
1464 : IoOFP (sv_2io (fh)));
1465 req->offs = offset;
1466 req->size = length;
1467 req->sv1 = SvREFCNT_inc (data);
1468 req->ptr1 = (char *)svptr + dataoffset;
1469 req->stroffset = dataoffset;
1470
1471 if (!SvREADONLY (data))
1472 {
1473 SvREADONLY_on (data);
1474 req->flags |= FLAG_SV1_RO_OFF;
1475 }
1476
1477 REQ_SEND;
1478 }
1479 }
1480
1481 void
1482 aio_readlink (path,callback=&PL_sv_undef)
1483 SV8 * path
1484 SV * callback
1485 PROTOTYPE: $;$
1486 PPCODE:
1487 {
1488 SV *data;
1489 dREQ;
1490
1491 data = newSV (NAME_MAX);
1492 SvPOK_on (data);
1493
1494 req->type = REQ_READLINK;
1495 req->fh = newSVsv (path);
1496 req->ptr2 = SvPVbyte_nolen (req->fh);
1497 req->sv1 = data;
1498 req->ptr1 = SvPVbyte_nolen (data);
1499
1500 REQ_SEND;
1501 }
1502
1503 void
1504 aio_sendfile (out_fh,in_fh,in_offset,length,callback=&PL_sv_undef)
1505 SV * out_fh
1506 SV * in_fh
1507 UV in_offset
1508 UV length
1509 SV * callback
1510 PROTOTYPE: $$$$;$
1511 PPCODE:
1512 {
1513 dREQ;
1514
1515 req->type = REQ_SENDFILE;
1516 req->fh = newSVsv (out_fh);
1517 req->int1 = PerlIO_fileno (IoIFP (sv_2io (out_fh)));
1518 req->sv2 = newSVsv (in_fh);
1519 req->int2 = PerlIO_fileno (IoIFP (sv_2io (in_fh)));
1520 req->offs = in_offset;
1521 req->size = length;
1522
1523 REQ_SEND;
1524 }
1525
1526 void
1527 aio_readahead (fh,offset,length,callback=&PL_sv_undef)
1528 SV * fh
1529 UV offset
1530 IV length
1531 SV * callback
1532 PROTOTYPE: $$$;$
1533 PPCODE:
1534 {
1535 dREQ;
1536
1537 req->type = REQ_READAHEAD;
1538 req->fh = newSVsv (fh);
1539 req->int1 = PerlIO_fileno (IoIFP (sv_2io (fh)));
1540 req->offs = offset;
1541 req->size = length;
1542
1543 REQ_SEND;
1544 }
1545
1546 void
1547 aio_stat (fh_or_path,callback=&PL_sv_undef)
1548 SV8 * fh_or_path
1549 SV * callback
1550 ALIAS:
1551 aio_stat = REQ_STAT
1552 aio_lstat = REQ_LSTAT
1553 PPCODE:
1554 {
1555 dREQ;
1556
1557 req->ptr2 = malloc (sizeof (Stat_t));
1558 if (!req->ptr2)
1559 {
1560 req_free (req);
1561 croak ("out of memory during aio_stat statdata allocation");
1562 }
1563
1564 req->flags |= FLAG_PTR2_FREE;
1565
1566 if (SvPOK (fh_or_path))
1567 {
1568 req->type = ix;
1569 req->sv1 = newSVsv (fh_or_path);
1570 req->ptr1 = SvPVbyte_nolen (req->sv1);
1571 }
1572 else
1573 {
1574 req->type = REQ_FSTAT;
1575 req->fh = newSVsv (fh_or_path);
1576 req->int1 = PerlIO_fileno (IoIFP (sv_2io (fh_or_path)));
1577 }
1578
1579 REQ_SEND;
1580 }
1581
1582 void
1583 aio_unlink (pathname,callback=&PL_sv_undef)
1584 SV8 * pathname
1585 SV * callback
1586 ALIAS:
1587 aio_unlink = REQ_UNLINK
1588 aio_rmdir = REQ_RMDIR
1589 aio_readdir = REQ_READDIR
1590 PPCODE:
1591 {
1592 dREQ;
1593
1594 req->type = ix;
1595 req->sv1 = newSVsv (pathname);
1596 req->ptr1 = SvPVbyte_nolen (req->sv1);
1597
1598 REQ_SEND;
1599 }
1600
1601 void
1602 aio_mkdir (pathname,mode,callback=&PL_sv_undef)
1603 SV8 * pathname
1604 UV mode
1605 SV * callback
1606 PPCODE:
1607 {
1608 dREQ;
1609
1610 req->type = REQ_MKDIR;
1611 req->sv1 = newSVsv (pathname);
1612 req->ptr1 = SvPVbyte_nolen (req->sv1);
1613 req->mode = mode;
1614
1615 REQ_SEND;
1616 }
1617
1618 void
1619 aio_link (oldpath,newpath,callback=&PL_sv_undef)
1620 SV8 * oldpath
1621 SV8 * newpath
1622 SV * callback
1623 ALIAS:
1624 aio_link = REQ_LINK
1625 aio_symlink = REQ_SYMLINK
1626 aio_rename = REQ_RENAME
1627 PPCODE:
1628 {
1629 dREQ;
1630
1631 req->type = ix;
1632 req->fh = newSVsv (oldpath);
1633 req->ptr2 = SvPVbyte_nolen (req->fh);
1634 req->sv1 = newSVsv (newpath);
1635 req->ptr1 = SvPVbyte_nolen (req->sv1);
1636
1637 REQ_SEND;
1638 }
1639
1640 void
1641 aio_mknod (pathname,mode,dev,callback=&PL_sv_undef)
1642 SV8 * pathname
1643 UV mode
1644 UV dev
1645 SV * callback
1646 PPCODE:
1647 {
1648 dREQ;
1649
1650 req->type = REQ_MKNOD;
1651 req->sv1 = newSVsv (pathname);
1652 req->ptr1 = SvPVbyte_nolen (req->sv1);
1653 req->mode = (mode_t)mode;
1654 req->offs = dev;
1655
1656 REQ_SEND;
1657 }
1658
1659 void
1660 aio_busy (delay,callback=&PL_sv_undef)
1661 double delay
1662 SV * callback
1663 PPCODE:
1664 {
1665 dREQ;
1666
1667 req->type = REQ_BUSY;
1668 req->int1 = delay < 0. ? 0 : delay;
1669 req->int2 = delay < 0. ? 0 : 1000000. * (delay - req->int1); /* tv_usec is in microseconds */
1670
1671 REQ_SEND;
1672 }
1673
1674 void
1675 aio_group (callback=&PL_sv_undef)
1676 SV * callback
1677 PROTOTYPE: ;$
1678 PPCODE:
1679 {
1680 dREQ;
1681
1682 req->type = REQ_GROUP;
1683
1684 req_send (req);
1685 XPUSHs (req_sv (req, AIO_GRP_KLASS));
1686 }
1687
1688 void
1689 aio_nop (callback=&PL_sv_undef)
1690 SV * callback
1691 PPCODE:
1692 {
1693 dREQ;
1694
1695 req->type = REQ_NOP;
1696
1697 REQ_SEND;
1698 }
1699
1700 int
1701 aioreq_pri (int pri = 0)
1702 PROTOTYPE: ;$
1703 CODE:
1704 RETVAL = next_pri - PRI_BIAS;
1705 if (items > 0)
1706 {
1707 if (pri < PRI_MIN) pri = PRI_MIN;
1708 if (pri > PRI_MAX) pri = PRI_MAX;
1709 next_pri = pri + PRI_BIAS;
1710 }
1711 OUTPUT:
1712 RETVAL
1713
1714 void
1715 aioreq_nice (int nice = 0)
1716 CODE:
1717 nice = next_pri - nice;
1718 if (nice < PRI_MIN) nice = PRI_MIN;
1719 if (nice > PRI_MAX) nice = PRI_MAX;
1720 next_pri = nice + PRI_BIAS;
1721
1722 void
1723 flush ()
1724 PROTOTYPE:
1725 CODE:
1726 while (nreqs)
1727 {
1728 poll_wait ();
1729 poll_cb ();
1730 }
1731
1732 int
1733 poll()
1734 PROTOTYPE:
1735 CODE:
1736 poll_wait ();
1737 RETVAL = poll_cb ();
1738 OUTPUT:
1739 RETVAL
1740
1741 int
1742 poll_fileno()
1743 PROTOTYPE:
1744 CODE:
1745 RETVAL = respipe [0];
1746 OUTPUT:
1747 RETVAL
1748
1749 int
1750 poll_cb(...)
1751 PROTOTYPE:
1752 CODE:
1753 RETVAL = poll_cb ();
1754 OUTPUT:
1755 RETVAL
1756
1757 void
1758 poll_wait()
1759 PROTOTYPE:
1760 CODE:
1761 poll_wait ();
1762
1763 void
1764 setsig (int signum = SIGIO)
1765 PROTOTYPE: ;$
1766 CODE:
1767 {
1768 if (block_sig_level)
1769 croak ("cannot call IO::AIO::setsig from within aio_block/callback");
1770
1771 LOCK (reslock);
1772 main_tid = pthread_self ();
1773 main_sig = signum;
1774 UNLOCK (reslock);
1775
1776 if (main_sig && npending)
1777 pthread_kill (main_tid, main_sig);
1778 }
1779
1780 void
1781 aio_block (SV *cb)
1782 PROTOTYPE: &
1783 PPCODE:
1784 {
1785 int count;
1786
1787 block_sig ();
1788 PUSHMARK (SP);
1789 PUTBACK;
1790 count = call_sv (cb, GIMME_V | G_NOARGS | G_EVAL);
1791 SPAGAIN;
1792 unblock_sig ();
1793
1794 if (SvTRUE (ERRSV))
1795 croak (0);
1796
1797 XSRETURN (count);
1798 }
1799
1800 int
1801 nreqs()
1802 PROTOTYPE:
1803 CODE:
1804 RETVAL = nreqs;
1805 OUTPUT:
1806 RETVAL
1807
1808 int
1809 nready()
1810 PROTOTYPE:
1811 CODE:
1812 RETVAL = get_nready ();
1813 OUTPUT:
1814 RETVAL
1815
1816 int
1817 npending()
1818 PROTOTYPE:
1819 CODE:
1820 RETVAL = get_npending ();
1821 OUTPUT:
1822 RETVAL
1823
1824 int
1825 nthreads()
1826 PROTOTYPE:
1827 CODE:
1828 if (WORDACCESS_UNSAFE) LOCK (wrklock);
1829 RETVAL = started;
1830 if (WORDACCESS_UNSAFE) UNLOCK (wrklock);
1831 OUTPUT:
1832 RETVAL
1833
1834 PROTOTYPES: DISABLE
1835
1836 MODULE = IO::AIO PACKAGE = IO::AIO::REQ
1837
1838 void
1839 cancel (aio_req_ornot req)
1840 CODE:
1841 req_cancel (req);
1842
1843 void
1844 cb (aio_req_ornot req, SV *callback=&PL_sv_undef)
1845 CODE:
1846 SvREFCNT_dec (req->callback);
1847 req->callback = newSVsv (callback);
1848
1849 MODULE = IO::AIO PACKAGE = IO::AIO::GRP
1850
1851 void
1852 add (aio_req grp, ...)
1853 PPCODE:
1854 {
1855 int i;
1856 aio_req req;
1857
1858 if (main_sig && !block_sig_level)
1859 croak ("aio_group->add called outside aio_block/callback context while IO::AIO::setsig is in use");
1860
1861 if (grp->int1 == 2)
1862 croak ("cannot add requests to IO::AIO::GRP after the group finished");
1863
1864 for (i = 1; i < items; ++i )
1865 {
1866 if (GIMME_V != G_VOID)
1867 XPUSHs (sv_2mortal (newSVsv (ST (i))));
1868
1869 req = SvAIO_REQ (ST (i));
1870
1871 if (req)
1872 {
1873 ++grp->size;
1874 req->grp = grp;
1875
1876 req->grp_prev = 0;
1877 req->grp_next = grp->grp_first;
1878
1879 if (grp->grp_first)
1880 grp->grp_first->grp_prev = req;
1881
1882 grp->grp_first = req;
1883 }
1884 }
1885 }
1886
1887 void
1888 cancel_subs (aio_req_ornot req)
1889 CODE:
1890 req_cancel_subs (req);
1891
1892 void
1893 result (aio_req grp, ...)
1894 CODE:
1895 {
1896 int i;
1897 AV *av;
1898
1899 grp->errorno = errno;
1900
1901 av = newAV ();
1902
1903 for (i = 1; i < items; ++i )
1904 av_push (av, newSVsv (ST (i)));
1905
1906 SvREFCNT_dec (grp->sv1);
1907 grp->sv1 = (SV *)av;
1908 }
1909
1910 void
1911 errno (aio_req grp, int errorno = errno)
1912 CODE:
1913 grp->errorno = errorno;
1914
1915 void
1916 limit (aio_req grp, int limit)
1917 CODE:
1918 grp->int2 = limit;
1919 aio_grp_feed (grp);
1920
1921 void
1922 feed (aio_req grp, SV *callback=&PL_sv_undef)
1923 CODE:
1924 {
1925 SvREFCNT_dec (grp->sv2);
1926 grp->sv2 = newSVsv (callback);
1927
1928 if (grp->int2 <= 0)
1929 grp->int2 = 2;
1930
1931 aio_grp_feed (grp);
1932 }
1933