/cvs/IO-AIO/AIO.xs
Revision: 1.102
Committed: Sun Jun 3 09:44:17 2007 UTC (16 years, 11 months ago) by root
Branch: MAIN
Changes since 1.101: +8 -7 lines
Log Message:
*** empty log message ***

File Contents

# Content
1 #include "xthread.h"
2
3 #include <errno.h>
4
5 #include "EXTERN.h"
6 #include "perl.h"
7 #include "XSUB.h"
8
9 #include "autoconf/config.h"
10
11 #include <stddef.h>
12 #include <stdlib.h>
13 #include <errno.h>
14 #include <sys/time.h>
15 #include <sys/select.h>
16 #include <sys/types.h>
17 #include <sys/stat.h>
18 #include <limits.h>
19 #include <unistd.h>
20 #include <fcntl.h>
21 #include <signal.h>
22 #include <sched.h>
23 #include <utime.h>
24
25 #if HAVE_SENDFILE
26 # if __linux
27 # include <sys/sendfile.h>
28 # elif __freebsd
29 # include <sys/socket.h>
30 # include <sys/uio.h>
31 # elif __hpux
32 # include <sys/socket.h>
33 # elif __solaris /* not yet */
34 # include <sys/sendfile.h>
35 # else
36 # error sendfile support requested but not available
37 # endif
38 #endif
39
40 /* number of seconds after which idle threads exit */
41 #define IDLE_TIMEOUT 10
42
43 /* used for struct dirent, AIX doesn't provide it */
44 #ifndef NAME_MAX
45 # define NAME_MAX 4096
46 #endif
47
48 /* buffer size for various temporary buffers */
49 #define AIO_BUFSIZE 65536
50
51 /* use NV for 32 bit perls as it allows larger offsets */
52 #if IVSIZE >= 8
53 # define SvVAL64 SvIV
54 #else
55 # define SvVAL64 SvNV
56 #endif
57
58 #define dBUF \
59 char *aio_buf; \
60 LOCK (wrklock); \
61 self->dbuf = aio_buf = malloc (AIO_BUFSIZE); \
62 UNLOCK (wrklock); \
63 if (!aio_buf) \
64 return -1;
65
66 typedef SV SV8; /* byte-sv, used for argument-checking */
67
68 enum {
69 REQ_QUIT,
70 REQ_OPEN, REQ_CLOSE,
71 REQ_READ, REQ_WRITE,
72 REQ_READAHEAD, REQ_SENDFILE,
73 REQ_STAT, REQ_LSTAT, REQ_FSTAT,
74 REQ_UTIME, REQ_FUTIME,
75 REQ_CHMOD, REQ_FCHMOD,
76 REQ_CHOWN, REQ_FCHOWN,
77 REQ_FSYNC, REQ_FDATASYNC,
78 REQ_UNLINK, REQ_RMDIR, REQ_MKDIR, REQ_RENAME,
79 REQ_MKNOD, REQ_READDIR,
80 REQ_LINK, REQ_SYMLINK, REQ_READLINK,
81 REQ_GROUP, REQ_NOP,
82 REQ_BUSY,
83 };
84
85 #define AIO_REQ_KLASS "IO::AIO::REQ"
86 #define AIO_GRP_KLASS "IO::AIO::GRP"
87
88 typedef struct aio_cb
89 {
90 struct aio_cb *volatile next;
91
92 SV *callback;
93 SV *sv1, *sv2;
94 void *ptr1, *ptr2;
95 off_t offs;
96 size_t size;
97 ssize_t result;
98 double nv1, nv2;
99
100 STRLEN stroffset;
101 int type;
102 int int1, int2, int3;
103 int errorno;
104 mode_t mode; /* open */
105
106 unsigned char flags;
107 unsigned char pri;
108
109 SV *self; /* the perl counterpart of this request, if any */
110 struct aio_cb *grp, *grp_prev, *grp_next, *grp_first;
111 } aio_cb;
112
113 enum {
114 FLAG_CANCELLED = 0x01, /* request was cancelled */
115 FLAG_SV2_RO_OFF = 0x40, /* data was set readonly */
116 FLAG_PTR2_FREE = 0x80, /* need to free(ptr2) */
117 };
118
119 typedef aio_cb *aio_req;
120 typedef aio_cb *aio_req_ornot;
121
122 enum {
123 PRI_MIN = -4,
124 PRI_MAX = 4,
125
126 DEFAULT_PRI = 0,
127 PRI_BIAS = -PRI_MIN,
128 NUM_PRI = PRI_MAX + PRI_BIAS + 1,
129 };
130
131 #define AIO_TICKS ((1000000 + 1023) >> 10)
132
133 static unsigned int max_poll_time = 0;
134 static unsigned int max_poll_reqs = 0;
135
136 /* calculate time difference in ~1/AIO_TICKS of a second */
137 static int tvdiff (struct timeval *tv1, struct timeval *tv2)
138 {
139 return (tv2->tv_sec - tv1->tv_sec ) * AIO_TICKS
140 + ((tv2->tv_usec - tv1->tv_usec) >> 10);
141 }
142
143 static thread_t main_tid;
144 static int main_sig;
145 static int block_sig_level;
146
147 void block_sig ()
148 {
149 sigset_t ss;
150
151 if (block_sig_level++)
152 return;
153
154 if (!main_sig)
155 return;
156
157 sigemptyset (&ss);
158 sigaddset (&ss, main_sig);
159 pthread_sigmask (SIG_BLOCK, &ss, 0);
160 }
161
162 void unblock_sig ()
163 {
164 sigset_t ss;
165
166 if (--block_sig_level)
167 return;
168
169 if (!main_sig)
170 return;
171
172 sigemptyset (&ss);
173 sigaddset (&ss, main_sig);
174 pthread_sigmask (SIG_UNBLOCK, &ss, 0);
175 }
176
177 static int next_pri = DEFAULT_PRI + PRI_BIAS;
178
179 static unsigned int started, idle, wanted;
180
181 /* worker threads management */
182 static mutex_t wrklock = MUTEX_INIT;
183
184 typedef struct worker {
185 /* locked by wrklock */
186 struct worker *prev, *next;
187
188 thread_t tid;
189
190 /* locked by reslock, reqlock or wrklock */
191 aio_req req; /* currently processed request */
192 void *dbuf;
193 DIR *dirp;
194 } worker;
195
196 static worker wrk_first = { &wrk_first, &wrk_first, 0 };
197
198 static void worker_clear (worker *wrk)
199 {
200 if (wrk->dirp)
201 {
202 closedir (wrk->dirp);
203 wrk->dirp = 0;
204 }
205
206 if (wrk->dbuf)
207 {
208 free (wrk->dbuf);
209 wrk->dbuf = 0;
210 }
211 }
212
213 static void worker_free (worker *wrk)
214 {
215 wrk->next->prev = wrk->prev;
216 wrk->prev->next = wrk->next;
217
218 free (wrk);
219 }
220
221 static volatile unsigned int nreqs, nready, npending;
222 static volatile unsigned int max_idle = 4;
223 static volatile unsigned int max_outstanding = 0xffffffff;
224 static int respipe [2];
225
226 static mutex_t reslock = MUTEX_INIT;
227 static mutex_t reqlock = MUTEX_INIT;
228 static cond_t reqwait = COND_INIT;
229
230 #if WORDACCESS_UNSAFE
231
232 static unsigned int get_nready ()
233 {
234 unsigned int retval;
235
236 LOCK (reqlock);
237 retval = nready;
238 UNLOCK (reqlock);
239
240 return retval;
241 }
242
243 static unsigned int get_npending ()
244 {
245 unsigned int retval;
246
247 LOCK (reslock);
248 retval = npending;
249 UNLOCK (reslock);
250
251 return retval;
252 }
253
254 static unsigned int get_nthreads ()
255 {
256 unsigned int retval;
257
258 LOCK (wrklock);
259 retval = started;
260 UNLOCK (wrklock);
261
262 return retval;
263 }
264
265 #else
266
267 # define get_nready() nready
268 # define get_npending() npending
269 # define get_nthreads() started
270
271 #endif
272
273 /*
274 * a somewhat faster data structure might be nice, but
275 * with 8 priorities this actually needs <20 insns
276 * per shift, the most expensive operation.
277 */
278 typedef struct {
279 aio_req qs[NUM_PRI], qe[NUM_PRI]; /* qstart, qend */
280 int size;
281 } reqq;
282
283 static reqq req_queue;
284 static reqq res_queue;
285
286 int reqq_push (reqq *q, aio_req req)
287 {
288 int pri = req->pri;
289 req->next = 0;
290
291 if (q->qe[pri])
292 {
293 q->qe[pri]->next = req;
294 q->qe[pri] = req;
295 }
296 else
297 q->qe[pri] = q->qs[pri] = req;
298
299 return q->size++;
300 }
301
302 aio_req reqq_shift (reqq *q)
303 {
304 int pri;
305
306 if (!q->size)
307 return 0;
308
309 --q->size;
310
311 for (pri = NUM_PRI; pri--; )
312 {
313 aio_req req = q->qs[pri];
314
315 if (req)
316 {
317 if (!(q->qs[pri] = req->next))
318 q->qe[pri] = 0;
319
320 return req;
321 }
322 }
323
324 abort ();
325 }
326
327 static int poll_cb ();
328 static int req_invoke (aio_req req);
329 static void req_destroy (aio_req req);
330 static void req_cancel (aio_req req);
331
332 /* must be called at most once */
333 static SV *req_sv (aio_req req, const char *klass)
334 {
335 if (!req->self)
336 {
337 req->self = (SV *)newHV ();
338 sv_magic (req->self, 0, PERL_MAGIC_ext, (char *)req, 0);
339 }
340
341 return sv_2mortal (sv_bless (newRV_inc (req->self), gv_stashpv (klass, 1)));
342 }
343
344 static aio_req SvAIO_REQ (SV *sv)
345 {
346 MAGIC *mg;
347
348 if (!sv_derived_from (sv, AIO_REQ_KLASS) || !SvROK (sv))
349 croak ("object of class " AIO_REQ_KLASS " expected");
350
351 mg = mg_find (SvRV (sv), PERL_MAGIC_ext);
352
353 return mg ? (aio_req)mg->mg_ptr : 0;
354 }
355
356 static void aio_grp_feed (aio_req grp)
357 {
358 block_sig ();
359
360 while (grp->size < grp->int2 && !(grp->flags & FLAG_CANCELLED))
361 {
362 int old_len = grp->size;
363
364 if (grp->sv2 && SvOK (grp->sv2))
365 {
366 dSP;
367
368 ENTER;
369 SAVETMPS;
370 PUSHMARK (SP);
371 XPUSHs (req_sv (grp, AIO_GRP_KLASS));
372 PUTBACK;
373 call_sv (grp->sv2, G_VOID | G_EVAL | G_KEEPERR);
374 SPAGAIN;
375 FREETMPS;
376 LEAVE;
377 }
378
379 /* stop if no progress has been made */
380 if (old_len == grp->size)
381 {
382 SvREFCNT_dec (grp->sv2);
383 grp->sv2 = 0;
384 break;
385 }
386 }
387
388 unblock_sig ();
389 }
390
391 static void aio_grp_dec (aio_req grp)
392 {
393 --grp->size;
394
395 /* call feeder, if applicable */
396 aio_grp_feed (grp);
397
398 /* finish, if done */
399 if (!grp->size && grp->int1)
400 {
401 block_sig ();
402
403 if (!req_invoke (grp))
404 {
405 req_destroy (grp);
406 unblock_sig ();
407 croak (0);
408 }
409
410 req_destroy (grp);
411 unblock_sig ();
412 }
413 }
414
415 static int req_invoke (aio_req req)
416 {
417 dSP;
418
419 if (req->flags & FLAG_SV2_RO_OFF)
420 SvREADONLY_off (req->sv2);
421
422 if (!(req->flags & FLAG_CANCELLED) && SvOK (req->callback))
423 {
424 ENTER;
425 SAVETMPS;
426 PUSHMARK (SP);
427 EXTEND (SP, 1);
428
429 switch (req->type)
430 {
431 case REQ_READDIR:
432 {
433 SV *rv = &PL_sv_undef;
434
435 if (req->result >= 0)
436 {
437 int i;
438 char *buf = req->ptr2;
439 AV *av = newAV ();
440
441 av_extend (av, req->result - 1);
442
443 for (i = 0; i < req->result; ++i)
444 {
445 SV *sv = newSVpv (buf, 0);
446
447 av_store (av, i, sv);
448 buf += SvCUR (sv) + 1;
449 }
450
451 rv = sv_2mortal (newRV_noinc ((SV *)av));
452 }
453
454 PUSHs (rv);
455 }
456 break;
457
458 case REQ_OPEN:
459 {
460 /* convert fd to fh */
461 SV *fh;
462
463 PUSHs (sv_2mortal (newSViv (req->result)));
464 PUTBACK;
465 call_pv ("IO::AIO::_fd2fh", G_SCALAR | G_EVAL);
466 SPAGAIN;
467
468 fh = POPs;
469 PUSHMARK (SP);
470 XPUSHs (fh);
471 }
472 break;
473
474 case REQ_GROUP:
475 req->int1 = 2; /* mark group as finished */
476
477 if (req->sv1)
478 {
479 int i;
480 AV *av = (AV *)req->sv1;
481
482 EXTEND (SP, AvFILL (av) + 1);
483 for (i = 0; i <= AvFILL (av); ++i)
484 PUSHs (*av_fetch (av, i, 0));
485 }
486 break;
487
488 case REQ_NOP:
489 case REQ_BUSY:
490 break;
491
492 case REQ_READLINK:
493 if (req->result > 0)
494 {
495 SvCUR_set (req->sv2, req->result);
496 *SvEND (req->sv2) = 0;
497 PUSHs (req->sv2);
498 }
499 break;
500
501 case REQ_STAT:
502 case REQ_LSTAT:
503 case REQ_FSTAT:
504 PL_laststype = req->type == REQ_LSTAT ? OP_LSTAT : OP_STAT;
505 PL_laststatval = req->result;
506 PL_statcache = *(Stat_t *)(req->ptr2);
507 PUSHs (sv_2mortal (newSViv (req->result)));
508 break;
509
510 case REQ_READ:
511 SvCUR_set (req->sv2, req->stroffset + (req->result > 0 ? req->result : 0));
512 *SvEND (req->sv2) = 0;
513 PUSHs (sv_2mortal (newSViv (req->result)));
514 break;
515
516 default:
517 PUSHs (sv_2mortal (newSViv (req->result)));
518 break;
519 }
520
521 errno = req->errorno;
522
523 PUTBACK;
524 call_sv (req->callback, G_VOID | G_EVAL);
525 SPAGAIN;
526
527 FREETMPS;
528 LEAVE;
529 }
530
531 if (req->grp)
532 {
533 aio_req grp = req->grp;
534
535 /* unlink request */
536 if (req->grp_next) req->grp_next->grp_prev = req->grp_prev;
537 if (req->grp_prev) req->grp_prev->grp_next = req->grp_next;
538
539 if (grp->grp_first == req)
540 grp->grp_first = req->grp_next;
541
542 aio_grp_dec (grp);
543 }
544
545 return !SvTRUE (ERRSV);
546 }
547
548 static void req_destroy (aio_req req)
549 {
550 if (req->self)
551 {
552 sv_unmagic (req->self, PERL_MAGIC_ext);
553 SvREFCNT_dec (req->self);
554 }
555
556 SvREFCNT_dec (req->sv1);
557 SvREFCNT_dec (req->sv2);
558 SvREFCNT_dec (req->callback);
559
560 if (req->flags & FLAG_PTR2_FREE)
561 free (req->ptr2);
562
563 Safefree (req);
564 }
565
566 static void req_cancel_subs (aio_req grp)
567 {
568 aio_req sub;
569
570 if (grp->type != REQ_GROUP)
571 return;
572
573 SvREFCNT_dec (grp->sv2);
574 grp->sv2 = 0;
575
576 for (sub = grp->grp_first; sub; sub = sub->grp_next)
577 req_cancel (sub);
578 }
579
580 static void req_cancel (aio_req req)
581 {
582 req->flags |= FLAG_CANCELLED;
583
584 req_cancel_subs (req);
585 }
586
587 static void *aio_proc (void *arg);
588
589 static void start_thread (void)
590 {
591 worker *wrk = calloc (1, sizeof (worker));
592
593 if (!wrk)
594 croak ("unable to allocate worker thread data");
595
596 LOCK (wrklock);
597
598 if (thread_create (&wrk->tid, aio_proc, (void *)wrk))
599 {
600 wrk->prev = &wrk_first;
601 wrk->next = wrk_first.next;
602 wrk_first.next->prev = wrk;
603 wrk_first.next = wrk;
604 ++started;
605 }
606 else
607 free (wrk);
608
609 UNLOCK (wrklock);
610 }
611
612 static void maybe_start_thread ()
613 {
614 if (get_nthreads () >= wanted)
615 return;
616
617 /* todo: maybe use idle here, but might be less exact */
618 if (0 <= (int)get_nthreads () + (int)get_npending () - (int)nreqs)
619 return;
620
621 start_thread ();
622 }
623
624 static void req_send (aio_req req)
625 {
626 block_sig ();
627
628 ++nreqs;
629
630 LOCK (reqlock);
631 ++nready;
632 reqq_push (&req_queue, req);
633 COND_SIGNAL (reqwait);
634 UNLOCK (reqlock);
635
636 unblock_sig ();
637
638 maybe_start_thread ();
639 }
640
641 static void end_thread (void)
642 {
643 aio_req req;
644
645 Newz (0, req, 1, aio_cb);
646
647 req->type = REQ_QUIT;
648 req->pri = PRI_MAX + PRI_BIAS;
649
650 LOCK (reqlock);
651 reqq_push (&req_queue, req);
652 COND_SIGNAL (reqwait);
653 UNLOCK (reqlock);
654
655 LOCK (wrklock);
656 --started;
657 UNLOCK (wrklock);
658 }
659
660 static void set_max_idle (int nthreads)
661 {
662 if (WORDACCESS_UNSAFE) LOCK (reqlock);
663 max_idle = nthreads <= 0 ? 1 : nthreads;
664 if (WORDACCESS_UNSAFE) UNLOCK (reqlock);
665 }
666
667 static void min_parallel (int nthreads)
668 {
669 if (wanted < nthreads)
670 wanted = nthreads;
671 }
672
673 static void max_parallel (int nthreads)
674 {
675 if (wanted > nthreads)
676 wanted = nthreads;
677
678 while (started > wanted)
679 end_thread ();
680 }
681
682 static void poll_wait ()
683 {
684 fd_set rfd;
685
686 while (nreqs)
687 {
688 int size;
689 if (WORDACCESS_UNSAFE) LOCK (reslock);
690 size = res_queue.size;
691 if (WORDACCESS_UNSAFE) UNLOCK (reslock);
692
693 if (size)
694 return;
695
696 maybe_start_thread ();
697
698 FD_ZERO(&rfd);
699 FD_SET(respipe [0], &rfd);
700
701 select (respipe [0] + 1, &rfd, 0, 0, 0);
702 }
703 }
704
705 static int poll_cb ()
706 {
707 dSP;
708 int count = 0;
709 int maxreqs = max_poll_reqs;
710 int do_croak = 0;
711 struct timeval tv_start, tv_now;
712 aio_req req;
713
714 if (max_poll_time)
715 gettimeofday (&tv_start, 0);
716
717 block_sig ();
718
719 for (;;)
720 {
721 for (;;)
722 {
723 maybe_start_thread ();
724
725 LOCK (reslock);
726 req = reqq_shift (&res_queue);
727
728 if (req)
729 {
730 --npending;
731
732 if (!res_queue.size)
733 {
734 /* read any signals sent by the worker threads */
735 char buf [4];
736 while (read (respipe [0], buf, 4) == 4)
737 ;
738 }
739 }
740
741 UNLOCK (reslock);
742
743 if (!req)
744 break;
745
746 --nreqs;
747
748 if (req->type == REQ_GROUP && req->size)
749 {
750 req->int1 = 1; /* mark request as delayed */
751 continue;
752 }
753 else
754 {
755 if (!req_invoke (req))
756 {
757 req_destroy (req);
758 unblock_sig ();
759 croak (0);
760 }
761
762 count++;
763 }
764
765 req_destroy (req);
766
767 if (maxreqs && !--maxreqs)
768 break;
769
770 if (max_poll_time)
771 {
772 gettimeofday (&tv_now, 0);
773
774 if (tvdiff (&tv_start, &tv_now) >= max_poll_time)
775 break;
776 }
777 }
778
779 if (nreqs <= max_outstanding)
780 break;
781
782 poll_wait ();
783
784 ++maxreqs;
785 }
786
787 unblock_sig ();
788 return count;
789 }
790
791 static void create_pipe ()
792 {
793 if (pipe (respipe))
794 croak ("unable to initialize result pipe");
795
796 if (fcntl (respipe [0], F_SETFL, O_NONBLOCK))
797 croak ("cannot set result pipe to nonblocking mode");
798
799 if (fcntl (respipe [1], F_SETFL, O_NONBLOCK))
800 croak ("cannot set result pipe to nonblocking mode");
801 }
802
803 /*****************************************************************************/
804 /* work around various missing functions */
805
806 #if !HAVE_PREADWRITE
807 # define pread aio_pread
808 # define pwrite aio_pwrite
809
810 /*
811 * make our pread/pwrite safe against themselves, but not against
812 * normal read/write by using a mutex. slows down execution a lot,
813 * but that's your problem, not mine.
814 */
815 static mutex_t preadwritelock = MUTEX_INIT;
816
817 static ssize_t pread (int fd, void *buf, size_t count, off_t offset)
818 {
819 ssize_t res;
820 off_t ooffset;
821
822 LOCK (preadwritelock);
823 ooffset = lseek (fd, 0, SEEK_CUR);
824 lseek (fd, offset, SEEK_SET);
825 res = read (fd, buf, count);
826 lseek (fd, ooffset, SEEK_SET);
827 UNLOCK (preadwritelock);
828
829 return res;
830 }
831
832 static ssize_t pwrite (int fd, void *buf, size_t count, off_t offset)
833 {
834 ssize_t res;
835 off_t ooffset;
836
837 LOCK (preadwritelock);
838 ooffset = lseek (fd, 0, SEEK_CUR);
839 lseek (fd, offset, SEEK_SET);
840 res = write (fd, buf, count);
841 lseek (fd, ooffset, SEEK_SET); /* restore the original file position */
842 UNLOCK (preadwritelock);
843
844 return res;
845 }
846 #endif
847
848 #if !HAVE_FUTIMES
849
850 # define utimes(path,times) aio_utimes (path, times)
851 # define futimes(fd,times) aio_futimes (fd, times)
852
853 int aio_utimes (const char *filename, const struct timeval times[2])
854 {
855 if (times)
856 {
857 struct utimbuf buf;
858
859 buf.actime = times[0].tv_sec;
860 buf.modtime = times[1].tv_sec;
861
862 return utime (filename, &buf);
863 }
864 else
865 return utime (filename, 0);
866 }
867
868 int aio_futimes (int fd, const struct timeval tv[2])
869 {
870 errno = ENOSYS;
871 return -1;
872 }
873
874 #endif
875
876 #if !HAVE_FDATASYNC
877 # define fdatasync fsync
878 #endif
879
880 #if !HAVE_READAHEAD
881 # define readahead(fd,offset,count) aio_readahead (fd, offset, count, self)
882
883 static ssize_t aio_readahead (int fd, off_t offset, size_t count, worker *self)
884 {
885 dBUF;
886
887 while (count > 0)
888 {
889 size_t len = count < AIO_BUFSIZE ? count : AIO_BUFSIZE;
890
891 pread (fd, aio_buf, len, offset);
892 offset += len;
893 count -= len;
894 }
895
896 errno = 0;
    return 0; /* must return a value: mimic readahead (), which returns 0 on success */
897 }
898
899 #endif
900
901 #if !HAVE_READDIR_R
902 # define readdir_r aio_readdir_r
903
904 static mutex_t readdirlock = MUTEX_INIT;
905
906 static int readdir_r (DIR *dirp, struct dirent *ent, struct dirent **res)
907 {
908 struct dirent *e;
909 int errorno;
910
911 LOCK (readdirlock);
912
913 e = readdir (dirp);
914 errorno = errno;
915
916 if (e)
917 {
918 *res = ent;
919 strcpy (ent->d_name, e->d_name);
920 }
921 else
922 *res = 0;
923
924 UNLOCK (readdirlock);
925
926 errno = errorno;
927 return e ? 0 : -1;
928 }
929 #endif
930
931 /* sendfile always needs emulation */
932 static ssize_t sendfile_ (int ofd, int ifd, off_t offset, size_t count, worker *self)
933 {
934 ssize_t res;
935
936 if (!count)
937 return 0;
938
939 #if HAVE_SENDFILE
940 # if __linux
941 res = sendfile (ofd, ifd, &offset, count);
942
943 # elif __freebsd
944 /*
945 * Of course, the freebsd sendfile is a dire hack with no thoughts
946 * wasted on making it similar to other I/O functions.
947 */
948 {
949 off_t sbytes;
950 res = sendfile (ifd, ofd, offset, count, 0, &sbytes, 0);
951
952 if (res < 0 && sbytes)
953 /* maybe only on EAGAIN: as usual, the manpage leaves you guessing */
954 res = sbytes;
955 }
956
957 # elif __hpux
958 res = sendfile (ofd, ifd, offset, count, 0, 0);
959
960 # elif __solaris
961 {
962 struct sendfilevec vec;
963 size_t sbytes;
964
965 vec.sfv_fd = ifd;
966 vec.sfv_flag = 0;
967 vec.sfv_off = offset;
968 vec.sfv_len = count;
969
970 res = sendfilev (ofd, &vec, 1, &sbytes);
971
972 if (res < 0 && sbytes)
973 res = sbytes;
974 }
975
976 # endif
977 #else
978 res = -1;
979 errno = ENOSYS;
980 #endif
981
982 if (res < 0
983 && (errno == ENOSYS || errno == EINVAL || errno == ENOTSOCK
984 #if __solaris
985 || errno == EAFNOSUPPORT || errno == EPROTOTYPE
986 #endif
987 )
988 )
989 {
990 /* emulate sendfile. this is a major pain in the ass */
991 dBUF;
992
993 res = 0;
994
995 while (count)
996 {
997 ssize_t cnt;
998
999 cnt = pread (ifd, aio_buf, count > AIO_BUFSIZE ? AIO_BUFSIZE : count, offset);
1000
1001 if (cnt <= 0)
1002 {
1003 if (cnt && !res) res = -1;
1004 break;
1005 }
1006
1007 cnt = write (ofd, aio_buf, cnt);
1008
1009 if (cnt <= 0)
1010 {
1011 if (cnt && !res) res = -1;
1012 break;
1013 }
1014
1015 offset += cnt;
1016 res += cnt;
1017 count -= cnt;
1018 }
1019 }
1020
1021 return res;
1022 }
1023
1024 /* read a full directory */
1025 static void scandir_ (aio_req req, worker *self)
1026 {
1027 DIR *dirp;
1028 union
1029 {
1030 struct dirent d;
1031 char b [offsetof (struct dirent, d_name) + NAME_MAX + 1];
1032 } *u;
1033 struct dirent *entp;
1034 char *name, *names;
1035 int memlen = 4096;
1036 int memofs = 0;
1037 int res = 0;
1038 int errorno;
1039
1040 LOCK (wrklock);
1041 self->dirp = dirp = opendir (req->ptr1);
1042 self->dbuf = u = malloc (sizeof (*u));
1043 req->flags |= FLAG_PTR2_FREE;
1044 req->ptr2 = names = malloc (memlen);
1045 UNLOCK (wrklock);
1046
1047 if (dirp && u && names)
1048 for (;;)
1049 {
1050 errno = 0;
1051 readdir_r (dirp, &u->d, &entp);
1052
1053 if (!entp)
1054 break;
1055
1056 name = entp->d_name;
1057
1058 if (name [0] != '.' || (name [1] && (name [1] != '.' || name [2])))
1059 {
1060 int len = strlen (name) + 1;
1061
1062 res++;
1063
1064 while (memofs + len > memlen)
1065 {
1066 memlen *= 2;
1067 LOCK (wrklock);
1068 req->ptr2 = names = realloc (names, memlen);
1069 UNLOCK (wrklock);
1070
1071 if (!names)
1072 break;
1073 }
1074
     if (!names)
       break; /* realloc failed: errno is set, stop scanning */

1075 memcpy (names + memofs, name, len);
1076 memofs += len;
1077 }
1078 }
1079
1080 if (errno)
1081 res = -1;
1082
1083 req->result = res;
1084 }
1085
1086 /*****************************************************************************/
1087
1088 static void *aio_proc (void *thr_arg)
1089 {
1090 aio_req req;
1091 struct timespec ts;
1092 worker *self = (worker *)thr_arg;
1093
1094 /* try to distribute timeouts somewhat evenly */
1095 ts.tv_nsec = (((unsigned long)self + (unsigned long)ts.tv_sec) & 1023UL)
1096 * (1000000000UL / 1024UL);
1097
1098 for (;;)
1099 {
1100 ts.tv_sec = time (0) + IDLE_TIMEOUT;
1101
1102 LOCK (reqlock);
1103
1104 for (;;)
1105 {
1106 self->req = req = reqq_shift (&req_queue);
1107
1108 if (req)
1109 break;
1110
1111 ++idle;
1112
1113 if (COND_TIMEDWAIT (reqwait, reqlock, ts)
1114 == ETIMEDOUT)
1115 {
1116 if (idle > max_idle)
1117 {
1118 --idle;
1119 UNLOCK (reqlock);
1120 LOCK (wrklock);
1121 --started;
1122 UNLOCK (wrklock);
1123 goto quit;
1124 }
1125
1126 /* we are allowed to idle, so do so without any timeout */
1127 COND_WAIT (reqwait, reqlock);
1128 ts.tv_sec = time (0) + IDLE_TIMEOUT;
1129 }
1130
1131 --idle;
1132 }
1133
1134 --nready;
1135
1136 UNLOCK (reqlock);
1137
1138 errno = 0; /* strictly unnecessary */
1139
1140 if (!(req->flags & FLAG_CANCELLED))
1141 switch (req->type)
1142 {
1143 case REQ_READ: req->result = req->offs >= 0
1144 ? pread (req->int1, req->ptr1, req->size, req->offs)
1145 : read (req->int1, req->ptr1, req->size); break;
1146 case REQ_WRITE: req->result = req->offs >= 0
1147 ? pwrite (req->int1, req->ptr1, req->size, req->offs)
1148 : write (req->int1, req->ptr1, req->size); break;
1149
1150 case REQ_READAHEAD: req->result = readahead (req->int1, req->offs, req->size); break;
1151 case REQ_SENDFILE: req->result = sendfile_ (req->int1, req->int2, req->offs, req->size, self); break;
1152
1153 case REQ_STAT: req->result = stat (req->ptr1, (Stat_t *)req->ptr2); break;
1154 case REQ_LSTAT: req->result = lstat (req->ptr1, (Stat_t *)req->ptr2); break;
1155 case REQ_FSTAT: req->result = fstat (req->int1, (Stat_t *)req->ptr2); break;
1156
1157 case REQ_CHOWN: req->result = chown (req->ptr1, req->int2, req->int3); break;
1158 case REQ_FCHOWN: req->result = fchown (req->int1, req->int2, req->int3); break;
1159 case REQ_CHMOD: req->result = chmod (req->ptr1, req->mode); break;
1160 case REQ_FCHMOD: req->result = fchmod (req->int1, req->mode); break;
1161
1162 case REQ_OPEN: req->result = open (req->ptr1, req->int1, req->mode); break;
1163 case REQ_CLOSE: req->result = close (req->int1); break;
1164 case REQ_UNLINK: req->result = unlink (req->ptr1); break;
1165 case REQ_RMDIR: req->result = rmdir (req->ptr1); break;
1166 case REQ_MKDIR: req->result = mkdir (req->ptr1, req->mode); break;
1167 case REQ_RENAME: req->result = rename (req->ptr2, req->ptr1); break;
1168 case REQ_LINK: req->result = link (req->ptr2, req->ptr1); break;
1169 case REQ_SYMLINK: req->result = symlink (req->ptr2, req->ptr1); break;
1170 case REQ_MKNOD: req->result = mknod (req->ptr1, req->mode, (dev_t)req->offs); break;
1171 case REQ_READLINK: req->result = readlink (req->ptr2, req->ptr1, NAME_MAX); break;
1172
1173 case REQ_FDATASYNC: req->result = fdatasync (req->int1); break;
1174 case REQ_FSYNC: req->result = fsync (req->int1); break;
1175 case REQ_READDIR: scandir_ (req, self); break;
1176
1177 case REQ_BUSY:
1178 {
1179 struct timeval tv;
1180
1181 tv.tv_sec = req->nv1;
1182 tv.tv_usec = (req->nv1 - tv.tv_sec) * 1000000.;
1183
1184 req->result = select (0, 0, 0, 0, &tv);
1185 }
     break;
1186
1187 case REQ_UTIME:
1188 case REQ_FUTIME:
1189 {
1190 struct timeval tv[2];
1191 struct timeval *times;
1192
1193 if (req->nv1 != -1. || req->nv2 != -1.)
1194 {
1195 tv[0].tv_sec = req->nv1;
1196 tv[0].tv_usec = (req->nv1 - tv[0].tv_sec) * 1000000.;
1197 tv[1].tv_sec = req->nv2;
1198 tv[1].tv_usec = (req->nv2 - tv[1].tv_sec) * 1000000.;
1199
1200 times = tv;
1201 }
1202 else
1203 times = 0;
1204
1205
1206 req->result = req->type == REQ_FUTIME
1207 ? futimes (req->int1, times)
1208 : utimes (req->ptr1, times);
1209 }
     break;
1210
1211 case REQ_GROUP:
1212 case REQ_NOP:
1213 break;
1214
1215 case REQ_QUIT:
1216 goto quit;
1217
1218 default:
1219 req->result = -1;
1220 break;
1221 }
1222
1223 req->errorno = errno;
1224
1225 LOCK (reslock);
1226
1227 ++npending;
1228
1229 if (!reqq_push (&res_queue, req))
1230 {
1231 /* write a dummy byte to the pipe so fh becomes ready */
1232 write (respipe [1], &respipe, 1);
1233
1234 /* optionally signal the main thread asynchronously */
1235 if (main_sig)
1236 pthread_kill (main_tid, main_sig);
1237 }
1238
1239 self->req = 0;
1240 worker_clear (self);
1241
1242 UNLOCK (reslock);
1243 }
1244
1245 quit:
1246 LOCK (wrklock);
1247 worker_free (self);
1248 UNLOCK (wrklock);
1249
1250 return 0;
1251 }
1252
1253 /*****************************************************************************/
1254
1255 static void atfork_prepare (void)
1256 {
1257 LOCK (wrklock);
1258 LOCK (reqlock);
1259 LOCK (reslock);
1260 #if !HAVE_PREADWRITE
1261 LOCK (preadwritelock);
1262 #endif
1263 #if !HAVE_READDIR_R
1264 LOCK (readdirlock);
1265 #endif
1266 }
1267
1268 static void atfork_parent (void)
1269 {
1270 #if !HAVE_READDIR_R
1271 UNLOCK (readdirlock);
1272 #endif
1273 #if !HAVE_PREADWRITE
1274 UNLOCK (preadwritelock);
1275 #endif
1276 UNLOCK (reslock);
1277 UNLOCK (reqlock);
1278 UNLOCK (wrklock);
1279 }
1280
1281 static void atfork_child (void)
1282 {
1283 aio_req prv;
1284
1285 while (prv = reqq_shift (&req_queue))
1286 req_destroy (prv);
1287
1288 while (prv = reqq_shift (&res_queue))
1289 req_destroy (prv);
1290
1291 while (wrk_first.next != &wrk_first)
1292 {
1293 worker *wrk = wrk_first.next;
1294
1295 if (wrk->req)
1296 req_destroy (wrk->req);
1297
1298 worker_clear (wrk);
1299 worker_free (wrk);
1300 }
1301
1302 started = 0;
1303 idle = 0;
1304 nreqs = 0;
1305 nready = 0;
1306 npending = 0;
1307
1308 close (respipe [0]);
1309 close (respipe [1]);
1310 create_pipe ();
1311
1312 atfork_parent ();
1313 }
1314
1315 #define dREQ \
1316 aio_req req; \
1317 int req_pri = next_pri; \
1318 next_pri = DEFAULT_PRI + PRI_BIAS; \
1319 \
1320 if (SvOK (callback) && !SvROK (callback)) \
1321 croak ("callback must be undef or of reference type"); \
1322 \
1323 Newz (0, req, 1, aio_cb); \
1324 if (!req) \
1325 croak ("out of memory during aio_req allocation"); \
1326 \
1327 req->callback = newSVsv (callback); \
1328 req->pri = req_pri
1329
1330 #define REQ_SEND \
1331 req_send (req); \
1332 \
1333 if (GIMME_V != G_VOID) \
1334 XPUSHs (req_sv (req, AIO_REQ_KLASS));
1335
1336 MODULE = IO::AIO PACKAGE = IO::AIO
1337
1338 PROTOTYPES: ENABLE
1339
1340 BOOT:
1341 {
1342 HV *stash = gv_stashpv ("IO::AIO", 1);
1343
1344 newCONSTSUB (stash, "EXDEV", newSViv (EXDEV));
1345 newCONSTSUB (stash, "O_RDONLY", newSViv (O_RDONLY));
1346 newCONSTSUB (stash, "O_WRONLY", newSViv (O_WRONLY));
1347 newCONSTSUB (stash, "O_CREAT", newSViv (O_CREAT));
1348 newCONSTSUB (stash, "O_TRUNC", newSViv (O_TRUNC));
1349 newCONSTSUB (stash, "S_IFIFO", newSViv (S_IFIFO));
1350 newCONSTSUB (stash, "SIGIO", newSViv (SIGIO));
1351
1352 create_pipe ();
1353 ATFORK (atfork_prepare, atfork_parent, atfork_child);
1354 }
1355
1356 void
1357 max_poll_reqs (int nreqs)
1358 PROTOTYPE: $
1359 CODE:
1360 max_poll_reqs = nreqs;
1361
1362 void
1363 max_poll_time (double nseconds)
1364 PROTOTYPE: $
1365 CODE:
1366 max_poll_time = nseconds * AIO_TICKS;
1367
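Seen from Perl, the two knobs above bound how much work a single poll_cb call may perform. A minimal tuning sketch, using only the public IO::AIO interface built by this file:

   use IO::AIO;

   IO::AIO::max_poll_time 0.01;   # spend at most ~10ms per poll_cb call
   IO::AIO::max_poll_reqs 100;    # and handle at most 100 completed requests per call
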
1368 void
1369 min_parallel (int nthreads)
1370 PROTOTYPE: $
1371
1372 void
1373 max_parallel (int nthreads)
1374 PROTOTYPE: $
1375
1376 void
1377 max_idle (int nthreads)
1378 PROTOTYPE: $
1379 CODE:
1380 set_max_idle (nthreads);
1381
1382 int
1383 max_outstanding (int maxreqs)
1384 PROTOTYPE: $
1385 CODE:
1386 RETVAL = max_outstanding;
1387 max_outstanding = maxreqs;
1388 OUTPUT:
1389 RETVAL
1390
1391 void
1392 aio_open (SV8 *pathname, int flags, int mode, SV *callback=&PL_sv_undef)
1393 PROTOTYPE: $$$;$
1394 PPCODE:
1395 {
1396 dREQ;
1397
1398 req->type = REQ_OPEN;
1399 req->sv1 = newSVsv (pathname);
1400 req->ptr1 = SvPVbyte_nolen (req->sv1);
1401 req->int1 = flags;
1402 req->mode = mode;
1403
1404 REQ_SEND;
1405 }
1406
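A minimal usage sketch for the XSUB above, assuming the exported IO::AIO interface; on success the callback receives a file handle built from the descriptor via IO::AIO::_fd2fh (see req_invoke), otherwise undef:

   use IO::AIO;

   aio_open "/etc/passwd", O_RDONLY, 0, sub {
      my ($fh) = @_;
      $fh or die "open failed: $!";
      # $fh behaves like a normal perl file handle from here on
   };
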
1407 void
1408 aio_close (SV *fh, SV *callback=&PL_sv_undef)
1409 PROTOTYPE: $;$
1410 ALIAS:
1411 aio_close = REQ_CLOSE
1412 aio_fsync = REQ_FSYNC
1413 aio_fdatasync = REQ_FDATASYNC
1414 PPCODE:
1415 {
1416 dREQ;
1417
1418 req->type = ix;
1419 req->sv1 = newSVsv (fh);
1420 req->int1 = PerlIO_fileno (IoIFP (sv_2io (fh)));
1421
1422 REQ_SEND;
1423 }
1424
1425 void
1426 aio_read (SV *fh, SV *offset, SV *length, SV8 *data, IV dataoffset, SV *callback=&PL_sv_undef)
1427 ALIAS:
1428 aio_read = REQ_READ
1429 aio_write = REQ_WRITE
1430 PROTOTYPE: $$$$$;$
1431 PPCODE:
1432 {
1433 STRLEN svlen;
1434 char *svptr = SvPVbyte (data, svlen);
1435 UV len = SvUV (length);
1436
1437 SvUPGRADE (data, SVt_PV);
1438 SvPOK_on (data);
1439
1440 if (dataoffset < 0)
1441 dataoffset += svlen;
1442
1443 if (dataoffset < 0 || dataoffset > svlen)
1444 croak ("dataoffset outside of data scalar");
1445
1446 if (ix == REQ_WRITE)
1447 {
1448 /* write: check length and adjust. */
1449 if (!SvOK (length) || len + dataoffset > svlen)
1450 len = svlen - dataoffset;
1451 }
1452 else
1453 {
1454 /* read: grow scalar as necessary */
1455 svptr = SvGROW (data, len + dataoffset + 1);
1456 }
1457
1458 if (len < 0)
1459 croak ("length must not be negative");
1460
1461 {
1462 dREQ;
1463
1464 req->type = ix;
1465 req->sv1 = newSVsv (fh);
1466 req->int1 = PerlIO_fileno (ix == REQ_READ ? IoIFP (sv_2io (fh))
1467 : IoOFP (sv_2io (fh)));
1468 req->offs = SvOK (offset) ? SvVAL64 (offset) : -1;
1469 req->size = len;
1470 req->sv2 = SvREFCNT_inc (data);
1471 req->ptr1 = (char *)svptr + dataoffset;
1472 req->stroffset = dataoffset;
1473
1474 if (!SvREADONLY (data))
1475 {
1476 SvREADONLY_on (data);
1477 req->flags |= FLAG_SV2_RO_OFF;
1478 }
1479
1480 REQ_SEND;
1481 }
1482 }
1483
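A short sketch of the read side, assuming $fh is an already-open handle: the requested bytes are placed into $buffer at the given data offset, and the callback receives the number of bytes read or -1:

   my $buffer = "";

   aio_read $fh, 0, 4096, $buffer, 0, sub {
      $_[0] >= 0 or die "read error: $!";
      print "read $_[0] bytes: $buffer\n";
   };
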
1484 void
1485 aio_readlink (SV8 *path, SV *callback=&PL_sv_undef)
1486 PROTOTYPE: $;$
1487 PPCODE:
1488 {
1489 SV *data;
1490 dREQ;
1491
1492 data = newSV (NAME_MAX);
1493 SvPOK_on (data);
1494
1495 req->type = REQ_READLINK;
1496 req->sv1 = newSVsv (path);
1497 req->ptr2 = SvPVbyte_nolen (req->sv1);
1498 req->sv2 = data;
1499 req->ptr1 = SvPVbyte_nolen (data);
1500
1501 REQ_SEND;
1502 }
1503
1504 void
1505 aio_sendfile (SV *out_fh, SV *in_fh, SV *in_offset, UV length, SV *callback=&PL_sv_undef)
1506 PROTOTYPE: $$$$;$
1507 PPCODE:
1508 {
1509 dREQ;
1510
1511 req->type = REQ_SENDFILE;
1512 req->sv1 = newSVsv (out_fh);
1513 req->int1 = PerlIO_fileno (IoIFP (sv_2io (out_fh)));
1514 req->sv2 = newSVsv (in_fh);
1515 req->int2 = PerlIO_fileno (IoIFP (sv_2io (in_fh)));
1516 req->offs = SvVAL64 (in_offset);
1517 req->size = length;
1518
1519 REQ_SEND;
1520 }
1521
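A hedged sketch of aio_sendfile usage; $socket and $file_fh are assumed to be open handles, and fewer bytes than requested may be copied, matching the behaviour of sendfile_ above:

   aio_sendfile $socket, $file_fh, 0, $length, sub {
      warn "copied only $_[0] of $length bytes\n"
         if $_[0] != $length;
   };
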
1522 void
1523 aio_readahead (SV *fh, SV *offset, IV length, SV *callback=&PL_sv_undef)
1524 PROTOTYPE: $$$;$
1525 PPCODE:
1526 {
1527 dREQ;
1528
1529 req->type = REQ_READAHEAD;
1530 req->sv1 = newSVsv (fh);
1531 req->int1 = PerlIO_fileno (IoIFP (sv_2io (fh)));
1532 req->offs = SvVAL64 (offset);
1533 req->size = length;
1534
1535 REQ_SEND;
1536 }
1537
1538 void
1539 aio_stat (SV8 *fh_or_path, SV *callback=&PL_sv_undef)
1540 ALIAS:
1541 aio_stat = REQ_STAT
1542 aio_lstat = REQ_LSTAT
1543 PPCODE:
1544 {
1545 dREQ;
1546
1547 req->ptr2 = malloc (sizeof (Stat_t));
1548 if (!req->ptr2)
1549 {
1550 req_destroy (req);
1551 croak ("out of memory during aio_stat statdata allocation");
1552 }
1553
1554 req->flags |= FLAG_PTR2_FREE;
1555 req->sv1 = newSVsv (fh_or_path);
1556
1557 if (SvPOK (fh_or_path))
1558 {
1559 req->type = ix;
1560 req->ptr1 = SvPVbyte_nolen (req->sv1);
1561 }
1562 else
1563 {
1564 req->type = REQ_FSTAT;
1565 req->int1 = PerlIO_fileno (IoIFP (sv_2io (fh_or_path)));
1566 }
1567
1568 REQ_SEND;
1569 }
1570
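Because req_invoke copies the result into PL_statcache, the usual "_" file tests work inside the callback. A minimal sketch:

   aio_stat "/etc/passwd", sub {
      $_[0] and die "stat failed: $!";
      print "size is ", -s _, "\n";
   };
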
1571 void
1572 aio_utime (SV8 *fh_or_path, SV *atime, SV *mtime, SV *callback=&PL_sv_undef)
1573 PPCODE:
1574 {
1575 dREQ;
1576
1577 req->nv1 = SvOK (atime) ? SvNV (atime) : -1.;
1578 req->nv2 = SvOK (mtime) ? SvNV (mtime) : -1.;
1579 req->sv1 = newSVsv (fh_or_path);
1580
1581 if (SvPOK (fh_or_path))
1582 {
1583 req->type = REQ_UTIME;
1584 req->ptr1 = SvPVbyte_nolen (req->sv1);
1585 }
1586 else
1587 {
1588 req->type = REQ_FUTIME;
1589 req->int1 = PerlIO_fileno (IoIFP (sv_2io (fh_or_path)));
1590 }
1591
1592 REQ_SEND;
1593 }
1594
1595 void
1596 aio_chmod (SV8 *fh_or_path, int mode, SV *callback=&PL_sv_undef)
1597 PPCODE:
1598 {
1599 dREQ;
1600
1601 req->mode = mode;
1602 req->sv1 = newSVsv (fh_or_path);
1603
1604 if (SvPOK (fh_or_path))
1605 {
1606 req->type = REQ_CHMOD;
1607 req->ptr1 = SvPVbyte_nolen (req->sv1);
1608 }
1609 else
1610 {
1611 req->type = REQ_FCHMOD;
1612 req->int1 = PerlIO_fileno (IoIFP (sv_2io (fh_or_path)));
1613 }
1614
1615 REQ_SEND;
1616 }
1617
1618 void
1619 aio_chown (SV8 *fh_or_path, SV *uid, SV *gid, SV *callback=&PL_sv_undef)
1620 PPCODE:
1621 {
1622 dREQ;
1623
1624 req->int2 = SvOK (uid) ? SvIV (uid) : -1;
1625 req->int3 = SvOK (gid) ? SvIV (gid) : -1;
1626 req->sv1 = newSVsv (fh_or_path);
1627
1628 if (SvPOK (fh_or_path))
1629 {
1630 req->type = REQ_CHOWN;
1631 req->ptr1 = SvPVbyte_nolen (req->sv1);
1632 }
1633 else
1634 {
1635 req->type = REQ_FCHOWN;
1636 req->int1 = PerlIO_fileno (IoIFP (sv_2io (fh_or_path)));
1637 }
1638
1639 REQ_SEND;
1640 }
1641
1642 void
1643 aio_unlink (SV8 *pathname, SV *callback=&PL_sv_undef)
1644 ALIAS:
1645 aio_unlink = REQ_UNLINK
1646 aio_rmdir = REQ_RMDIR
1647 aio_readdir = REQ_READDIR
1648 PPCODE:
1649 {
1650 dREQ;
1651
1652 req->type = ix;
1653 req->sv1 = newSVsv (pathname);
1654 req->ptr1 = SvPVbyte_nolen (req->sv1);
1655
1656 REQ_SEND;
1657 }
1658
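For the aio_readdir alias, req_invoke turns the name buffer filled by scandir_ into an array reference (all entries except "." and ".."), or passes undef on error. A short sketch:

   aio_readdir "/tmp", sub {
      my ($entries) = @_;
      $entries or die "readdir failed: $!";
      print "$_\n" for @$entries;
   };
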
1659 void
1660 aio_mkdir (SV8 *pathname, int mode, SV *callback=&PL_sv_undef)
1661 PPCODE:
1662 {
1663 dREQ;
1664
1665 req->type = REQ_MKDIR;
1666 req->sv1 = newSVsv (pathname);
1667 req->ptr1 = SvPVbyte_nolen (req->sv1);
1668 req->mode = mode;
1669
1670 REQ_SEND;
1671 }
1672
1673 void
1674 aio_link (SV8 *oldpath, SV8 *newpath, SV *callback=&PL_sv_undef)
1675 ALIAS:
1676 aio_link = REQ_LINK
1677 aio_symlink = REQ_SYMLINK
1678 aio_rename = REQ_RENAME
1679 PPCODE:
1680 {
1681 dREQ;
1682
1683 req->type = ix;
1684 req->sv2 = newSVsv (oldpath);
1685 req->ptr2 = SvPVbyte_nolen (req->sv2);
1686 req->sv1 = newSVsv (newpath);
1687 req->ptr1 = SvPVbyte_nolen (req->sv1);
1688
1689 REQ_SEND;
1690 }
1691
1692 void
1693 aio_mknod (SV8 *pathname, int mode, UV dev, SV *callback=&PL_sv_undef)
1694 PPCODE:
1695 {
1696 dREQ;
1697
1698 req->type = REQ_MKNOD;
1699 req->sv1 = newSVsv (pathname);
1700 req->ptr1 = SvPVbyte_nolen (req->sv1);
1701 req->mode = (mode_t)mode;
1702 req->offs = dev;
1703
1704 REQ_SEND;
1705 }
1706
1707 void
1708 aio_busy (double delay, SV *callback=&PL_sv_undef)
1709 PPCODE:
1710 {
1711 dREQ;
1712
1713 req->type = REQ_BUSY;
1714 req->nv1 = delay < 0. ? 0. : delay;
1715
1716 REQ_SEND;
1717 }
1718
1719 void
1720 aio_group (SV *callback=&PL_sv_undef)
1721 PROTOTYPE: ;$
1722 PPCODE:
1723 {
1724 dREQ;
1725
1726 req->type = REQ_GROUP;
1727
1728 req_send (req);
1729 XPUSHs (req_sv (req, AIO_GRP_KLASS));
1730 }
1731
1732 void
1733 aio_nop (SV *callback=&PL_sv_undef)
1734 PPCODE:
1735 {
1736 dREQ;
1737
1738 req->type = REQ_NOP;
1739
1740 REQ_SEND;
1741 }
1742
1743 int
1744 aioreq_pri (int pri = 0)
1745 PROTOTYPE: ;$
1746 CODE:
1747 RETVAL = next_pri - PRI_BIAS;
1748 if (items > 0)
1749 {
1750 if (pri < PRI_MIN) pri = PRI_MIN;
1751 if (pri > PRI_MAX) pri = PRI_MAX;
1752 next_pri = pri + PRI_BIAS;
1753 }
1754 OUTPUT:
1755 RETVAL
1756
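The priority set here applies only to the next request queued, because dREQ reads next_pri and immediately resets it to the default. A brief sketch:

   aioreq_pri 4;                            # maximum priority...
   aio_stat "/slow/nfs/path", sub { };      # ...for this request only
   aio_stat "/other/path",    sub { };      # back at the default priority
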
1757 void
1758 aioreq_nice (int nice = 0)
1759 CODE:
1760 nice = next_pri - nice;
1761 if (nice < PRI_MIN) nice = PRI_MIN;
1762 if (nice > PRI_MAX) nice = PRI_MAX;
1763 next_pri = nice + PRI_BIAS;
1764
1765 void
1766 flush ()
1767 PROTOTYPE:
1768 CODE:
1769 while (nreqs)
1770 {
1771 poll_wait ();
1772 poll_cb ();
1773 }
1774
1775 int
1776 poll()
1777 PROTOTYPE:
1778 CODE:
1779 poll_wait ();
1780 RETVAL = poll_cb ();
1781 OUTPUT:
1782 RETVAL
1783
1784 int
1785 poll_fileno()
1786 PROTOTYPE:
1787 CODE:
1788 RETVAL = respipe [0];
1789 OUTPUT:
1790 RETVAL
1791
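The dummy byte written to respipe by the worker threads is what makes event-loop integration work: watch the descriptor returned by poll_fileno for readability and drain completions with poll_cb. A minimal select-based sketch:

   use IO::AIO;

   open my $aio_fh, "<&=" . IO::AIO::poll_fileno
      or die "cannot fdopen aio descriptor: $!";

   my $rin = ''; vec ($rin, fileno $aio_fh, 1) = 1;

   while (IO::AIO::nreqs) {
      select (my $rout = $rin, undef, undef, undef);
      IO::AIO::poll_cb;
   }
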
1792 int
1793 poll_cb(...)
1794 PROTOTYPE:
1795 CODE:
1796 RETVAL = poll_cb ();
1797 OUTPUT:
1798 RETVAL
1799
1800 void
1801 poll_wait()
1802 PROTOTYPE:
1803 CODE:
1804 poll_wait ();
1805
1806 void
1807 setsig (int signum = SIGIO)
1808 PROTOTYPE: ;$
1809 CODE:
1810 {
1811 if (block_sig_level)
1812 croak ("cannot call IO::AIO::setsig from within aio_block/callback");
1813
1814 LOCK (reslock);
1815 main_tid = pthread_self ();
1816 main_sig = signum;
1817 UNLOCK (reslock);
1818
1819 if (main_sig && npending)
1820 pthread_kill (main_tid, main_sig);
1821 }
1822
1823 void
1824 aio_block (SV *cb)
1825 PROTOTYPE: &
1826 PPCODE:
1827 {
1828 int count;
1829
1830 block_sig ();
1831 PUSHMARK (SP);
1832 PUTBACK;
1833 count = call_sv (cb, GIMME_V | G_NOARGS | G_EVAL);
1834 SPAGAIN;
1835 unblock_sig ();
1836
1837 if (SvTRUE (ERRSV))
1838 croak (0);
1839
1840 XSRETURN (count);
1841 }
1842
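setsig and aio_block together form the signal-based notification mode behind main_sig/main_tid above: workers pthread_kill the main thread when results are queued, and aio_block runs code with that signal blocked so submissions do not race with the handler. A rough sketch, hedged because the exact usage pattern is not spelled out in this file:

   use IO::AIO;

   IO::AIO::setsig;                          # defaults to SIGIO
   $SIG{IO} = sub { IO::AIO::poll_cb };

   IO::AIO::aio_block (sub {
      aio_stat "/etc/passwd", sub { print "stat done\n" };
   });
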
1843 int
1844 nreqs()
1845 PROTOTYPE:
1846 CODE:
1847 RETVAL = nreqs;
1848 OUTPUT:
1849 RETVAL
1850
1851 int
1852 nready()
1853 PROTOTYPE:
1854 CODE:
1855 RETVAL = get_nready ();
1856 OUTPUT:
1857 RETVAL
1858
1859 int
1860 npending()
1861 PROTOTYPE:
1862 CODE:
1863 RETVAL = get_npending ();
1864 OUTPUT:
1865 RETVAL
1866
1867 int
1868 nthreads()
1869 PROTOTYPE:
1870 CODE:
1871 if (WORDACCESS_UNSAFE) LOCK (wrklock);
1872 RETVAL = started;
1873 if (WORDACCESS_UNSAFE) UNLOCK (wrklock);
1874 OUTPUT:
1875 RETVAL
1876
1877 PROTOTYPES: DISABLE
1878
1879 MODULE = IO::AIO PACKAGE = IO::AIO::REQ
1880
1881 void
1882 cancel (aio_req_ornot req)
1883 CODE:
1884 req_cancel (req);
1885
1886 void
1887 cb (aio_req_ornot req, SV *callback=&PL_sv_undef)
1888 CODE:
1889 SvREFCNT_dec (req->callback);
1890 req->callback = newSVsv (callback);
1891
1892 MODULE = IO::AIO PACKAGE = IO::AIO::GRP
1893
1894 void
1895 add (aio_req grp, ...)
1896 PPCODE:
1897 {
1898 int i;
1899 aio_req req;
1900
1901 if (main_sig && !block_sig_level)
1902 croak ("aio_group->add called outside aio_block/callback context while IO::AIO::setsig is in use");
1903
1904 if (grp->int1 == 2)
1905 croak ("cannot add requests to IO::AIO::GRP after the group finished");
1906
1907 for (i = 1; i < items; ++i )
1908 {
1909 if (GIMME_V != G_VOID)
1910 XPUSHs (sv_2mortal (newSVsv (ST (i))));
1911
1912 req = SvAIO_REQ (ST (i));
1913
1914 if (req)
1915 {
1916 ++grp->size;
1917 req->grp = grp;
1918
1919 req->grp_prev = 0;
1920 req->grp_next = grp->grp_first;
1921
1922 if (grp->grp_first)
1923 grp->grp_first->grp_prev = req;
1924
1925 grp->grp_first = req;
1926 }
1927 }
1928 }
1929
1930 void
1931 cancel_subs (aio_req_ornot req)
1932 CODE:
1933 req_cancel_subs (req);
1934
1935 void
1936 result (aio_req grp, ...)
1937 CODE:
1938 {
1939 int i;
1940 AV *av;
1941
1942 grp->errorno = errno;
1943
1944 av = newAV ();
1945
1946 for (i = 1; i < items; ++i )
1947 av_push (av, newSVsv (ST (i)));
1948
1949 SvREFCNT_dec (grp->sv1);
1950 grp->sv1 = (SV *)av;
1951 }
1952
1953 void
1954 errno (aio_req grp, int errorno = errno)
1955 CODE:
1956 grp->errorno = errorno;
1957
1958 void
1959 limit (aio_req grp, int limit)
1960 CODE:
1961 grp->int2 = limit;
1962 aio_grp_feed (grp);
1963
1964 void
1965 feed (aio_req grp, SV *callback=&PL_sv_undef)
1966 CODE:
1967 {
1968 SvREFCNT_dec (grp->sv2);
1969 grp->sv2 = newSVsv (callback);
1970
1971 if (grp->int2 <= 0)
1972 grp->int2 = 2;
1973
1974 aio_grp_feed (grp);
1975 }
1976
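Finally, a sketch of the grouping machinery in this package, using limit and feed together: the feeder registered via feed is called from aio_grp_feed whenever fewer than the limit of subrequests are in flight, and the group callback fires once everything it contains has finished:

   my @files = glob "*.c";

   my $grp = aio_group sub { print "all stats done\n" };

   $grp->limit (4);             # keep at most 4 subrequests outstanding
   $grp->feed (sub {
      my $file = shift @files
         or return;             # nothing left: the feeder gets dropped

      $grp->add (aio_stat $file, sub { });
   });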