/cvs/IO-AIO/AIO.xs
Revision: 1.101
Committed: Fri Jun 1 13:25:51 2007 UTC by root
Branch: MAIN
Changes since 1.100: +35 -24 lines
Log Message:
*** empty log message ***

File Contents

# Content
1 #include "xthread.h"
2
3 #include <errno.h>
4
5 #include "EXTERN.h"
6 #include "perl.h"
7 #include "XSUB.h"
8
9 #include "autoconf/config.h"
10
11 #include <stddef.h>
12 #include <stdlib.h>
13 #include <errno.h>
14 #include <sys/time.h>
15 #include <sys/select.h>
16 #include <sys/types.h>
17 #include <sys/stat.h>
18 #include <limits.h>
19 #include <unistd.h>
20 #include <fcntl.h>
21 #include <signal.h>
22 #include <sched.h>
23 #include <utime.h>
24
25 #if HAVE_SENDFILE
26 # if __linux
27 # include <sys/sendfile.h>
28 # elif __freebsd
29 # include <sys/socket.h>
30 # include <sys/uio.h>
31 # elif __hpux
32 # include <sys/socket.h>
33 # elif __solaris /* not yet */
34 # include <sys/sendfile.h>
35 # else
36 # error sendfile support requested but not available
37 # endif
38 #endif
39
40 /* number of seconds after which idle threads exit */
41 #define IDLE_TIMEOUT 10
42
43 /* used for struct dirent buffer sizing; AIX doesn't provide NAME_MAX */
44 #ifndef NAME_MAX
45 # define NAME_MAX 4096
46 #endif
47
48 /* buffer size for various temporary buffers */
49 #define AIO_BUFSIZE 65536
50
51 /* use NV for 32 bit perls as it allows larger offsets */
52 #if IVSIZE >= 8
53 # define SvVAL64 SvIV
54 #else
55 # define SvVAL64 SvNV
56 #endif
57
58 #define dBUF \
59 char *aio_buf; \
60 LOCK (wrklock); \
61 self->dbuf = aio_buf = malloc (AIO_BUFSIZE); \
62 UNLOCK (wrklock); \
63 if (!aio_buf) \
64 return -1;
65
66 typedef SV SV8; /* byte-sv, used for argument-checking */
67
68 enum {
69 REQ_QUIT,
70 REQ_OPEN, REQ_CLOSE,
71 REQ_READ, REQ_WRITE,
72 REQ_READAHEAD, REQ_SENDFILE,
73 REQ_STAT, REQ_LSTAT, REQ_FSTAT,
74 REQ_UTIME, REQ_FUTIME,
75 REQ_CHMOD, REQ_FCHMOD,
76 REQ_CHOWN, REQ_FCHOWN,
77 REQ_FSYNC, REQ_FDATASYNC,
78 REQ_UNLINK, REQ_RMDIR, REQ_MKDIR, REQ_RENAME,
79 REQ_MKNOD, REQ_READDIR,
80 REQ_LINK, REQ_SYMLINK, REQ_READLINK,
81 REQ_GROUP, REQ_NOP,
82 REQ_BUSY,
83 };
84
85 #define AIO_REQ_KLASS "IO::AIO::REQ"
86 #define AIO_GRP_KLASS "IO::AIO::GRP"
87
88 typedef struct aio_cb
89 {
90 struct aio_cb *volatile next;
91
92 SV *callback;
93 SV *sv1, *sv2;
94 void *ptr1, *ptr2;
95 off_t offs;
96 size_t size;
97 ssize_t result;
98 double nv1, nv2;
99
100 STRLEN stroffset;
101 int type;
102 int int1, int2, int3;
103 int errorno;
104 mode_t mode; /* open */
105
106 unsigned char flags;
107 unsigned char pri;
108
109 SV *self; /* the perl counterpart of this request, if any */
110 struct aio_cb *grp, *grp_prev, *grp_next, *grp_first;
111 } aio_cb;
112
113 enum {
114 FLAG_CANCELLED = 0x01, /* request was cancelled */
115 FLAG_SV2_RO_OFF = 0x40, /* data was set readonly */
116 FLAG_PTR2_FREE = 0x80, /* need to free(ptr2) */
117 };
118
119 typedef aio_cb *aio_req;
120 typedef aio_cb *aio_req_ornot;
121
122 enum {
123 PRI_MIN = -4,
124 PRI_MAX = 4,
125
126 DEFAULT_PRI = 0,
127 PRI_BIAS = -PRI_MIN,
128 NUM_PRI = PRI_MAX + PRI_BIAS + 1,
129 };
130
131 #define AIO_TICKS ((1000000 + 1023) >> 10)
132
133 static unsigned int max_poll_time = 0;
134 static unsigned int max_poll_reqs = 0;
135
136 /* calculate time difference in ~1/AIO_TICKS of a second */
137 static int tvdiff (struct timeval *tv1, struct timeval *tv2)
138 {
139 return (tv2->tv_sec - tv1->tv_sec ) * AIO_TICKS
140 + ((tv2->tv_usec - tv1->tv_usec) >> 10);
141 }
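
/* Illustrative sketch only, not part of the module: AIO_TICKS is
   (1000000 + 1023) >> 10 == 977, so one tick is roughly a millisecond.
   max_poll_time is kept in the same unit (the max_poll_time XS sub below
   stores nseconds * AIO_TICKS), which is why poll_cb can compare tvdiff
   results against it directly. The helper name below is hypothetical. */
#if 0
static int poll_budget_used_up (struct timeval *start)
{
  struct timeval now;

  gettimeofday (&now, 0);

  /* e.g. a 0.01 s budget is stored as 0.01 * 977, about 9 ticks */
  return tvdiff (start, &now) >= max_poll_time;
}
#endif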
142
143 static thread_t main_tid;
144 static int main_sig;
145 static int block_sig_level;
146
147 void block_sig ()
148 {
149 sigset_t ss;
150
151 if (block_sig_level++)
152 return;
153
154 if (!main_sig)
155 return;
156
157 sigemptyset (&ss);
158 sigaddset (&ss, main_sig);
159 pthread_sigmask (SIG_BLOCK, &ss, 0);
160 }
161
162 void unblock_sig ()
163 {
164 sigset_t ss;
165
166 if (--block_sig_level)
167 return;
168
169 if (!main_sig)
170 return;
171
172 sigemptyset (&ss);
173 sigaddset (&ss, main_sig);
174 pthread_sigmask (SIG_UNBLOCK, &ss, 0);
175 }
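
/* Illustrative sketch only, not part of the module: block_sig/unblock_sig
   nest via block_sig_level, so code that already blocked main_sig can enter
   another critical section without unblocking too early. Only the outermost
   unblock_sig actually applies SIG_UNBLOCK. */
#if 0
static void sig_nesting_example (void)
{
  block_sig ();     /* level 0 -> 1: main_sig (if set) is blocked        */
  block_sig ();     /* level 1 -> 2: only the counter changes            */
  unblock_sig ();   /* level 2 -> 1: signal stays blocked                */
  unblock_sig ();   /* level 1 -> 0: pthread_sigmask (SIG_UNBLOCK, ...)  */
}
#endif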
176
177 static int next_pri = DEFAULT_PRI + PRI_BIAS;
178
179 static unsigned int started, idle, wanted;
180
181 /* worker threads management */
182 static mutex_t wrklock = MUTEX_INIT;
183
184 typedef struct worker {
185 /* locked by wrklock */
186 struct worker *prev, *next;
187
188 thread_t tid;
189
190 /* locked by reslock, reqlock or wrklock */
191 aio_req req; /* currently processed request */
192 void *dbuf;
193 DIR *dirp;
194 } worker;
195
196 static worker wrk_first = { &wrk_first, &wrk_first, 0 };
197
198 static void worker_clear (worker *wrk)
199 {
200 if (wrk->dirp)
201 {
202 closedir (wrk->dirp);
203 wrk->dirp = 0;
204 }
205
206 if (wrk->dbuf)
207 {
208 free (wrk->dbuf);
209 wrk->dbuf = 0;
210 }
211 }
212
213 static void worker_free (worker *wrk)
214 {
215 wrk->next->prev = wrk->prev;
216 wrk->prev->next = wrk->next;
217
218 free (wrk);
219 }
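
/* Illustrative sketch only, not part of the module: how the dBUF macro
   defined above is meant to be used inside a worker helper. It expects a
   "worker *self" in scope, registers the buffer in self->dbuf so that
   worker_clear can free it even when the request is aborted, and returns -1
   from the enclosing function when malloc fails. The helper below is
   hypothetical. */
#if 0
static ssize_t copy_once_example (int ifd, int ofd, worker *self)
{
  ssize_t len;
  dBUF;                                    /* declares and allocates aio_buf */

  len = read (ifd, aio_buf, AIO_BUFSIZE);

  if (len > 0)
    len = write (ofd, aio_buf, len);

  return len;                              /* aio_buf is freed via worker_clear */
}
#endif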
220
221 static volatile unsigned int nreqs, nready, npending;
222 static volatile unsigned int max_idle = 4;
223 static volatile unsigned int max_outstanding = 0xffffffff;
224 static int respipe [2];
225
226 static mutex_t reslock = MUTEX_INIT;
227 static mutex_t reqlock = MUTEX_INIT;
228 static cond_t reqwait = COND_INIT;
229
230 #if WORDACCESS_UNSAFE
231
232 static unsigned int get_nready ()
233 {
234 unsigned int retval;
235
236 LOCK (reqlock);
237 retval = nready;
238 UNLOCK (reqlock);
239
240 return retval;
241 }
242
243 static unsigned int get_npending ()
244 {
245 unsigned int retval;
246
247 LOCK (reslock);
248 retval = npending;
249 UNLOCK (reslock);
250
251 return retval;
252 }
253
254 static unsigned int get_nthreads ()
255 {
256 unsigned int retval;
257
258 LOCK (wrklock);
259 retval = started;
260 UNLOCK (wrklock);
261
262 return retval;
263 }
264
265 #else
266
267 # define get_nready() nready
268 # define get_npending() npending
269 # define get_nthreads() started
270
271 #endif
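
/* Illustrative sketch only, not part of the module: on targets where plain
   word reads are not atomic (WORDACCESS_UNSAFE), counters written by worker
   threads must be sampled through the accessors above, which take the lock
   protecting the counter; elsewhere the macros read the variables directly.
   nreqs is only ever modified by the main thread, so a plain read suffices.
   The helper below is hypothetical. */
#if 0
static unsigned int executing_now_example (void)
{
  /* outstanding minus not-yet-started minus finished-but-not-reaped */
  return nreqs - get_nready () - get_npending ();
}
#endif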
272
273 /*
274 * a somewhat faster data structure might be nice, but
275 * with 8 priorities this actually needs <20 insns
276 * per shift, the most expensive operation.
277 */
278 typedef struct {
279 aio_req qs[NUM_PRI], qe[NUM_PRI]; /* qstart, qend */
280 int size;
281 } reqq;
282
283 static reqq req_queue;
284 static reqq res_queue;
285
286 int reqq_push (reqq *q, aio_req req)
287 {
288 int pri = req->pri;
289 req->next = 0;
290
291 if (q->qe[pri])
292 {
293 q->qe[pri]->next = req;
294 q->qe[pri] = req;
295 }
296 else
297 q->qe[pri] = q->qs[pri] = req;
298
299 return q->size++;
300 }
301
302 aio_req reqq_shift (reqq *q)
303 {
304 int pri;
305
306 if (!q->size)
307 return 0;
308
309 --q->size;
310
311 for (pri = NUM_PRI; pri--; )
312 {
313 aio_req req = q->qs[pri];
314
315 if (req)
316 {
317 if (!(q->qs[pri] = req->next))
318 q->qe[pri] = 0;
319
320 return req;
321 }
322 }
323
324 abort ();
325 }
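
/* Illustrative sketch only, not part of the module: reqq_push appends at the
   tail of the per-priority list and reqq_shift scans from PRI_MAX downwards,
   so requests come out highest-priority first and FIFO within one priority.
   The dummy requests below exist only for the example and are never run. */
#if 0
static void reqq_order_example (void)
{
  static reqq q;                 /* zero-initialised queue           */
  static aio_cb a, b, c;         /* dummy requests, never executed   */

  a.pri = DEFAULT_PRI + PRI_BIAS;
  b.pri = PRI_MAX     + PRI_BIAS;
  c.pri = DEFAULT_PRI + PRI_BIAS;

  reqq_push (&q, &a);
  reqq_push (&q, &b);
  reqq_push (&q, &c);

  /* shifts b first (highest priority), then a, then c (FIFO within pri) */
  reqq_shift (&q);
  reqq_shift (&q);
  reqq_shift (&q);
}
#endif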
326
327 static int poll_cb ();
328 static int req_invoke (aio_req req);
329 static void req_destroy (aio_req req);
330 static void req_cancel (aio_req req);
331
332 /* must be called at most once */
333 static SV *req_sv (aio_req req, const char *klass)
334 {
335 if (!req->self)
336 {
337 req->self = (SV *)newHV ();
338 sv_magic (req->self, 0, PERL_MAGIC_ext, (char *)req, 0);
339 }
340
341 return sv_2mortal (sv_bless (newRV_inc (req->self), gv_stashpv (klass, 1)));
342 }
343
344 static aio_req SvAIO_REQ (SV *sv)
345 {
346 MAGIC *mg;
347
348 if (!sv_derived_from (sv, AIO_REQ_KLASS) || !SvROK (sv))
349 croak ("object of class " AIO_REQ_KLASS " expected");
350
351 mg = mg_find (SvRV (sv), PERL_MAGIC_ext);
352
353 return mg ? (aio_req)mg->mg_ptr : 0;
354 }
355
356 static void aio_grp_feed (aio_req grp)
357 {
358 block_sig ();
359
360 while (grp->size < grp->int2 && !(grp->flags & FLAG_CANCELLED))
361 {
362 int old_len = grp->size;
363
364 if (grp->sv2 && SvOK (grp->sv2))
365 {
366 dSP;
367
368 ENTER;
369 SAVETMPS;
370 PUSHMARK (SP);
371 XPUSHs (req_sv (grp, AIO_GRP_KLASS));
372 PUTBACK;
373 call_sv (grp->sv2, G_VOID | G_EVAL | G_KEEPERR);
374 SPAGAIN;
375 FREETMPS;
376 LEAVE;
377 }
378
379 /* stop if no progress has been made */
380 if (old_len == grp->size)
381 {
382 SvREFCNT_dec (grp->sv2);
383 grp->sv2 = 0;
384 break;
385 }
386 }
387
388 unblock_sig ();
389 }
390
391 static void aio_grp_dec (aio_req grp)
392 {
393 --grp->size;
394
395 /* call feeder, if applicable */
396 aio_grp_feed (grp);
397
398 /* finish, if done */
399 if (!grp->size && grp->int1)
400 {
401 block_sig ();
402
403 if (!req_invoke (grp))
404 {
405 req_destroy (grp);
406 unblock_sig ();
407 croak (0);
408 }
409
410 req_destroy (grp);
411 unblock_sig ();
412 }
413 }
414
415 static int req_invoke (aio_req req)
416 {
417 dSP;
418
419 if (req->flags & FLAG_SV2_RO_OFF)
420 SvREADONLY_off (req->sv2);
421
422 if (!(req->flags & FLAG_CANCELLED) && SvOK (req->callback))
423 {
424 ENTER;
425 SAVETMPS;
426 PUSHMARK (SP);
427 EXTEND (SP, 1);
428
429 switch (req->type)
430 {
431 case REQ_READDIR:
432 {
433 SV *rv = &PL_sv_undef;
434
435 if (req->result >= 0)
436 {
437 int i;
438 char *buf = req->ptr2;
439 AV *av = newAV ();
440
441 av_extend (av, req->result - 1);
442
443 for (i = 0; i < req->result; ++i)
444 {
445 SV *sv = newSVpv (buf, 0);
446
447 av_store (av, i, sv);
448 buf += SvCUR (sv) + 1;
449 }
450
451 rv = sv_2mortal (newRV_noinc ((SV *)av));
452 }
453
454 PUSHs (rv);
455 }
456 break;
457
458 case REQ_OPEN:
459 {
460 /* convert fd to fh */
461 SV *fh;
462
463 PUSHs (sv_2mortal (newSViv (req->result)));
464 PUTBACK;
465 call_pv ("IO::AIO::_fd2fh", G_SCALAR | G_EVAL);
466 SPAGAIN;
467
468 fh = POPs;
469 PUSHMARK (SP);
470 XPUSHs (fh);
471 }
472 break;
473
474 case REQ_GROUP:
475 req->int1 = 2; /* mark group as finished */
476
477 if (req->sv1)
478 {
479 int i;
480 AV *av = (AV *)req->sv1;
481
482 EXTEND (SP, AvFILL (av) + 1);
483 for (i = 0; i <= AvFILL (av); ++i)
484 PUSHs (*av_fetch (av, i, 0));
485 }
486 break;
487
488 case REQ_NOP:
489 case REQ_BUSY:
490 break;
491
492 case REQ_READLINK:
493 if (req->result > 0)
494 {
495 SvCUR_set (req->sv2, req->result);
496 *SvEND (req->sv2) = 0;
497 PUSHs (req->sv2);
498 }
499 break;
500
501 case REQ_STAT:
502 case REQ_LSTAT:
503 case REQ_FSTAT:
504 PL_laststype = req->type == REQ_LSTAT ? OP_LSTAT : OP_STAT;
505 PL_laststatval = req->result;
506 PL_statcache = *(Stat_t *)(req->ptr2);
507 PUSHs (sv_2mortal (newSViv (req->result)));
508 break;
509
510 case REQ_READ:
511 SvCUR_set (req->sv2, req->stroffset + (req->result > 0 ? req->result : 0));
512 *SvEND (req->sv2) = 0;
513 PUSHs (sv_2mortal (newSViv (req->result)));
514 break;
515
516 default:
517 PUSHs (sv_2mortal (newSViv (req->result)));
518 break;
519 }
520
521 errno = req->errorno;
522
523 PUTBACK;
524 call_sv (req->callback, G_VOID | G_EVAL);
525 SPAGAIN;
526
527 FREETMPS;
528 LEAVE;
529 }
530
531 if (req->grp)
532 {
533 aio_req grp = req->grp;
534
535 /* unlink request */
536 if (req->grp_next) req->grp_next->grp_prev = req->grp_prev;
537 if (req->grp_prev) req->grp_prev->grp_next = req->grp_next;
538
539 if (grp->grp_first == req)
540 grp->grp_first = req->grp_next;
541
542 aio_grp_dec (grp);
543 }
544
545 return !SvTRUE (ERRSV);
546 }
547
548 static void req_destroy (aio_req req)
549 {
550 if (req->self)
551 {
552 sv_unmagic (req->self, PERL_MAGIC_ext);
553 SvREFCNT_dec (req->self);
554 }
555
556 SvREFCNT_dec (req->sv1);
557 SvREFCNT_dec (req->sv2);
558 SvREFCNT_dec (req->callback);
559
560 if (req->flags & FLAG_PTR2_FREE)
561 free (req->ptr2);
562
563 Safefree (req);
564 }
565
566 static void req_cancel_subs (aio_req grp)
567 {
568 aio_req sub;
569
570 if (grp->type != REQ_GROUP)
571 return;
572
573 SvREFCNT_dec (grp->sv2);
574 grp->sv2 = 0;
575
576 for (sub = grp->grp_first; sub; sub = sub->grp_next)
577 req_cancel (sub);
578 }
579
580 static void req_cancel (aio_req req)
581 {
582 req->flags |= FLAG_CANCELLED;
583
584 req_cancel_subs (req);
585 }
586
587 static void *aio_proc (void *arg);
588
589 static void start_thread (void)
590 {
591 worker *wrk = calloc (1, sizeof (worker));
592
593 if (!wrk)
594 croak ("unable to allocate worker thread data");
595
596 LOCK (wrklock);
597
598 if (thread_create (&wrk->tid, aio_proc, (void *)wrk))
599 {
600 wrk->prev = &wrk_first;
601 wrk->next = wrk_first.next;
602 wrk_first.next->prev = wrk;
603 wrk_first.next = wrk;
604 ++started;
605 }
606 else
607 free (wrk);
608
609 UNLOCK (wrklock);
610 }
611
612 static void maybe_start_thread ()
613 {
614 if (get_nthreads () >= wanted)
615 return;
616
617 /* todo: maybe use idle here, but might be less exact */
618 if (0 <= (int)get_nthreads () + (int)get_npending () - (int)nreqs)
619 return;
620
621 start_thread ();
622 }
623
624 static void req_send (aio_req req)
625 {
626 block_sig ();
627
628 ++nreqs;
629
630 LOCK (reqlock);
631 ++nready;
632 reqq_push (&req_queue, req);
633 COND_SIGNAL (reqwait);
634 UNLOCK (reqlock);
635
636 unblock_sig ();
637
638 maybe_start_thread ();
639 }
640
641 static void end_thread (void)
642 {
643 aio_req req;
644
645 Newz (0, req, 1, aio_cb);
646
647 req->type = REQ_QUIT;
648 req->pri = PRI_MAX + PRI_BIAS;
649
650 LOCK (reqlock);
651 reqq_push (&req_queue, req);
652 COND_SIGNAL (reqwait);
653 UNLOCK (reqlock);
654
655 LOCK (wrklock);
656 --started;
657 UNLOCK (wrklock);
658 }
659
660 static void set_max_idle (int nthreads)
661 {
662 if (WORDACCESS_UNSAFE) LOCK (reqlock);
663 max_idle = nthreads <= 0 ? 1 : nthreads;
664 if (WORDACCESS_UNSAFE) UNLOCK (reqlock);
665 }
666
667 static void min_parallel (int nthreads)
668 {
669 if (wanted < nthreads)
670 wanted = nthreads;
671 }
672
673 static void max_parallel (int nthreads)
674 {
675 if (wanted > nthreads)
676 wanted = nthreads;
677
678 while (started > wanted)
679 end_thread ();
680 }
681
682 static void poll_wait ()
683 {
684 fd_set rfd;
685
686 while (nreqs)
687 {
688 int size;
689 if (WORDACCESS_UNSAFE) LOCK (reslock);
690 size = res_queue.size;
691 if (WORDACCESS_UNSAFE) UNLOCK (reslock);
692
693 if (size)
694 return;
695
696 maybe_start_thread ();
697
698 FD_ZERO(&rfd);
699 FD_SET(respipe [0], &rfd);
700
701 select (respipe [0] + 1, &rfd, 0, 0, 0);
702 }
703 }
704
705 static int poll_cb ()
706 {
707 dSP;
708 int count = 0;
709 int maxreqs = max_poll_reqs;
710 int do_croak = 0;
711 struct timeval tv_start, tv_now;
712 aio_req req;
713
714 if (max_poll_time)
715 gettimeofday (&tv_start, 0);
716
717 block_sig ();
718
719 for (;;)
720 {
721 for (;;)
722 {
723 maybe_start_thread ();
724
725 LOCK (reslock);
726 req = reqq_shift (&res_queue);
727
728 if (req)
729 {
730 --npending;
731
732 if (!res_queue.size)
733 {
734 /* drain the wake-up bytes written by the worker threads */
735 char buf [4];
736 while (read (respipe [0], buf, 4) == 4)
737 ;
738 }
739 }
740
741 UNLOCK (reslock);
742
743 if (!req)
744 break;
745
746 --nreqs;
747
748 if (req->type == REQ_GROUP && req->size)
749 {
750 req->int1 = 1; /* mark request as delayed */
751 continue;
752 }
753 else
754 {
755 if (!req_invoke (req))
756 {
757 req_destroy (req);
758 unblock_sig ();
759 croak (0);
760 }
761
762 count++;
763 }
764
765 req_destroy (req);
766
767 if (maxreqs && !--maxreqs)
768 break;
769
770 if (max_poll_time)
771 {
772 gettimeofday (&tv_now, 0);
773
774 if (tvdiff (&tv_start, &tv_now) >= max_poll_time)
775 break;
776 }
777 }
778
779 if (nreqs <= max_outstanding)
780 break;
781
782 poll_wait ();
783
784 ++maxreqs;
785 }
786
787 unblock_sig ();
788 return count;
789 }
790
791 static void create_pipe ()
792 {
793 if (pipe (respipe))
794 croak ("unable to initialize result pipe");
795
796 if (fcntl (respipe [0], F_SETFL, O_NONBLOCK))
797 croak ("cannot set result pipe to nonblocking mode");
798
799 if (fcntl (respipe [1], F_SETFL, O_NONBLOCK))
800 croak ("cannot set result pipe to nonblocking mode");
801 }
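
/* Illustrative sketch only, not part of the module: respipe is a classic
   self-pipe. A worker writes a single wake-up byte when the result queue
   goes from empty to non-empty (see aio_proc), so an event loop can watch
   respipe [0] for readability and then call poll_cb to run the callbacks.
   The helper below is hypothetical. */
#if 0
static void event_loop_step_example (void)
{
  fd_set rfd;

  FD_ZERO (&rfd);
  FD_SET (respipe [0], &rfd);

  /* wait until at least one result is pending, then reap it */
  if (select (respipe [0] + 1, &rfd, 0, 0, 0) > 0)
    poll_cb ();
}
#endif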
802
803 /*****************************************************************************/
804 /* work around various missing functions */
805
806 #if !HAVE_PREADWRITE
807 # define pread aio_pread
808 # define pwrite aio_pwrite
809
810 /*
811 * make our pread/pwrite safe against themselves, but not against
812 * normal read/write by using a mutex. slows down execution a lot,
813 * but that's your problem, not mine.
814 */
815 static mutex_t preadwritelock = MUTEX_INIT;
816
817 static ssize_t pread (int fd, void *buf, size_t count, off_t offset)
818 {
819 ssize_t res;
820 off_t ooffset;
821
822 LOCK (preadwritelock);
823 ooffset = lseek (fd, 0, SEEK_CUR);
824 lseek (fd, offset, SEEK_SET);
825 res = read (fd, buf, count);
826 lseek (fd, ooffset, SEEK_SET);
827 UNLOCK (preadwritelock);
828
829 return res;
830 }
831
832 static ssize_t pwrite (int fd, void *buf, size_t count, off_t offset)
833 {
834 ssize_t res;
835 off_t ooffset;
836
837 LOCK (preadwritelock);
838 ooffset = lseek (fd, 0, SEEK_CUR);
839 lseek (fd, offset, SEEK_SET);
840 res = write (fd, buf, count);
841 lseek (fd, ooffset, SEEK_SET); /* restore the original offset; pwrite must not move it */
842 UNLOCK (preadwritelock);
843
844 return res;
845 }
846 #endif
847
848 #ifndef HAVE_FUTIMES
849
850 # define utimes(path,times) aio_utimes (path, times)
851 # define futimes(fd,times) aio_futimes (fd, times)
852
853 int aio_utimes (const char *filename, const struct timeval times[2])
854 {
855 if (times)
856 {
857 struct utimbuf buf;
858
859 buf.actime = times[0].tv_sec;
860 buf.modtime = times[1].tv_sec;
861
862 return utime (filename, &buf);
863 }
864 else
865 return utime (filename, 0);
866 }
867
868 int aio_futimes (int fd, const struct timeval tv[2])
869 {
870 errno = ENOSYS;
871 return -1;
872 }
873
874 #endif
875
876 #if !HAVE_FDATASYNC
877 # define fdatasync fsync
878 #endif
879
880 #if !HAVE_READAHEAD
881 # define readahead(fd,offset,count) aio_readahead (fd, offset, count, self)
882
883 static ssize_t aio_readahead (int fd, off_t offset, size_t count, worker *self)
884 {
885 dBUF;
886
887 while (count > 0)
888 {
889 size_t len = count < AIO_BUFSIZE ? count : AIO_BUFSIZE;
890
891 pread (fd, aio_buf, len, offset);
892 offset += len;
893 count -= len;
894 }
895
896 errno = 0; return 0; /* readahead(2) reports success with 0 */
897 }
898
899 #endif
900
901 #if !HAVE_READDIR_R
902 # define readdir_r aio_readdir_r
903
904 static mutex_t readdirlock = MUTEX_INIT;
905
906 static int readdir_r (DIR *dirp, struct dirent *ent, struct dirent **res)
907 {
908 struct dirent *e;
909 int errorno;
910
911 LOCK (readdirlock);
912
913 e = readdir (dirp);
914 errorno = errno;
915
916 if (e)
917 {
918 *res = ent;
919 strcpy (ent->d_name, e->d_name);
920 }
921 else
922 *res = 0;
923
924 UNLOCK (readdirlock);
925
926 errno = errorno;
927 return e ? 0 : -1;
928 }
929 #endif
930
931 /* sendfile always needs emulation */
932 static ssize_t sendfile_ (int ofd, int ifd, off_t offset, size_t count, worker *self)
933 {
934 ssize_t res;
935
936 if (!count)
937 return 0;
938
939 #if HAVE_SENDFILE
940 # if __linux
941 res = sendfile (ofd, ifd, &offset, count);
942
943 # elif __freebsd
944 /*
945 * Of course, the freebsd sendfile is a dire hack with no thoughts
946 * wasted on making it similar to other I/O functions.
947 */
948 {
949 off_t sbytes;
950 res = sendfile (ifd, ofd, offset, count, 0, &sbytes, 0);
951
952 if (res < 0 && sbytes)
953 /* maybe only on EAGAIN: as usual, the manpage leaves you guessing */
954 res = sbytes;
955 }
956
957 # elif __hpux
958 res = sendfile (ofd, ifd, offset, count, 0, 0);
959
960 # elif __solaris
961 {
962 struct sendfilevec vec;
963 size_t sbytes;
964
965 vec.sfv_fd = ifd;
966 vec.sfv_flag = 0;
967 vec.sfv_off = offset;
968 vec.sfv_len = count;
969
970 res = sendfilev (ofd, &vec, 1, &sbytes);
971
972 if (res < 0 && sbytes)
973 res = sbytes;
974 }
975
976 # endif
977 #else
978 res = -1;
979 errno = ENOSYS;
980 #endif
981
982 if (res < 0
983 && (errno == ENOSYS || errno == EINVAL || errno == ENOTSOCK
984 #if __solaris
985 || errno == EAFNOSUPPORT || errno == EPROTOTYPE
986 #endif
987 )
988 )
989 {
990 /* emulate sendfile. this is a major pain in the ass */
991 dBUF;
992
993 res = 0;
994
995 while (count)
996 {
997 ssize_t cnt;
998
999 cnt = pread (ifd, aio_buf, count > AIO_BUFSIZE ? AIO_BUFSIZE : count, offset);
1000
1001 if (cnt <= 0)
1002 {
1003 if (cnt && !res) res = -1;
1004 break;
1005 }
1006
1007 cnt = write (ofd, aio_buf, cnt);
1008
1009 if (cnt <= 0)
1010 {
1011 if (cnt && !res) res = -1;
1012 break;
1013 }
1014
1015 offset += cnt;
1016 res += cnt;
1017 count -= cnt;
1018 }
1019 }
1020
1021 return res;
1022 }
1023
1024 /* read a full directory */
1025 static void scandir_ (aio_req req, worker *self)
1026 {
1027 DIR *dirp;
1028 union
1029 {
1030 struct dirent d;
1031 char b [offsetof (struct dirent, d_name) + NAME_MAX + 1];
1032 } *u;
1033 struct dirent *entp;
1034 char *name, *names;
1035 int memlen = 4096;
1036 int memofs = 0;
1037 int res = 0;
1038 int errorno;
1039
1040 LOCK (wrklock);
1041 self->dirp = dirp = opendir (req->ptr1);
1042 self->dbuf = u = malloc (sizeof (*u));
1043 req->flags |= FLAG_PTR2_FREE;
1044 req->ptr2 = names = malloc (memlen);
1045 UNLOCK (wrklock);
1046
1047 if (dirp && u && names)
1048 for (;;)
1049 {
1050 errno = 0;
1051 readdir_r (dirp, &u->d, &entp);
1052
1053 if (!entp)
1054 break;
1055
1056 name = entp->d_name;
1057
1058 if (name [0] != '.' || (name [1] && (name [1] != '.' || name [2])))
1059 {
1060 int len = strlen (name) + 1;
1061
1062 res++;
1063
1064 while (memofs + len > memlen)
1065 {
1066 memlen *= 2;
1067 LOCK (wrklock);
1068 req->ptr2 = names = realloc (names, memlen);
1069 UNLOCK (wrklock);
1070
1071 if (!names)
1072 break;
1073 }
1074
1075 memcpy (names + memofs, name, len);
1076 memofs += len;
1077 }
1078 }
1079
1080 if (errno)
1081 res = -1;
1082
1083 req->result = res;
1084 }
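
/* Illustrative sketch only, not part of the module: scandir_ packs the
   directory entries into one flat buffer of NUL-terminated names
   ("foo\0bar\0baz\0"), stores it in req->ptr2 and the entry count in
   req->result; the REQ_READDIR branch of req_invoke walks the buffer with
   the same convention. The helper below is hypothetical. */
#if 0
static size_t packed_names_bytes_example (const char *names, int count)
{
  const char *p = names;
  int i;

  for (i = 0; i < count; ++i)
    p += strlen (p) + 1;         /* skip one name and its trailing NUL */

  return p - names;              /* bytes actually used in req->ptr2   */
}
#endif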
1085
1086 /*****************************************************************************/
1087
1088 static void *aio_proc (void *thr_arg)
1089 {
1090 aio_req req;
1091 struct timespec ts;
1092 worker *self = (worker *)thr_arg;
1093
1094 /* try to distribute timeouts somewhat evenly */
1095 ts.tv_nsec = (((unsigned long)self + (unsigned long)ts.tv_sec) & 1023UL)
1096 * (1000000000UL / 1024UL);
1097
1098 for (;;)
1099 {
1100 ts.tv_sec = time (0) + IDLE_TIMEOUT;
1101
1102 LOCK (reqlock);
1103
1104 for (;;)
1105 {
1106 self->req = req = reqq_shift (&req_queue);
1107
1108 if (req)
1109 break;
1110
1111 ++idle;
1112
1113 if (COND_TIMEDWAIT (reqwait, reqlock, ts)
1114 == ETIMEDOUT)
1115 {
1116 if (idle > max_idle)
1117 {
1118 --idle;
1119 UNLOCK (reqlock);
1120 LOCK (wrklock);
1121 --started;
1122 UNLOCK (wrklock);
1123 goto quit;
1124 }
1125
1126 /* we are allowed to idle, so do so without any timeout */
1127 COND_WAIT (reqwait, reqlock);
1128 ts.tv_sec = time (0) + IDLE_TIMEOUT;
1129 }
1130
1131 --idle;
1132 }
1133
1134 --nready;
1135
1136 UNLOCK (reqlock);
1137
1138 errno = 0; /* strictly unnecessary */
1139
1140 if (!(req->flags & FLAG_CANCELLED))
1141 switch (req->type)
1142 {
1143 case REQ_READ: req->result = req->offs >= 0
1144 ? pread (req->int1, req->ptr1, req->size, req->offs)
1145 : read (req->int1, req->ptr1, req->size); break;
1146 case REQ_WRITE: req->result = req->offs >= 0
1147 ? pwrite (req->int1, req->ptr1, req->size, req->offs)
1148 : write (req->int1, req->ptr1, req->size); break;
1149
1150 case REQ_READAHEAD: req->result = readahead (req->int1, req->offs, req->size); break;
1151 case REQ_SENDFILE: req->result = sendfile_ (req->int1, req->int2, req->offs, req->size, self); break;
1152
1153 case REQ_STAT: req->result = stat (req->ptr1, (Stat_t *)req->ptr2); break;
1154 case REQ_LSTAT: req->result = lstat (req->ptr1, (Stat_t *)req->ptr2); break;
1155 case REQ_FSTAT: req->result = fstat (req->int1, (Stat_t *)req->ptr2); break;
1156
1157 case REQ_CHOWN: req->result = chown (req->ptr1, req->int2, req->int3); break;
1158 case REQ_FCHOWN: req->result = fchown (req->int1, req->int2, req->int3); break;
1159 case REQ_CHMOD: req->result = chmod (req->ptr1, req->mode); break;
1160 case REQ_FCHMOD: req->result = fchmod (req->int1, req->mode); break;
1161
1162 case REQ_OPEN: req->result = open (req->ptr1, req->int1, req->mode); break;
1163 case REQ_CLOSE: req->result = close (req->int1); break;
1164 case REQ_UNLINK: req->result = unlink (req->ptr1); break;
1165 case REQ_RMDIR: req->result = rmdir (req->ptr1); break;
1166 case REQ_MKDIR: req->result = mkdir (req->ptr1, req->mode); break;
1167 case REQ_RENAME: req->result = rename (req->ptr2, req->ptr1); break;
1168 case REQ_LINK: req->result = link (req->ptr2, req->ptr1); break;
1169 case REQ_SYMLINK: req->result = symlink (req->ptr2, req->ptr1); break;
1170 case REQ_MKNOD: req->result = mknod (req->ptr1, req->mode, (dev_t)req->offs); break;
1171 case REQ_READLINK: req->result = readlink (req->ptr2, req->ptr1, NAME_MAX); break;
1172
1173 case REQ_FDATASYNC: req->result = fdatasync (req->int1); break;
1174 case REQ_FSYNC: req->result = fsync (req->int1); break;
1175 case REQ_READDIR: scandir_ (req, self); break;
1176
1177 case REQ_BUSY:
1178 {
1179 struct timeval tv;
1180
1181 tv.tv_sec = req->nv1;
1182 tv.tv_usec = (req->nv1 - tv.tv_sec) * 1000000.;
1183
1184 req->result = select (0, 0, 0, 0, &tv);
1185 }
1186 break;
1187 case REQ_UTIME:
1188 case REQ_FUTIME:
1189 {
1190 struct timeval tv[2];
1191 struct timeval *times;
1192
1193 if (req->nv1 != -1. || req->nv2 != -1.)
1194 {
1195 tv[0].tv_sec = req->nv1;
1196 tv[0].tv_usec = (req->nv1 - tv[0].tv_sec) * 1000000.;
1197 tv[1].tv_sec = req->nv2;
1198 tv[1].tv_usec = (req->nv2 - tv[1].tv_sec) * 1000000.;
1199
1200 times = tv;
1201 }
1202 else
1203 times = 0;
1204
1205
1206 req->result = req->type == REQ_FUTIME
1207 ? futimes (req->int1, times)
1208 : utimes (req->ptr1, times);
1209 }
1210
1211 case REQ_GROUP:
1212 case REQ_NOP:
1213 break;
1214
1215 case REQ_QUIT:
1216 goto quit;
1217
1218 default:
1219 req->result = -1;
1220 break;
1221 }
1222
1223 req->errorno = errno;
1224
1225 LOCK (reslock);
1226
1227 ++npending;
1228
1229 if (!reqq_push (&res_queue, req))
1230 {
1231 /* write a dummy byte to the pipe so fh becomes ready */
1232 write (respipe [1], &respipe, 1);
1233
1234 /* optionally signal the main thread asynchronously */
1235 if (main_sig)
1236 pthread_kill (main_tid, main_sig);
1237 }
1238
1239 self->req = 0;
1240 worker_clear (self);
1241
1242 UNLOCK (reslock);
1243 }
1244
1245 quit:
1246 LOCK (wrklock);
1247 worker_free (self);
1248 UNLOCK (wrklock);
1249
1250 return 0;
1251 }
1252
1253 /*****************************************************************************/
1254
1255 static void atfork_prepare (void)
1256 {
1257 LOCK (wrklock);
1258 LOCK (reqlock);
1259 LOCK (reslock);
1260 #if !HAVE_PREADWRITE
1261 LOCK (preadwritelock);
1262 #endif
1263 #if !HAVE_READDIR_R
1264 LOCK (readdirlock);
1265 #endif
1266 }
1267
1268 static void atfork_parent (void)
1269 {
1270 #if !HAVE_READDIR_R
1271 UNLOCK (readdirlock);
1272 #endif
1273 #if !HAVE_PREADWRITE
1274 UNLOCK (preadwritelock);
1275 #endif
1276 UNLOCK (reslock);
1277 UNLOCK (reqlock);
1278 UNLOCK (wrklock);
1279 }
1280
1281 static void atfork_child (void)
1282 {
1283 aio_req prv;
1284
1285 while (prv = reqq_shift (&req_queue))
1286 req_destroy (prv);
1287
1288 while (prv = reqq_shift (&res_queue))
1289 req_destroy (prv);
1290
1291 while (wrk_first.next != &wrk_first)
1292 {
1293 worker *wrk = wrk_first.next;
1294
1295 if (wrk->req)
1296 req_destroy (wrk->req);
1297
1298 worker_clear (wrk);
1299 worker_free (wrk);
1300 }
1301
1302 started = 0;
1303 idle = 0;
1304 nreqs = 0;
1305 nready = 0;
1306 npending = 0;
1307
1308 close (respipe [0]);
1309 close (respipe [1]);
1310 create_pipe ();
1311
1312 atfork_parent ();
1313 }
1314
1315 #define dREQ \
1316 aio_req req; \
1317 int req_pri = next_pri; \
1318 next_pri = DEFAULT_PRI + PRI_BIAS; \
1319 \
1320 if (SvOK (callback) && !SvROK (callback)) \
1321 croak ("callback must be undef or of reference type"); \
1322 \
1323 Newz (0, req, 1, aio_cb); \
1324 if (!req) \
1325 croak ("out of memory during aio_req allocation"); \
1326 \
1327 req->callback = newSVsv (callback); \
1328 req->pri = req_pri
1329
1330 #define REQ_SEND \
1331 req_send (req); \
1332 \
1333 if (GIMME_V != G_VOID) \
1334 XPUSHs (req_sv (req, AIO_REQ_KLASS));
1335
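
/* Illustrative sketch only, not part of the module: the shape every PPCODE
   body below follows. dREQ declares "req", validates and copies the
   callback and picks up the priority set via aioreq_pri; REQ_SEND queues
   the request and, in non-void context, pushes the IO::AIO::REQ object.
   The wrapper function below is hypothetical. */
#if 0
static void xs_body_shape_example (pTHX_ SV *callback)
{
  dSP;
  dREQ;                   /* allocate req, copy callback, set priority  */

  req->type = REQ_NOP;    /* a real sub fills in type and arguments     */

  REQ_SEND;               /* enqueue; a worker thread will pick it up   */
}
#endif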
1336 MODULE = IO::AIO PACKAGE = IO::AIO
1337
1338 PROTOTYPES: ENABLE
1339
1340 BOOT:
1341 {
1342 HV *stash = gv_stashpv ("IO::AIO", 1);
1343
1344 newCONSTSUB (stash, "EXDEV", newSViv (EXDEV));
1345 newCONSTSUB (stash, "O_RDONLY", newSViv (O_RDONLY));
1346 newCONSTSUB (stash, "O_WRONLY", newSViv (O_WRONLY));
1347 newCONSTSUB (stash, "O_CREAT", newSViv (O_CREAT));
1348 newCONSTSUB (stash, "O_TRUNC", newSViv (O_TRUNC));
1349 newCONSTSUB (stash, "S_IFIFO", newSViv (S_IFIFO));
1350 newCONSTSUB (stash, "SIGIO", newSViv (SIGIO));
1351
1352 create_pipe ();
1353 ATFORK (atfork_prepare, atfork_parent, atfork_child);
1354 }
1355
1356 void
1357 max_poll_reqs (int nreqs)
1358 PROTOTYPE: $
1359 CODE:
1360 max_poll_reqs = nreqs;
1361
1362 void
1363 max_poll_time (double nseconds)
1364 PROTOTYPE: $
1365 CODE:
1366 max_poll_time = nseconds * AIO_TICKS;
1367
1368 void
1369 min_parallel (int nthreads)
1370 PROTOTYPE: $
1371
1372 void
1373 max_parallel (int nthreads)
1374 PROTOTYPE: $
1375
1376 void
1377 max_idle (int nthreads)
1378 PROTOTYPE: $
1379 CODE:
1380 set_max_idle (nthreads);
1381
1382 int
1383 max_outstanding (int maxreqs)
1384 PROTOTYPE: $
1385 CODE:
1386 RETVAL = max_outstanding;
1387 max_outstanding = maxreqs;
1388 OUTPUT:
1389 RETVAL
1390
1391 void
1392 aio_open (SV8 *pathname, int flags, int mode, SV *callback=&PL_sv_undef)
1393 PROTOTYPE: $$$;$
1394 PPCODE:
1395 {
1396 dREQ;
1397
1398 req->type = REQ_OPEN;
1399 req->sv1 = newSVsv (pathname);
1400 req->ptr1 = SvPVbyte_nolen (req->sv1);
1401 req->int1 = flags;
1402 req->mode = mode;
1403
1404 REQ_SEND;
1405 }
1406
1407 void
1408 aio_close (SV *fh, SV *callback=&PL_sv_undef)
1409 PROTOTYPE: $;$
1410 ALIAS:
1411 aio_close = REQ_CLOSE
1412 aio_fsync = REQ_FSYNC
1413 aio_fdatasync = REQ_FDATASYNC
1414 PPCODE:
1415 {
1416 dREQ;
1417
1418 req->type = ix;
1419 req->sv1 = newSVsv (fh);
1420 req->int1 = PerlIO_fileno (IoIFP (sv_2io (fh)));
1421
1422 REQ_SEND;
1423 }
1424
1425 void
1426 aio_read (SV *fh, SV *offset, UV length, SV8 *data, UV dataoffset, SV *callback=&PL_sv_undef)
1427 ALIAS:
1428 aio_read = REQ_READ
1429 aio_write = REQ_WRITE
1430 PROTOTYPE: $$$$$;$
1431 PPCODE:
1432 {
1433 STRLEN svlen;
1434 char *svptr = SvPVbyte (data, svlen);
1435
1436 SvUPGRADE (data, SVt_PV);
1437 SvPOK_on (data);
1438
1439 if (dataoffset < 0)
1440 dataoffset += svlen;
1441
1442 if (dataoffset < 0 || dataoffset > svlen)
1443 croak ("data offset outside of string");
1444
1445 if (ix == REQ_WRITE)
1446 {
1447 /* write: check length and adjust. */
1448 if (length < 0 || length + dataoffset > svlen)
1449 length = svlen - dataoffset;
1450 }
1451 else
1452 {
1453 /* read: grow scalar as necessary */
1454 svptr = SvGROW (data, length + dataoffset + 1);
1455 }
1456
1457 if (length < 0)
1458 croak ("length must not be negative");
1459
1460 {
1461 dREQ;
1462
1463 req->type = ix;
1464 req->sv1 = newSVsv (fh);
1465 req->int1 = PerlIO_fileno (ix == REQ_READ ? IoIFP (sv_2io (fh))
1466 : IoOFP (sv_2io (fh)));
1467 req->offs = SvOK (offset) ? SvVAL64 (offset) : -1;
1468 req->size = length;
1469 req->sv2 = SvREFCNT_inc (data);
1470 req->ptr1 = (char *)svptr + dataoffset;
1471 req->stroffset = dataoffset;
1472
1473 if (!SvREADONLY (data))
1474 {
1475 SvREADONLY_on (data);
1476 req->flags |= FLAG_SV2_RO_OFF;
1477 }
1478
1479 REQ_SEND;
1480 }
1481 }
1482
1483 void
1484 aio_readlink (SV8 *path, SV *callback=&PL_sv_undef)
1485 PROTOTYPE: $;$
1486 PPCODE:
1487 {
1488 SV *data;
1489 dREQ;
1490
1491 data = newSV (NAME_MAX);
1492 SvPOK_on (data);
1493
1494 req->type = REQ_READLINK;
1495 req->sv1 = newSVsv (path);
1496 req->ptr2 = SvPVbyte_nolen (req->sv1);
1497 req->sv2 = data;
1498 req->ptr1 = SvPVbyte_nolen (data);
1499
1500 REQ_SEND;
1501 }
1502
1503 void
1504 aio_sendfile (SV *out_fh, SV *in_fh, SV *in_offset, UV length, SV *callback=&PL_sv_undef)
1505 PROTOTYPE: $$$$;$
1506 PPCODE:
1507 {
1508 dREQ;
1509
1510 req->type = REQ_SENDFILE;
1511 req->sv1 = newSVsv (out_fh);
1512 req->int1 = PerlIO_fileno (IoIFP (sv_2io (out_fh)));
1513 req->sv2 = newSVsv (in_fh);
1514 req->int2 = PerlIO_fileno (IoIFP (sv_2io (in_fh)));
1515 req->offs = SvVAL64 (in_offset);
1516 req->size = length;
1517
1518 REQ_SEND;
1519 }
1520
1521 void
1522 aio_readahead (SV *fh, SV *offset, IV length, SV *callback=&PL_sv_undef)
1523 PROTOTYPE: $$$;$
1524 PPCODE:
1525 {
1526 dREQ;
1527
1528 req->type = REQ_READAHEAD;
1529 req->sv1 = newSVsv (fh);
1530 req->int1 = PerlIO_fileno (IoIFP (sv_2io (fh)));
1531 req->offs = SvVAL64 (offset);
1532 req->size = length;
1533
1534 REQ_SEND;
1535 }
1536
1537 void
1538 aio_stat (SV8 *fh_or_path, SV *callback=&PL_sv_undef)
1539 ALIAS:
1540 aio_stat = REQ_STAT
1541 aio_lstat = REQ_LSTAT
1542 PPCODE:
1543 {
1544 dREQ;
1545
1546 req->ptr2 = malloc (sizeof (Stat_t));
1547 if (!req->ptr2)
1548 {
1549 req_destroy (req);
1550 croak ("out of memory during aio_stat statdata allocation");
1551 }
1552
1553 req->flags |= FLAG_PTR2_FREE;
1554 req->sv1 = newSVsv (fh_or_path);
1555
1556 if (SvPOK (fh_or_path))
1557 {
1558 req->type = ix;
1559 req->ptr1 = SvPVbyte_nolen (req->sv1);
1560 }
1561 else
1562 {
1563 req->type = REQ_FSTAT;
1564 req->int1 = PerlIO_fileno (IoIFP (sv_2io (fh_or_path)));
1565 }
1566
1567 REQ_SEND;
1568 }
1569
1570 void
1571 aio_utime (SV8 *fh_or_path, SV *atime, SV *mtime, SV *callback=&PL_sv_undef)
1572 PPCODE:
1573 {
1574 dREQ;
1575
1576 req->nv1 = SvOK (atime) ? SvNV (atime) : -1.;
1577 req->nv2 = SvOK (mtime) ? SvNV (mtime) : -1.;
1578 req->sv1 = newSVsv (fh_or_path);
1579
1580 if (SvPOK (fh_or_path))
1581 {
1582 req->type = REQ_UTIME;
1583 req->ptr1 = SvPVbyte_nolen (req->sv1);
1584 }
1585 else
1586 {
1587 req->type = REQ_FUTIME;
1588 req->int1 = PerlIO_fileno (IoIFP (sv_2io (fh_or_path)));
1589 }
1590
1591 REQ_SEND;
1592 }
1593
1594 void
1595 aio_chmod (SV8 *fh_or_path, int mode, SV *callback=&PL_sv_undef)
1596 PPCODE:
1597 {
1598 dREQ;
1599
1600 req->mode = mode;
1601 req->sv1 = newSVsv (fh_or_path);
1602
1603 if (SvPOK (fh_or_path))
1604 {
1605 req->type = REQ_CHMOD;
1606 req->ptr1 = SvPVbyte_nolen (req->sv1);
1607 }
1608 else
1609 {
1610 req->type = REQ_FCHMOD;
1611 req->int1 = PerlIO_fileno (IoIFP (sv_2io (fh_or_path)));
1612 }
1613
1614 REQ_SEND;
1615 }
1616
1617 void
1618 aio_chown (SV8 *fh_or_path, SV *uid, SV *gid, SV *callback=&PL_sv_undef)
1619 PPCODE:
1620 {
1621 dREQ;
1622
1623 req->int2 = SvOK (uid) ? SvIV (uid) : -1;
1624 req->int3 = SvOK (gid) ? SvIV (gid) : -1;
1625 req->sv1 = newSVsv (fh_or_path);
1626
1627 if (SvPOK (fh_or_path))
1628 {
1629 req->type = REQ_CHOWN;
1630 req->ptr1 = SvPVbyte_nolen (req->sv1);
1631 }
1632 else
1633 {
1634 req->type = REQ_FCHOWN;
1635 req->int1 = PerlIO_fileno (IoIFP (sv_2io (fh_or_path)));
1636 }
1637
1638 REQ_SEND;
1639 }
1640
1641 void
1642 aio_unlink (SV8 *pathname, SV *callback=&PL_sv_undef)
1643 ALIAS:
1644 aio_unlink = REQ_UNLINK
1645 aio_rmdir = REQ_RMDIR
1646 aio_readdir = REQ_READDIR
1647 PPCODE:
1648 {
1649 dREQ;
1650
1651 req->type = ix;
1652 req->sv1 = newSVsv (pathname);
1653 req->ptr1 = SvPVbyte_nolen (req->sv1);
1654
1655 REQ_SEND;
1656 }
1657
1658 void
1659 aio_mkdir (SV8 *pathname, int mode, SV *callback=&PL_sv_undef)
1660 PPCODE:
1661 {
1662 dREQ;
1663
1664 req->type = REQ_MKDIR;
1665 req->sv1 = newSVsv (pathname);
1666 req->ptr1 = SvPVbyte_nolen (req->sv1);
1667 req->mode = mode;
1668
1669 REQ_SEND;
1670 }
1671
1672 void
1673 aio_link (SV8 *oldpath, SV8 *newpath, SV *callback=&PL_sv_undef)
1674 ALIAS:
1675 aio_link = REQ_LINK
1676 aio_symlink = REQ_SYMLINK
1677 aio_rename = REQ_RENAME
1678 PPCODE:
1679 {
1680 dREQ;
1681
1682 req->type = ix;
1683 req->sv2 = newSVsv (oldpath);
1684 req->ptr2 = SvPVbyte_nolen (req->sv2);
1685 req->sv1 = newSVsv (newpath);
1686 req->ptr1 = SvPVbyte_nolen (req->sv1);
1687
1688 REQ_SEND;
1689 }
1690
1691 void
1692 aio_mknod (SV8 *pathname, int mode, UV dev, SV *callback=&PL_sv_undef)
1693 PPCODE:
1694 {
1695 dREQ;
1696
1697 req->type = REQ_MKNOD;
1698 req->sv1 = newSVsv (pathname);
1699 req->ptr1 = SvPVbyte_nolen (req->sv1);
1700 req->mode = (mode_t)mode;
1701 req->offs = dev;
1702
1703 REQ_SEND;
1704 }
1705
1706 void
1707 aio_busy (double delay, SV *callback=&PL_sv_undef)
1708 PPCODE:
1709 {
1710 dREQ;
1711
1712 req->type = REQ_BUSY;
1713 req->nv1 = delay < 0. ? 0. : delay;
1714
1715 REQ_SEND;
1716 }
1717
1718 void
1719 aio_group (SV *callback=&PL_sv_undef)
1720 PROTOTYPE: ;$
1721 PPCODE:
1722 {
1723 dREQ;
1724
1725 req->type = REQ_GROUP;
1726
1727 req_send (req);
1728 XPUSHs (req_sv (req, AIO_GRP_KLASS));
1729 }
1730
1731 void
1732 aio_nop (SV *callback=&PL_sv_undef)
1733 PPCODE:
1734 {
1735 dREQ;
1736
1737 req->type = REQ_NOP;
1738
1739 REQ_SEND;
1740 }
1741
1742 int
1743 aioreq_pri (int pri = 0)
1744 PROTOTYPE: ;$
1745 CODE:
1746 RETVAL = next_pri - PRI_BIAS;
1747 if (items > 0)
1748 {
1749 if (pri < PRI_MIN) pri = PRI_MIN;
1750 if (pri > PRI_MAX) pri = PRI_MAX;
1751 next_pri = pri + PRI_BIAS;
1752 }
1753 OUTPUT:
1754 RETVAL
1755
1756 void
1757 aioreq_nice (int nice = 0)
1758 CODE:
1759 nice = next_pri - nice;
1760 if (nice < PRI_MIN) nice = PRI_MIN;
1761 if (nice > PRI_MAX) nice = PRI_MAX;
1762 next_pri = nice + PRI_BIAS;
1763
1764 void
1765 flush ()
1766 PROTOTYPE:
1767 CODE:
1768 while (nreqs)
1769 {
1770 poll_wait ();
1771 poll_cb ();
1772 }
1773
1774 int
1775 poll()
1776 PROTOTYPE:
1777 CODE:
1778 poll_wait ();
1779 RETVAL = poll_cb ();
1780 OUTPUT:
1781 RETVAL
1782
1783 int
1784 poll_fileno()
1785 PROTOTYPE:
1786 CODE:
1787 RETVAL = respipe [0];
1788 OUTPUT:
1789 RETVAL
1790
1791 int
1792 poll_cb(...)
1793 PROTOTYPE:
1794 CODE:
1795 RETVAL = poll_cb ();
1796 OUTPUT:
1797 RETVAL
1798
1799 void
1800 poll_wait()
1801 PROTOTYPE:
1802 CODE:
1803 poll_wait ();
1804
1805 void
1806 setsig (int signum = SIGIO)
1807 PROTOTYPE: ;$
1808 CODE:
1809 {
1810 if (block_sig_level)
1811 croak ("cannot call IO::AIO::setsig from within aio_block/callback");
1812
1813 LOCK (reslock);
1814 main_tid = pthread_self ();
1815 main_sig = signum;
1816 UNLOCK (reslock);
1817
1818 if (main_sig && npending)
1819 pthread_kill (main_tid, main_sig);
1820 }
1821
1822 void
1823 aio_block (SV *cb)
1824 PROTOTYPE: &
1825 PPCODE:
1826 {
1827 int count;
1828
1829 block_sig ();
1830 PUSHMARK (SP);
1831 PUTBACK;
1832 count = call_sv (cb, GIMME_V | G_NOARGS | G_EVAL);
1833 SPAGAIN;
1834 unblock_sig ();
1835
1836 if (SvTRUE (ERRSV))
1837 croak (0);
1838
1839 XSRETURN (count);
1840 }
1841
1842 int
1843 nreqs()
1844 PROTOTYPE:
1845 CODE:
1846 RETVAL = nreqs;
1847 OUTPUT:
1848 RETVAL
1849
1850 int
1851 nready()
1852 PROTOTYPE:
1853 CODE:
1854 RETVAL = get_nready ();
1855 OUTPUT:
1856 RETVAL
1857
1858 int
1859 npending()
1860 PROTOTYPE:
1861 CODE:
1862 RETVAL = get_npending ();
1863 OUTPUT:
1864 RETVAL
1865
1866 int
1867 nthreads()
1868 PROTOTYPE:
1869 CODE:
1870 if (WORDACCESS_UNSAFE) LOCK (wrklock);
1871 RETVAL = started;
1872 if (WORDACCESS_UNSAFE) UNLOCK (wrklock);
1873 OUTPUT:
1874 RETVAL
1875
1876 PROTOTYPES: DISABLE
1877
1878 MODULE = IO::AIO PACKAGE = IO::AIO::REQ
1879
1880 void
1881 cancel (aio_req_ornot req)
1882 CODE:
1883 req_cancel (req);
1884
1885 void
1886 cb (aio_req_ornot req, SV *callback=&PL_sv_undef)
1887 CODE:
1888 SvREFCNT_dec (req->callback);
1889 req->callback = newSVsv (callback);
1890
1891 MODULE = IO::AIO PACKAGE = IO::AIO::GRP
1892
1893 void
1894 add (aio_req grp, ...)
1895 PPCODE:
1896 {
1897 int i;
1898 aio_req req;
1899
1900 if (main_sig && !block_sig_level)
1901 croak ("aio_group->add called outside aio_block/callback context while IO::AIO::setsig is in use");
1902
1903 if (grp->int1 == 2)
1904 croak ("cannot add requests to IO::AIO::GRP after the group finished");
1905
1906 for (i = 1; i < items; ++i )
1907 {
1908 if (GIMME_V != G_VOID)
1909 XPUSHs (sv_2mortal (newSVsv (ST (i))));
1910
1911 req = SvAIO_REQ (ST (i));
1912
1913 if (req)
1914 {
1915 ++grp->size;
1916 req->grp = grp;
1917
1918 req->grp_prev = 0;
1919 req->grp_next = grp->grp_first;
1920
1921 if (grp->grp_first)
1922 grp->grp_first->grp_prev = req;
1923
1924 grp->grp_first = req;
1925 }
1926 }
1927 }
1928
1929 void
1930 cancel_subs (aio_req_ornot req)
1931 CODE:
1932 req_cancel_subs (req);
1933
1934 void
1935 result (aio_req grp, ...)
1936 CODE:
1937 {
1938 int i;
1939 AV *av;
1940
1941 grp->errorno = errno;
1942
1943 av = newAV ();
1944
1945 for (i = 1; i < items; ++i )
1946 av_push (av, newSVsv (ST (i)));
1947
1948 SvREFCNT_dec (grp->sv1);
1949 grp->sv1 = (SV *)av;
1950 }
1951
1952 void
1953 errno (aio_req grp, int errorno = errno)
1954 CODE:
1955 grp->errorno = errorno;
1956
1957 void
1958 limit (aio_req grp, int limit)
1959 CODE:
1960 grp->int2 = limit;
1961 aio_grp_feed (grp);
1962
1963 void
1964 feed (aio_req grp, SV *callback=&PL_sv_undef)
1965 CODE:
1966 {
1967 SvREFCNT_dec (grp->sv2);
1968 grp->sv2 = newSVsv (callback);
1969
1970 if (grp->int2 <= 0)
1971 grp->int2 = 2;
1972
1973 aio_grp_feed (grp);
1974 }
1975