/cvs/IO-AIO/AIO.xs
Revision: 1.103
Committed: Sun Jul 8 09:09:34 2007 UTC (16 years, 10 months ago) by root
Branch: MAIN
Changes since 1.102: +178 -102 lines
Log Message:
*** empty log message ***

File Contents

1 #include "xthread.h"
2
3 #include <errno.h>
4
5 #include "EXTERN.h"
6 #include "perl.h"
7 #include "XSUB.h"
8
9 #include <stddef.h>
10 #include <stdlib.h>
11 #include <errno.h>
12 #include <sys/types.h>
13 #include <sys/stat.h>
14 #include <limits.h>
15 #include <fcntl.h>
16 #include <sched.h>
17
18 #ifdef _WIN32
19
20 # define SIGIO 0
21 typedef Direntry_t X_DIRENT;
22 #undef malloc
23 #undef free
24
25 // perl overrides all those nice win32 functions
26 # undef open
27 # undef read
28 # undef write
29 # undef stat
30 # undef fstat
31 # define lstat stat
32 # undef truncate
33 # undef ftruncate
34 # undef open
35 # undef close
36 # undef unlink
37 # undef rmdir
38 # undef rename
39 # undef lseek
40
41 # define chown(a,b,c) (errno = ENOSYS, -1)
42 # define fchown(a,b,c) (errno = ENOSYS, -1)
43 # define fchmod(a,b) (errno = ENOSYS, -1)
44 # define symlink(a,b) (errno = ENOSYS, -1)
45 # define readlink(a,b,c) (errno = ENOSYS, -1)
46 # define mknod(a,b,c) (errno = ENOSYS, -1)
47 # define truncate(a,b) (errno = ENOSYS, -1)
48 # define ftruncate(fd,o) chsize ((fd), (o))
49 # define fsync(fd) _commit (fd)
50 # define opendir(fd) (errno = ENOSYS, 0)
51 # define readdir(fd) (errno = ENOSYS, -1)
52 # define closedir(fd) (errno = ENOSYS, -1)
53 # define mkdir(a,b) mkdir (a)
54
55 #else
56
57 # include "autoconf/config.h"
58 # include <sys/time.h>
59 # include <sys/select.h>
60 # include <unistd.h>
61 # include <utime.h>
62 # include <signal.h>
63 typedef struct dirent X_DIRENT;
64
65 #endif
66
67 #if HAVE_SENDFILE
68 # if __linux
69 # include <sys/sendfile.h>
70 # elif __freebsd
71 # include <sys/socket.h>
72 # include <sys/uio.h>
73 # elif __hpux
74 # include <sys/socket.h>
75 # elif __solaris /* not yet */
76 # include <sys/sendfile.h>
77 # else
78 # error sendfile support requested but not available
79 # endif
80 #endif
81
82 /* number of seconds after which idle threads exit */
83 #define IDLE_TIMEOUT 10
84
85 /* used for struct dirent; AIX doesn't provide NAME_MAX */
86 #ifndef NAME_MAX
87 # define NAME_MAX 4096
88 #endif
89
90 /* buffer size for various temporary buffers */
91 #define AIO_BUFSIZE 65536
92
93 /* use NV for 32 bit perls as it allows larger offsets */
94 #if IVSIZE >= 8
95 # define SvVAL64 SvIV
96 #else
97 # define SvVAL64 SvNV
98 #endif
99
100 #define dBUF \
101 char *aio_buf; \
102 X_LOCK (wrklock); \
103 self->dbuf = aio_buf = malloc (AIO_BUFSIZE); \
104 X_UNLOCK (wrklock); \
105 if (!aio_buf) \
106 return -1;
107
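/* dBUF allocates one AIO_BUFSIZE temporary buffer for the current request
 * and records it in self->dbuf under wrklock, so worker_clear can release it
 * even if the worker never reaches its own free; on allocation failure the
 * enclosing function returns -1, which becomes the request result. */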
108 typedef SV SV8; /* byte-sv, used for argument-checking */
109
110 enum {
111 REQ_QUIT,
112 REQ_OPEN, REQ_CLOSE,
113 REQ_READ, REQ_WRITE,
114 REQ_READAHEAD, REQ_SENDFILE,
115 REQ_STAT, REQ_LSTAT, REQ_FSTAT,
116 REQ_TRUNCATE, REQ_FTRUNCATE,
117 REQ_UTIME, REQ_FUTIME,
118 REQ_CHMOD, REQ_FCHMOD,
119 REQ_CHOWN, REQ_FCHOWN,
120 REQ_FSYNC, REQ_FDATASYNC,
121 REQ_UNLINK, REQ_RMDIR, REQ_MKDIR, REQ_RENAME,
122 REQ_MKNOD, REQ_READDIR,
123 REQ_LINK, REQ_SYMLINK, REQ_READLINK,
124 REQ_GROUP, REQ_NOP,
125 REQ_BUSY,
126 };
127
128 #define AIO_REQ_KLASS "IO::AIO::REQ"
129 #define AIO_GRP_KLASS "IO::AIO::GRP"
130
131 typedef struct aio_cb
132 {
133 struct aio_cb *volatile next;
134
135 SV *callback;
136 SV *sv1, *sv2;
137 void *ptr1, *ptr2;
138 off_t offs;
139 size_t size;
140 ssize_t result;
141 double nv1, nv2;
142
143 STRLEN stroffset;
144 int type;
145 int int1, int2, int3;
146 int errorno;
147 mode_t mode; /* open */
148
149 unsigned char flags;
150 unsigned char pri;
151
152 SV *self; /* the perl counterpart of this request, if any */
153 struct aio_cb *grp, *grp_prev, *grp_next, *grp_first;
154 } aio_cb;
155
156 enum {
157 FLAG_CANCELLED = 0x01, /* request was cancelled */
158 FLAG_SV2_RO_OFF = 0x40, /* data was set readonly */
159 FLAG_PTR2_FREE = 0x80, /* need to free(ptr2) */
160 };
161
162 typedef aio_cb *aio_req;
163 typedef aio_cb *aio_req_ornot;
164
165 enum {
166 PRI_MIN = -4,
167 PRI_MAX = 4,
168
169 DEFAULT_PRI = 0,
170 PRI_BIAS = -PRI_MIN,
171 NUM_PRI = PRI_MAX + PRI_BIAS + 1,
172 };
173
174 #define AIO_TICKS ((1000000 + 1023) >> 10)
175
176 static unsigned int max_poll_time = 0;
177 static unsigned int max_poll_reqs = 0;
178
179 /* calculate time difference in ~1/AIO_TICKS of a second */
180 static int tvdiff (struct timeval *tv1, struct timeval *tv2)
181 {
182 return (tv2->tv_sec - tv1->tv_sec ) * AIO_TICKS
183 + ((tv2->tv_usec - tv1->tv_usec) >> 10);
184 }
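/* a minimal stand-alone sketch (not part of AIO.xs) of the tick arithmetic
 * above: AIO_TICKS is (1000000 + 1023) >> 10 == 977, so one tick is roughly
 * a millisecond and tvdiff approximates elapsed microseconds / 1024; the
 * 0.01 below stands in for a hypothetical max_poll_time argument. */
#include <stdio.h>

int main (void)
{
  unsigned int aio_ticks = (1000000 + 1023) >> 10; /* 977 */
  unsigned int max_poll_time = 0.01 * aio_ticks;   /* 0.01s -> 9 ticks */
  int diff = 0 * aio_ticks + (10000 >> 10);        /* 10ms elapsed -> 9 ticks */

  printf ("%u ticks/s, limit %u, 10ms diff %d\n", aio_ticks, max_poll_time, diff);
  return 0;
}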
185
186 static thread_t main_tid;
187 static int main_sig;
188 static int block_sig_level;
189
190 void block_sig ()
191 {
192 sigset_t ss;
193
194 if (block_sig_level++)
195 return;
196
197 if (!main_sig)
198 return;
199
200 sigemptyset (&ss);
201 sigaddset (&ss, main_sig);
202 pthread_sigmask (SIG_BLOCK, &ss, 0);
203 }
204
205 void unblock_sig ()
206 {
207 sigset_t ss;
208
209 if (--block_sig_level)
210 return;
211
212 if (!main_sig)
213 return;
214
215 sigemptyset (&ss);
216 sigaddset (&ss, main_sig);
217 pthread_sigmask (SIG_UNBLOCK, &ss, 0);
218 }
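/* block_sig/unblock_sig bracket every section that queues requests or runs
 * perl callbacks: once IO::AIO::setsig has installed main_sig, the workers
 * pthread_kill the main thread on completion, and blocking that signal here
 * keeps the handler from firing in the middle of such a section; the nesting
 * counter block_sig_level makes the pairs safe to nest. */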
219
220 static int next_pri = DEFAULT_PRI + PRI_BIAS;
221
222 static unsigned int started, idle, wanted;
223
224 /* worker threads management */
225 static mutex_t wrklock = X_MUTEX_INIT;
226
227 typedef struct worker {
228 /* locked by wrklock */
229 struct worker *prev, *next;
230
231 thread_t tid;
232
233 /* locked by reslock, reqlock or wrklock */
234 aio_req req; /* currently processed request */
235 void *dbuf;
236 DIR *dirp;
237 } worker;
238
239 static worker wrk_first = { &wrk_first, &wrk_first, 0 };
240
241 static void worker_clear (worker *wrk)
242 {
243 if (wrk->dirp)
244 {
245 closedir (wrk->dirp);
246 wrk->dirp = 0;
247 }
248
249 if (wrk->dbuf)
250 {
251 free (wrk->dbuf);
252 wrk->dbuf = 0;
253 }
254 }
255
256 static void worker_free (worker *wrk)
257 {
258 wrk->next->prev = wrk->prev;
259 wrk->prev->next = wrk->next;
260
261 free (wrk);
262 }
263
264 static volatile unsigned int nreqs, nready, npending;
265 static volatile unsigned int max_idle = 4;
266 static volatile unsigned int max_outstanding = 0xffffffff;
267 static int respipe [2];
268
269 static mutex_t reslock = X_MUTEX_INIT;
270 static mutex_t reqlock = X_MUTEX_INIT;
271 static cond_t reqwait = X_COND_INIT;
272
273 #if WORDACCESS_UNSAFE
274
275 static unsigned int get_nready ()
276 {
277 unsigned int retval;
278
279 X_LOCK (reqlock);
280 retval = nready;
281 X_UNLOCK (reqlock);
282
283 return retval;
284 }
285
286 static unsigned int get_npending ()
287 {
288 unsigned int retval;
289
290 X_LOCK (reslock);
291 retval = npending;
292 X_UNLOCK (reslock);
293
294 return retval;
295 }
296
297 static unsigned int get_nthreads ()
298 {
299 unsigned int retval;
300
301 X_LOCK (wrklock);
302 retval = started;
303 X_UNLOCK (wrklock);
304
305 return retval;
306 }
307
308 #else
309
310 # define get_nready() nready
311 # define get_npending() npending
312 # define get_nthreads() started
313
314 #endif
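/* on platforms where aligned word reads are not atomic (WORDACCESS_UNSAFE),
 * the counters are sampled under the lock that protects them; everywhere
 * else the macros read the volatile counters directly, which is good enough
 * for the heuristics they feed. */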
315
316 /*
317 * a somewhat faster data structure might be nice, but
318 * with 8 priorities this actually needs <20 insns
319 * per shift, the most expensive operation.
320 */
321 typedef struct {
322 aio_req qs[NUM_PRI], qe[NUM_PRI]; /* qstart, qend */
323 int size;
324 } reqq;
325
326 static reqq req_queue;
327 static reqq res_queue;
328
329 int reqq_push (reqq *q, aio_req req)
330 {
331 int pri = req->pri;
332 req->next = 0;
333
334 if (q->qe[pri])
335 {
336 q->qe[pri]->next = req;
337 q->qe[pri] = req;
338 }
339 else
340 q->qe[pri] = q->qs[pri] = req;
341
342 return q->size++;
343 }
344
345 aio_req reqq_shift (reqq *q)
346 {
347 int pri;
348
349 if (!q->size)
350 return 0;
351
352 --q->size;
353
354 for (pri = NUM_PRI; pri--; )
355 {
356 aio_req req = q->qs[pri];
357
358 if (req)
359 {
360 if (!(q->qs[pri] = req->next))
361 q->qe[pri] = 0;
362
363 return req;
364 }
365 }
366
367 abort ();
368 }
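/* ordering example: with PRI_BIAS == 4, pushing A and C at DEFAULT_PRI and B
 * at PRI_MAX (biased pri 8) in the order A, B, C makes reqq_shift return B
 * first (highest non-empty bucket wins), then A, then C (FIFO within one
 * priority). reqq_push returns the old queue size, so a zero return means
 * the queue was empty before the push. */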
369
370 static int poll_cb ();
371 static int req_invoke (aio_req req);
372 static void req_destroy (aio_req req);
373 static void req_cancel (aio_req req);
374
375 /* must be called at most once */
376 static SV *req_sv (aio_req req, const char *klass)
377 {
378 if (!req->self)
379 {
380 req->self = (SV *)newHV ();
381 sv_magic (req->self, 0, PERL_MAGIC_ext, (char *)req, 0);
382 }
383
384 return sv_2mortal (sv_bless (newRV_inc (req->self), gv_stashpv (klass, 1)));
385 }
386
387 static aio_req SvAIO_REQ (SV *sv)
388 {
389 MAGIC *mg;
390
391 if (!sv_derived_from (sv, AIO_REQ_KLASS) || !SvROK (sv))
392 croak ("object of class " AIO_REQ_KLASS " expected");
393
394 mg = mg_find (SvRV (sv), PERL_MAGIC_ext);
395
396 return mg ? (aio_req)mg->mg_ptr : 0;
397 }
398
399 static void aio_grp_feed (aio_req grp)
400 {
401 block_sig ();
402
403 while (grp->size < grp->int2 && !(grp->flags & FLAG_CANCELLED))
404 {
405 int old_len = grp->size;
406
407 if (grp->sv2 && SvOK (grp->sv2))
408 {
409 dSP;
410
411 ENTER;
412 SAVETMPS;
413 PUSHMARK (SP);
414 XPUSHs (req_sv (grp, AIO_GRP_KLASS));
415 PUTBACK;
416 call_sv (grp->sv2, G_VOID | G_EVAL | G_KEEPERR);
417 SPAGAIN;
418 FREETMPS;
419 LEAVE;
420 }
421
422 /* stop if no progress has been made */
423 if (old_len == grp->size)
424 {
425 SvREFCNT_dec (grp->sv2);
426 grp->sv2 = 0;
427 break;
428 }
429 }
430
431 unblock_sig ();
432 }
433
434 static void aio_grp_dec (aio_req grp)
435 {
436 --grp->size;
437
438 /* call feeder, if applicable */
439 aio_grp_feed (grp);
440
441 /* finish, if done */
442 if (!grp->size && grp->int1)
443 {
444 block_sig ();
445
446 if (!req_invoke (grp))
447 {
448 req_destroy (grp);
449 unblock_sig ();
450 croak (0);
451 }
452
453 req_destroy (grp);
454 unblock_sig ();
455 }
456 }
457
458 static int req_invoke (aio_req req)
459 {
460 dSP;
461
462 if (req->flags & FLAG_SV2_RO_OFF)
463 SvREADONLY_off (req->sv2);
464
465 if (!(req->flags & FLAG_CANCELLED) && SvOK (req->callback))
466 {
467 ENTER;
468 SAVETMPS;
469 PUSHMARK (SP);
470 EXTEND (SP, 1);
471
472 switch (req->type)
473 {
474 case REQ_READDIR:
475 {
476 SV *rv = &PL_sv_undef;
477
478 if (req->result >= 0)
479 {
480 int i;
481 char *buf = req->ptr2;
482 AV *av = newAV ();
483
484 av_extend (av, req->result - 1);
485
486 for (i = 0; i < req->result; ++i)
487 {
488 SV *sv = newSVpv (buf, 0);
489
490 av_store (av, i, sv);
491 buf += SvCUR (sv) + 1;
492 }
493
494 rv = sv_2mortal (newRV_noinc ((SV *)av));
495 }
496
497 PUSHs (rv);
498 }
499 break;
500
501 case REQ_OPEN:
502 {
503 /* convert fd to fh */
504 SV *fh;
505
506 PUSHs (sv_2mortal (newSViv (req->result)));
507 PUTBACK;
508 call_pv ("IO::AIO::_fd2fh", G_SCALAR | G_EVAL);
509 SPAGAIN;
510
511 fh = POPs;
512 PUSHMARK (SP);
513 XPUSHs (fh);
514 }
515 break;
516
517 case REQ_GROUP:
518 req->int1 = 2; /* mark group as finished */
519
520 if (req->sv1)
521 {
522 int i;
523 AV *av = (AV *)req->sv1;
524
525 EXTEND (SP, AvFILL (av) + 1);
526 for (i = 0; i <= AvFILL (av); ++i)
527 PUSHs (*av_fetch (av, i, 0));
528 }
529 break;
530
531 case REQ_NOP:
532 case REQ_BUSY:
533 break;
534
535 case REQ_READLINK:
536 if (req->result > 0)
537 {
538 SvCUR_set (req->sv2, req->result);
539 *SvEND (req->sv2) = 0;
540 PUSHs (req->sv2);
541 }
542 break;
543
544 case REQ_STAT:
545 case REQ_LSTAT:
546 case REQ_FSTAT:
547 PL_laststype = req->type == REQ_LSTAT ? OP_LSTAT : OP_STAT;
548 PL_laststatval = req->result;
549 PL_statcache = *(Stat_t *)(req->ptr2);
550 PUSHs (sv_2mortal (newSViv (req->result)));
551 break;
552
553 case REQ_READ:
554 SvCUR_set (req->sv2, req->stroffset + (req->result > 0 ? req->result : 0));
555 *SvEND (req->sv2) = 0;
556 PUSHs (sv_2mortal (newSViv (req->result)));
557 break;
558
559 default:
560 PUSHs (sv_2mortal (newSViv (req->result)));
561 break;
562 }
563
564 errno = req->errorno;
565
566 PUTBACK;
567 call_sv (req->callback, G_VOID | G_EVAL);
568 SPAGAIN;
569
570 FREETMPS;
571 LEAVE;
572 }
573
574 if (req->grp)
575 {
576 aio_req grp = req->grp;
577
578 /* unlink request */
579 if (req->grp_next) req->grp_next->grp_prev = req->grp_prev;
580 if (req->grp_prev) req->grp_prev->grp_next = req->grp_next;
581
582 if (grp->grp_first == req)
583 grp->grp_first = req->grp_next;
584
585 aio_grp_dec (grp);
586 }
587
588 return !SvTRUE (ERRSV);
589 }
590
591 static void req_destroy (aio_req req)
592 {
593 if (req->self)
594 {
595 sv_unmagic (req->self, PERL_MAGIC_ext);
596 SvREFCNT_dec (req->self);
597 }
598
599 SvREFCNT_dec (req->sv1);
600 SvREFCNT_dec (req->sv2);
601 SvREFCNT_dec (req->callback);
602
603 if (req->flags & FLAG_PTR2_FREE)
604 free (req->ptr2);
605
606 Safefree (req);
607 }
608
609 static void req_cancel_subs (aio_req grp)
610 {
611 aio_req sub;
612
613 if (grp->type != REQ_GROUP)
614 return;
615
616 SvREFCNT_dec (grp->sv2);
617 grp->sv2 = 0;
618
619 for (sub = grp->grp_first; sub; sub = sub->grp_next)
620 req_cancel (sub);
621 }
622
623 static void req_cancel (aio_req req)
624 {
625 req->flags |= FLAG_CANCELLED;
626
627 req_cancel_subs (req);
628 }
629
630 X_THREAD_PROC (aio_proc);
631
632 static void start_thread (void)
633 {
634 worker *wrk = calloc (1, sizeof (worker));
635
636 if (!wrk)
637 croak ("unable to allocate worker thread data");
638
639 X_LOCK (wrklock);
640
641 if (thread_create (&wrk->tid, aio_proc, (void *)wrk))
642 {
643 wrk->prev = &wrk_first;
644 wrk->next = wrk_first.next;
645 wrk_first.next->prev = wrk;
646 wrk_first.next = wrk;
647 ++started;
648 }
649 else
650 free (wrk);
651
652 X_UNLOCK (wrklock);
653 }
654
655 static void maybe_start_thread ()
656 {
657 if (get_nthreads () >= wanted)
658 return;
659
660 /* todo: maybe use idle here, but might be less exact */
661 if (0 <= (int)get_nthreads () + (int)get_npending () - (int)nreqs)
662 return;
663
664 start_thread ();
665 }
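/* a new worker is started only while the thread count is below wanted and
 * there are more outstanding requests (nreqs) than threads plus already
 * finished-but-undelivered results (npending), i.e. when some queued request
 * would otherwise have no thread to run on. */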
666
667 static void req_send (aio_req req)
668 {
669 block_sig ();
670
671 ++nreqs;
672
673 X_LOCK (reqlock);
674 ++nready;
675 reqq_push (&req_queue, req);
676 X_COND_SIGNAL (reqwait);
677 X_UNLOCK (reqlock);
678
679 unblock_sig ();
680
681 maybe_start_thread ();
682 }
683
684 static void end_thread (void)
685 {
686 aio_req req;
687
688 Newz (0, req, 1, aio_cb);
689
690 req->type = REQ_QUIT;
691 req->pri = PRI_MAX + PRI_BIAS;
692
693 X_LOCK (reqlock);
694 reqq_push (&req_queue, req);
695 X_COND_SIGNAL (reqwait);
696 X_UNLOCK (reqlock);
697
698 X_LOCK (wrklock);
699 --started;
700 X_UNLOCK (wrklock);
701 }
702
703 static void set_max_idle (int nthreads)
704 {
705 if (WORDACCESS_UNSAFE) X_LOCK (reqlock);
706 max_idle = nthreads <= 0 ? 1 : nthreads;
707 if (WORDACCESS_UNSAFE) X_UNLOCK (reqlock);
708 }
709
710 static void min_parallel (int nthreads)
711 {
712 if (wanted < nthreads)
713 wanted = nthreads;
714 }
715
716 static void max_parallel (int nthreads)
717 {
718 if (wanted > nthreads)
719 wanted = nthreads;
720
721 while (started > wanted)
722 end_thread ();
723 }
724
725 static void poll_wait ()
726 {
727 fd_set rfd;
728
729 while (nreqs)
730 {
731 int size;
732 if (WORDACCESS_UNSAFE) X_LOCK (reslock);
733 size = res_queue.size;
734 if (WORDACCESS_UNSAFE) X_UNLOCK (reslock);
735
736 if (size)
737 return;
738
739 maybe_start_thread ();
740
741 FD_ZERO(&rfd);
742 FD_SET(respipe [0], &rfd);
743
744 select (respipe [0] + 1, &rfd, 0, 0, 0);
745 }
746 }
747
748 static int poll_cb ()
749 {
750 dSP;
751 int count = 0;
752 int maxreqs = max_poll_reqs;
753 int do_croak = 0;
754 struct timeval tv_start, tv_now;
755 aio_req req;
756
757 if (max_poll_time)
758 gettimeofday (&tv_start, 0);
759
760 block_sig ();
761
762 for (;;)
763 {
764 for (;;)
765 {
766 maybe_start_thread ();
767
768 X_LOCK (reslock);
769 req = reqq_shift (&res_queue);
770
771 if (req)
772 {
773 --npending;
774
775 if (!res_queue.size)
776 {
777 /* read any signals sent by the worker threads */
778 char buf [4];
779 while (read (respipe [0], buf, 4) == 4)
780 ;
781 }
782 }
783
784 X_UNLOCK (reslock);
785
786 if (!req)
787 break;
788
789 --nreqs;
790
791 if (req->type == REQ_GROUP && req->size)
792 {
793 req->int1 = 1; /* mark request as delayed */
794 continue;
795 }
796 else
797 {
798 if (!req_invoke (req))
799 {
800 req_destroy (req);
801 unblock_sig ();
802 croak (0);
803 }
804
805 count++;
806 }
807
808 req_destroy (req);
809
810 if (maxreqs && !--maxreqs)
811 break;
812
813 if (max_poll_time)
814 {
815 gettimeofday (&tv_now, 0);
816
817 if (tvdiff (&tv_start, &tv_now) >= max_poll_time)
818 break;
819 }
820 }
821
822 if (nreqs <= max_outstanding)
823 break;
824
825 poll_wait ();
826
827 ++maxreqs;
828 }
829
830 unblock_sig ();
831 return count;
832 }
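/* poll_cb returns the number of requests whose callbacks were invoked in
 * this call, bounded by max_poll_reqs and max_poll_time; the outer loop only
 * repeats (after blocking in poll_wait) while more than max_outstanding
 * requests are still in flight, which is how the max_outstanding throttle is
 * enforced. */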
833
834 /*****************************************************************************/
835 /* work around various missing functions */
836
837 #if !HAVE_PREADWRITE
838 # define pread aio_pread
839 # define pwrite aio_pwrite
840
841 /*
842 * make our pread/pwrite safe against themselves, but not against
843 * normal read/write by using a mutex. slows down execution a lot,
844 * but that's your problem, not mine.
845 */
846 static mutex_t preadwritelock = X_MUTEX_INIT;
847
848 static ssize_t pread (int fd, void *buf, size_t count, off_t offset)
849 {
850 ssize_t res;
851 off_t ooffset;
852
853 X_LOCK (preadwritelock);
854 ooffset = lseek (fd, 0, SEEK_CUR);
855 lseek (fd, offset, SEEK_SET);
856 res = read (fd, buf, count);
857 lseek (fd, ooffset, SEEK_SET);
858 X_UNLOCK (preadwritelock);
859
860 return res;
861 }
862
863 static ssize_t pwrite (int fd, void *buf, size_t count, off_t offset)
864 {
865 ssize_t res;
866 off_t ooffset;
867
868 X_LOCK (preadwritelock);
869 ooffset = lseek (fd, 0, SEEK_CUR);
870 lseek (fd, offset, SEEK_SET);
871 res = write (fd, buf, count);
872 lseek (fd, ooffset, SEEK_SET); /* restore the original file position, as pread above does */
873 X_UNLOCK (preadwritelock);
874
875 return res;
876 }
877 #endif
878
879 #ifndef HAVE_FUTIMES
880
881 # define utimes(path,times) aio_utimes (path, times)
882 # define futimes(fd,times) aio_futimes (fd, times)
883
884 int aio_utimes (const char *filename, const struct timeval times[2])
885 {
886 if (times)
887 {
888 struct utimbuf buf;
889
890 buf.actime = times[0].tv_sec;
891 buf.modtime = times[1].tv_sec;
892
893 return utime (filename, &buf);
894 }
895 else
896 return utime (filename, 0);
897 }
898
899 int aio_futimes (int fd, const struct timeval tv[2])
900 {
901 errno = ENOSYS;
902 return -1;
903 }
904
905 #endif
906
907 #if !HAVE_FDATASYNC
908 # define fdatasync fsync
909 #endif
910
911 #if !HAVE_READAHEAD
912 # define readahead(fd,offset,count) aio_readahead (fd, offset, count, self)
913
914 static ssize_t aio_readahead (int fd, off_t offset, size_t count, worker *self)
915 {
916 size_t todo = count;
917 dBUF;
918
919 while (todo > 0)
920 {
921 size_t len = todo < AIO_BUFSIZE ? todo : AIO_BUFSIZE;
922
923 pread (fd, aio_buf, len, offset);
924 offset += len;
925 todo -= len;
926 }
927
928 errno = 0;
929 return count;
930 }
931
932 #endif
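/* the emulated readahead above simply reads the range into the scratch
 * buffer in AIO_BUFSIZE chunks to pull it into the page cache, then reports
 * success unconditionally (errno 0, full count), mirroring the advisory
 * nature of the real syscall. */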
933
934 #if !HAVE_READDIR_R
935 # define readdir_r aio_readdir_r
936
937 static mutex_t readdirlock = X_MUTEX_INIT;
938
939 static int readdir_r (DIR *dirp, X_DIRENT *ent, X_DIRENT **res)
940 {
941 X_DIRENT *e;
942 int errorno;
943
944 X_LOCK (readdirlock);
945
946 e = readdir (dirp);
947 errorno = errno;
948
949 if (e)
950 {
951 *res = ent;
952 strcpy (ent->d_name, e->d_name);
953 }
954 else
955 *res = 0;
956
957 X_UNLOCK (readdirlock);
958
959 errno = errorno;
960 return e ? 0 : -1;
961 }
962 #endif
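/* the readdir_r replacement serialises all readdir calls behind one global
 * mutex and copies only d_name into the caller's buffer, which is all
 * scandir_ below needs. */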
963
964 /* sendfile always needs emulation */
965 static ssize_t sendfile_ (int ofd, int ifd, off_t offset, size_t count, worker *self)
966 {
967 ssize_t res;
968
969 if (!count)
970 return 0;
971
972 #if HAVE_SENDFILE
973 # if __linux
974 res = sendfile (ofd, ifd, &offset, count);
975
976 # elif __freebsd
977 /*
978 * Of course, the freebsd sendfile is a dire hack with no thoughts
979 * wasted on making it similar to other I/O functions.
980 */
981 {
982 off_t sbytes;
983 res = sendfile (ifd, ofd, offset, count, 0, &sbytes, 0);
984
985 if (res < 0 && sbytes)
986 /* maybe only on EAGAIN: as usual, the manpage leaves you guessing */
987 res = sbytes;
988 }
989
990 # elif __hpux
991 res = sendfile (ofd, ifd, offset, count, 0, 0);
992
993 # elif __solaris
994 {
995 struct sendfilevec vec;
996 size_t sbytes;
997
998 vec.sfv_fd = ifd;
999 vec.sfv_flag = 0;
1000 vec.sfv_off = offset;
1001 vec.sfv_len = count;
1002
1003 res = sendfilev (ofd, &vec, 1, &sbytes);
1004
1005 if (res < 0 && sbytes)
1006 res = sbytes;
1007 }
1008
1009 # endif
1010 #else
1011 res = -1;
1012 errno = ENOSYS;
1013 #endif
1014
1015 if (res < 0
1016 && (errno == ENOSYS || errno == EINVAL || errno == ENOTSOCK
1017 #if __solaris
1018 || errno == EAFNOSUPPORT || errno == EPROTOTYPE
1019 #endif
1020 )
1021 )
1022 {
1023 /* emulate sendfile. this is a major pain in the ass */
1024 dBUF;
1025
1026 res = 0;
1027
1028 while (count)
1029 {
1030 ssize_t cnt;
1031
1032 cnt = pread (ifd, aio_buf, count > AIO_BUFSIZE ? AIO_BUFSIZE : count, offset);
1033
1034 if (cnt <= 0)
1035 {
1036 if (cnt && !res) res = -1;
1037 break;
1038 }
1039
1040 cnt = write (ofd, aio_buf, cnt);
1041
1042 if (cnt <= 0)
1043 {
1044 if (cnt && !res) res = -1;
1045 break;
1046 }
1047
1048 offset += cnt;
1049 res += cnt;
1050 count -= cnt;
1051 }
1052 }
1053
1054 return res;
1055 }
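/* on success sendfile_ returns the number of bytes actually copied, which
 * may be less than count; the emulation path copies through the per-request
 * dBUF buffer in AIO_BUFSIZE chunks and reports -1 only if nothing could be
 * transferred at all. */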
1056
1057 /* read a full directory */
1058 static void scandir_ (aio_req req, worker *self)
1059 {
1060 DIR *dirp;
1061 union
1062 {
1063 X_DIRENT d;
1064 char b [offsetof (X_DIRENT, d_name) + NAME_MAX + 1];
1065 } *u;
1066 X_DIRENT *entp;
1067 char *name, *names;
1068 int memlen = 4096;
1069 int memofs = 0;
1070 int res = 0;
1071
1072 X_LOCK (wrklock);
1073 self->dirp = dirp = opendir (req->ptr1);
1074 self->dbuf = u = malloc (sizeof (*u));
1075 req->flags |= FLAG_PTR2_FREE;
1076 req->ptr2 = names = malloc (memlen);
1077 X_UNLOCK (wrklock);
1078
1079 if (dirp && u && names)
1080 for (;;)
1081 {
1082 errno = 0;
1083 readdir_r (dirp, &u->d, &entp);
1084
1085 if (!entp)
1086 break;
1087
1088 name = entp->d_name;
1089
1090 if (name [0] != '.' || (name [1] && (name [1] != '.' || name [2])))
1091 {
1092 int len = strlen (name) + 1;
1093
1094 res++;
1095
1096 while (memofs + len > memlen)
1097 {
1098 memlen *= 2;
1099 X_LOCK (wrklock);
1100 req->ptr2 = names = realloc (names, memlen);
1101 X_UNLOCK (wrklock);
1102
1103 if (!names)
1104 break;
1105 }
1106
if (!names)
{
res = -1; /* realloc failed: fail the request instead of writing through a null buffer */
break;
}

1107 memcpy (names + memofs, name, len);
1108 memofs += len;
1109 }
1110 }
1111
1112 if (errno)
1113 res = -1;
1114
1115 req->result = res;
1116 }
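/* a minimal stand-alone sketch (not part of AIO.xs) of the REQ_READDIR
 * result format produced above and consumed by req_invoke: for a directory
 * containing "foo" and "bar", req->result is 2 and req->ptr2 points at the
 * bytes "foo\0bar\0"; the names below are a hypothetical stand-in. */
#include <stdio.h>
#include <string.h>

int main (void)
{
  const char names [] = "foo\0bar\0"; /* stands in for req->ptr2 */
  int count = 2;                      /* stands in for req->result */
  const char *p = names;
  int i;

  for (i = 0; i < count; ++i)
    {
      printf ("%s\n", p);
      p += strlen (p) + 1;
    }

  return 0;
}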
1117
1118 /*****************************************************************************/
1119
1120 X_THREAD_PROC (aio_proc)
1121 {
1122 {//D
1123 aio_req req;
1124 struct timespec ts;
1125 worker *self = (worker *)thr_arg;
1126
1127 /* try to distribute timeouts somewhat randomly */
1128 ts.tv_nsec = ((unsigned long)self & 1023UL) * (1000000000UL / 1024UL);
1129
1130 for (;;)
1131 {
1132 ts.tv_sec = time (0) + IDLE_TIMEOUT;
1133
1134 X_LOCK (reqlock);
1135
1136 for (;;)
1137 {
1138 self->req = req = reqq_shift (&req_queue);
1139
1140 if (req)
1141 break;
1142
1143 ++idle;
1144
1145 if (X_COND_TIMEDWAIT (reqwait, reqlock, ts)
1146 == ETIMEDOUT)
1147 {
1148 if (idle > max_idle)
1149 {
1150 --idle;
1151 X_UNLOCK (reqlock);
1152 X_LOCK (wrklock);
1153 --started;
1154 X_UNLOCK (wrklock);
1155 goto quit;
1156 }
1157
1158 /* we are allowed to idle, so do so without any timeout */
1159 X_COND_WAIT (reqwait, reqlock);
1160 ts.tv_sec = time (0) + IDLE_TIMEOUT;
1161 }
1162
1163 --idle;
1164 }
1165
1166 --nready;
1167
1168 X_UNLOCK (reqlock);
1169
1170 errno = 0; /* strictly unnecessary */
1171
1172 if (!(req->flags & FLAG_CANCELLED))
1173 switch (req->type)
1174 {
1175 case REQ_READ: req->result = req->offs >= 0
1176 ? pread (req->int1, req->ptr1, req->size, req->offs)
1177 : read (req->int1, req->ptr1, req->size); break;
1178 case REQ_WRITE: req->result = req->offs >= 0
1179 ? pwrite (req->int1, req->ptr1, req->size, req->offs)
1180 : write (req->int1, req->ptr1, req->size); break;
1181
1182 case REQ_READAHEAD: req->result = readahead (req->int1, req->offs, req->size); break;
1183 case REQ_SENDFILE: req->result = sendfile_ (req->int1, req->int2, req->offs, req->size, self); break;
1184
1185 case REQ_STAT: req->result = stat (req->ptr1, (Stat_t *)req->ptr2); break;
1186 case REQ_LSTAT: req->result = lstat (req->ptr1, (Stat_t *)req->ptr2); break;
1187 case REQ_FSTAT: req->result = fstat (req->int1, (Stat_t *)req->ptr2); break;
1188
1189 case REQ_CHOWN: req->result = chown (req->ptr1, req->int2, req->int3); break;
1190 case REQ_FCHOWN: req->result = fchown (req->int1, req->int2, req->int3); break;
1191 case REQ_CHMOD: req->result = chmod (req->ptr1, req->mode); break;
1192 case REQ_FCHMOD: req->result = fchmod (req->int1, req->mode); break;
1193 case REQ_TRUNCATE: req->result = truncate (req->ptr1, req->offs); break;
1194 case REQ_FTRUNCATE: req->result = ftruncate (req->int1, req->offs); break;
1195
1196 case REQ_OPEN: req->result = open (req->ptr1, req->int1, req->mode); break;
1197 case REQ_CLOSE: req->result = close (req->int1); break;
1198 case REQ_UNLINK: req->result = unlink (req->ptr1); break;
1199 case REQ_RMDIR: req->result = rmdir (req->ptr1); break;
1200 case REQ_MKDIR: req->result = mkdir (req->ptr1, req->mode); break;
1201 case REQ_RENAME: req->result = rename (req->ptr2, req->ptr1); break;
1202 case REQ_LINK: req->result = link (req->ptr2, req->ptr1); break;
1203 case REQ_SYMLINK: req->result = symlink (req->ptr2, req->ptr1); break;
1204 case REQ_MKNOD: req->result = mknod (req->ptr1, req->mode, (dev_t)req->offs); break; /* aio_mknod stores the path in ptr1 */
1205 case REQ_READLINK: req->result = readlink (req->ptr2, req->ptr1, NAME_MAX); break;
1206
1207 case REQ_FDATASYNC: req->result = fdatasync (req->int1); break;
1208 case REQ_FSYNC: req->result = fsync (req->int1); break;
1209 case REQ_READDIR: scandir_ (req, self); break;
1210
1211 case REQ_BUSY:
1212 #ifdef _WIN32
1213 Sleep (req->nv1 * 1000.);
1214 #else
1215 {
1216 struct timeval tv;
1217
1218 tv.tv_sec = req->nv1;
1219 tv.tv_usec = (req->nv1 - tv.tv_sec) * 1000000.;
1220
1221 req->result = select (0, 0, 0, 0, &tv);
1222 }
1223 #endif
1224 break;
1225
1226 case REQ_UTIME:
1227 case REQ_FUTIME:
1228 {
1229 struct timeval tv[2];
1230 struct timeval *times;
1231
1232 if (req->nv1 != -1. || req->nv2 != -1.)
1233 {
1234 tv[0].tv_sec = req->nv1;
1235 tv[0].tv_usec = (req->nv1 - tv[0].tv_sec) * 1000000.;
1236 tv[1].tv_sec = req->nv2;
1237 tv[1].tv_usec = (req->nv2 - tv[1].tv_sec) * 1000000.;
1238
1239 times = tv;
1240 }
1241 else
1242 times = 0;
1243
1244
1245 req->result = req->type == REQ_FUTIME
1246 ? futimes (req->int1, times)
1247 : utimes (req->ptr1, times);
1248 }
1249
1250 case REQ_GROUP:
1251 case REQ_NOP:
1252 break;
1253
1254 case REQ_QUIT:
1255 goto quit;
1256
1257 default:
1258 req->result = -1;
1259 break;
1260 }
1261
1262 req->errorno = errno;
1263
1264 X_LOCK (reslock);
1265
1266 ++npending;
1267
1268 if (!reqq_push (&res_queue, req))
1269 {
1270 /* write a dummy byte to the pipe so fh becomes ready */
1271 write (respipe [1], &respipe, 1);
1272
1273 /* optionally signal the main thread asynchronously */
1274 if (main_sig)
1275 pthread_kill (main_tid, main_sig);
1276 }
1277
1278 self->req = 0;
1279 worker_clear (self);
1280
1281 X_UNLOCK (reslock);
1282 }
1283
1284 quit:
1285 X_LOCK (wrklock);
1286 worker_free (self);
1287 X_UNLOCK (wrklock);
1288
1289 return 0;
1290 }//D
1291 }
1292
1293 /*****************************************************************************/
1294
1295 static void atfork_prepare (void)
1296 {
1297 X_LOCK (wrklock);
1298 X_LOCK (reqlock);
1299 X_LOCK (reslock);
1300 #if !HAVE_PREADWRITE
1301 X_LOCK (preadwritelock);
1302 #endif
1303 #if !HAVE_READDIR_R
1304 X_LOCK (readdirlock);
1305 #endif
1306 }
1307
1308 static void atfork_parent (void)
1309 {
1310 #if !HAVE_READDIR_R
1311 X_UNLOCK (readdirlock);
1312 #endif
1313 #if !HAVE_PREADWRITE
1314 X_UNLOCK (preadwritelock);
1315 #endif
1316 X_UNLOCK (reslock);
1317 X_UNLOCK (reqlock);
1318 X_UNLOCK (wrklock);
1319 }
1320
1321 static void atfork_child (void)
1322 {
1323 aio_req prv;
1324
1325 while (prv = reqq_shift (&req_queue))
1326 req_destroy (prv);
1327
1328 while (prv = reqq_shift (&res_queue))
1329 req_destroy (prv);
1330
1331 while (wrk_first.next != &wrk_first)
1332 {
1333 worker *wrk = wrk_first.next;
1334
1335 if (wrk->req)
1336 req_destroy (wrk->req);
1337
1338 worker_clear (wrk);
1339 worker_free (wrk);
1340 }
1341
1342 started = 0;
1343 idle = 0;
1344 nreqs = 0;
1345 nready = 0;
1346 npending = 0;
1347
1348 close (respipe [0]);
1349 close (respipe [1]);
1350
1351 if (!create_pipe (respipe))
1352 croak ("cannot set result pipe to nonblocking mode");
1353
1354 atfork_parent ();
1355 }
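/* fork handling: atfork_prepare takes every mutex so no lock is held
 * mid-operation across fork, atfork_parent simply releases them again, and
 * the child (where the worker threads no longer exist) additionally discards
 * all queued requests and worker records, resets the counters and recreates
 * the result pipe before releasing the locks via atfork_parent. */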
1356
1357 #define dREQ \
1358 aio_req req; \
1359 int req_pri = next_pri; \
1360 next_pri = DEFAULT_PRI + PRI_BIAS; \
1361 \
1362 if (SvOK (callback) && !SvROK (callback)) \
1363 croak ("callback must be undef or of reference type"); \
1364 \
1365 Newz (0, req, 1, aio_cb); \
1366 if (!req) \
1367 croak ("out of memory during aio_req allocation"); \
1368 \
1369 req->callback = newSVsv (callback); \
1370 req->pri = req_pri
1371
1372 #define REQ_SEND \
1373 req_send (req); \
1374 \
1375 if (GIMME_V != G_VOID) \
1376 XPUSHs (req_sv (req, AIO_REQ_KLASS));
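/* every aio_* XSUB below follows the same pattern: dREQ allocates and
 * zero-initialises an aio_cb, copies the callback and consumes the one-shot
 * priority set up by aioreq_pri/aioreq_nice, the XSUB fills in the
 * type-specific fields, and REQ_SEND queues the request and, in non-void
 * context, returns the IO::AIO::REQ object to perl. */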
1377
1378 MODULE = IO::AIO PACKAGE = IO::AIO
1379
1380 PROTOTYPES: ENABLE
1381
1382 BOOT:
1383 {
1384 HV *stash = gv_stashpv ("IO::AIO", 1);
1385
1386 newCONSTSUB (stash, "EXDEV", newSViv (EXDEV));
1387 newCONSTSUB (stash, "O_RDONLY", newSViv (O_RDONLY));
1388 newCONSTSUB (stash, "O_WRONLY", newSViv (O_WRONLY));
1389 newCONSTSUB (stash, "O_CREAT", newSViv (O_CREAT));
1390 newCONSTSUB (stash, "O_TRUNC", newSViv (O_TRUNC));
1391 #ifdef _WIN32
1392 X_MUTEX_CHECK (wrklock);
1393 X_MUTEX_CHECK (reslock);
1394 X_MUTEX_CHECK (reqlock);
1395 X_MUTEX_CHECK (reqwait);
1396 X_MUTEX_CHECK (preadwritelock);
1397 X_MUTEX_CHECK (readdirlock);
1398 #else
1399 newCONSTSUB (stash, "S_IFIFO", newSViv (S_IFIFO));
1400 newCONSTSUB (stash, "SIGIO", newSViv (SIGIO));
1401 #endif
1402
1403 if (!create_pipe (respipe))
1404 croak ("cannot set result pipe to nonblocking mode");
1405
1406 X_THREAD_ATFORK (atfork_prepare, atfork_parent, atfork_child);
1407 }
1408
1409 void
1410 max_poll_reqs (int nreqs)
1411 PROTOTYPE: $
1412 CODE:
1413 max_poll_reqs = nreqs;
1414
1415 void
1416 max_poll_time (double nseconds)
1417 PROTOTYPE: $
1418 CODE:
1419 max_poll_time = nseconds * AIO_TICKS;
1420
1421 void
1422 min_parallel (int nthreads)
1423 PROTOTYPE: $
1424
1425 void
1426 max_parallel (int nthreads)
1427 PROTOTYPE: $
1428
1429 void
1430 max_idle (int nthreads)
1431 PROTOTYPE: $
1432 CODE:
1433 set_max_idle (nthreads);
1434
1435 int
1436 max_outstanding (int maxreqs)
1437 PROTOTYPE: $
1438 CODE:
1439 RETVAL = max_outstanding;
1440 max_outstanding = maxreqs;
1441 OUTPUT:
1442 RETVAL
1443
1444 void
1445 aio_open (SV8 *pathname, int flags, int mode, SV *callback=&PL_sv_undef)
1446 PROTOTYPE: $$$;$
1447 PPCODE:
1448 {
1449 dREQ;
1450
1451 req->type = REQ_OPEN;
1452 req->sv1 = newSVsv (pathname);
1453 req->ptr1 = SvPVbyte_nolen (req->sv1);
1454 req->int1 = flags;
1455 req->mode = mode;
1456
1457 REQ_SEND;
1458 }
1459
1460 void
1461 aio_close (SV *fh, SV *callback=&PL_sv_undef)
1462 PROTOTYPE: $;$
1463 ALIAS:
1464 aio_close = REQ_CLOSE
1465 aio_fsync = REQ_FSYNC
1466 aio_fdatasync = REQ_FDATASYNC
1467 PPCODE:
1468 {
1469 dREQ;
1470
1471 req->type = ix;
1472 req->sv1 = newSVsv (fh);
1473 req->int1 = PerlIO_fileno (IoIFP (sv_2io (fh)));
1474
1475 REQ_SEND;
1476 }
1477
1478 void
1479 aio_read (SV *fh, SV *offset, SV *length, SV8 *data, IV dataoffset, SV *callback=&PL_sv_undef)
1480 ALIAS:
1481 aio_read = REQ_READ
1482 aio_write = REQ_WRITE
1483 PROTOTYPE: $$$$$;$
1484 PPCODE:
1485 {
1486 STRLEN svlen;
1487 char *svptr = SvPVbyte (data, svlen);
1488 UV len = SvUV (length);
1489
1490 SvUPGRADE (data, SVt_PV);
1491 SvPOK_on (data);
1492
1493 if (dataoffset < 0)
1494 dataoffset += svlen;
1495
1496 if (dataoffset < 0 || dataoffset > svlen)
1497 croak ("dataoffset outside of data scalar");
1498
1499 if (ix == REQ_WRITE)
1500 {
1501 /* write: check length and adjust. */
1502 if (!SvOK (length) || len + dataoffset > svlen)
1503 len = svlen - dataoffset;
1504 }
1505 else
1506 {
1507 /* read: grow scalar as necessary */
1508 svptr = SvGROW (data, len + dataoffset + 1);
1509 }
1510
1511 if (len < 0)
1512 croak ("length must not be negative");
1513
1514 {
1515 dREQ;
1516
1517 req->type = ix;
1518 req->sv1 = newSVsv (fh);
1519 req->int1 = PerlIO_fileno (ix == REQ_READ ? IoIFP (sv_2io (fh))
1520 : IoOFP (sv_2io (fh)));
1521 req->offs = SvOK (offset) ? SvVAL64 (offset) : -1;
1522 req->size = len;
1523 req->sv2 = SvREFCNT_inc (data);
1524 req->ptr1 = (char *)svptr + dataoffset;
1525 req->stroffset = dataoffset;
1526
1527 if (!SvREADONLY (data))
1528 {
1529 SvREADONLY_on (data);
1530 req->flags |= FLAG_SV2_RO_OFF;
1531 }
1532
1533 REQ_SEND;
1534 }
1535 }
1536
1537 void
1538 aio_readlink (SV8 *path, SV *callback=&PL_sv_undef)
1539 PROTOTYPE: $;$
1540 PPCODE:
1541 {
1542 SV *data;
1543 dREQ;
1544
1545 data = newSV (NAME_MAX);
1546 SvPOK_on (data);
1547
1548 req->type = REQ_READLINK;
1549 req->sv1 = newSVsv (path);
1550 req->ptr2 = SvPVbyte_nolen (req->sv1);
1551 req->sv2 = data;
1552 req->ptr1 = SvPVbyte_nolen (data);
1553
1554 REQ_SEND;
1555 }
1556
1557 void
1558 aio_sendfile (SV *out_fh, SV *in_fh, SV *in_offset, UV length, SV *callback=&PL_sv_undef)
1559 PROTOTYPE: $$$$;$
1560 PPCODE:
1561 {
1562 dREQ;
1563
1564 req->type = REQ_SENDFILE;
1565 req->sv1 = newSVsv (out_fh);
1566 req->int1 = PerlIO_fileno (IoIFP (sv_2io (out_fh)));
1567 req->sv2 = newSVsv (in_fh);
1568 req->int2 = PerlIO_fileno (IoIFP (sv_2io (in_fh)));
1569 req->offs = SvVAL64 (in_offset);
1570 req->size = length;
1571
1572 REQ_SEND;
1573 }
1574
1575 void
1576 aio_readahead (SV *fh, SV *offset, IV length, SV *callback=&PL_sv_undef)
1577 PROTOTYPE: $$$;$
1578 PPCODE:
1579 {
1580 dREQ;
1581
1582 req->type = REQ_READAHEAD;
1583 req->sv1 = newSVsv (fh);
1584 req->int1 = PerlIO_fileno (IoIFP (sv_2io (fh)));
1585 req->offs = SvVAL64 (offset);
1586 req->size = length;
1587
1588 REQ_SEND;
1589 }
1590
1591 void
1592 aio_stat (SV8 *fh_or_path, SV *callback=&PL_sv_undef)
1593 ALIAS:
1594 aio_stat = REQ_STAT
1595 aio_lstat = REQ_LSTAT
1596 PPCODE:
1597 {
1598 dREQ;
1599
1600 req->ptr2 = malloc (sizeof (Stat_t));
1601 if (!req->ptr2)
1602 {
1603 req_destroy (req);
1604 croak ("out of memory during aio_stat statdata allocation");
1605 }
1606
1607 req->flags |= FLAG_PTR2_FREE;
1608 req->sv1 = newSVsv (fh_or_path);
1609
1610 if (SvPOK (fh_or_path))
1611 {
1612 req->type = ix;
1613 req->ptr1 = SvPVbyte_nolen (req->sv1);
1614 }
1615 else
1616 {
1617 req->type = REQ_FSTAT;
1618 req->int1 = PerlIO_fileno (IoIFP (sv_2io (fh_or_path)));
1619 }
1620
1621 REQ_SEND;
1622 }
1623
1624 void
1625 aio_utime (SV8 *fh_or_path, SV *atime, SV *mtime, SV *callback=&PL_sv_undef)
1626 PPCODE:
1627 {
1628 dREQ;
1629
1630 req->nv1 = SvOK (atime) ? SvNV (atime) : -1.;
1631 req->nv2 = SvOK (mtime) ? SvNV (mtime) : -1.;
1632 req->sv1 = newSVsv (fh_or_path);
1633
1634 if (SvPOK (fh_or_path))
1635 {
1636 req->type = REQ_UTIME;
1637 req->ptr1 = SvPVbyte_nolen (req->sv1);
1638 }
1639 else
1640 {
1641 req->type = REQ_FUTIME;
1642 req->int1 = PerlIO_fileno (IoIFP (sv_2io (fh_or_path)));
1643 }
1644
1645 REQ_SEND;
1646 }
1647
1648 void
1649 aio_truncate (SV8 *fh_or_path, SV *offset, SV *callback=&PL_sv_undef)
1650 PPCODE:
1651 {
1652 dREQ;
1653
1654 req->sv1 = newSVsv (fh_or_path);
1655 req->offs = SvOK (offset) ? SvVAL64 (offset) : -1;
1656
1657 if (SvPOK (fh_or_path))
1658 {
1659 req->type = REQ_TRUNCATE;
1660 req->ptr1 = SvPVbyte_nolen (req->sv1);
1661 }
1662 else
1663 {
1664 req->type = REQ_FTRUNCATE;
1665 req->int1 = PerlIO_fileno (IoIFP (sv_2io (fh_or_path)));
1666 }
1667
1668 REQ_SEND;
1669 }
1670
1671 void
1672 aio_chmod (SV8 *fh_or_path, int mode, SV *callback=&PL_sv_undef)
1673 PPCODE:
1674 {
1675 dREQ;
1676
1677 req->mode = mode;
1678 req->sv1 = newSVsv (fh_or_path);
1679
1680 if (SvPOK (fh_or_path))
1681 {
1682 req->type = REQ_CHMOD;
1683 req->ptr1 = SvPVbyte_nolen (req->sv1);
1684 }
1685 else
1686 {
1687 req->type = REQ_FCHMOD;
1688 req->int1 = PerlIO_fileno (IoIFP (sv_2io (fh_or_path)));
1689 }
1690
1691 REQ_SEND;
1692 }
1693
1694 void
1695 aio_chown (SV8 *fh_or_path, SV *uid, SV *gid, SV *callback=&PL_sv_undef)
1696 PPCODE:
1697 {
1698 dREQ;
1699
1700 req->int2 = SvOK (uid) ? SvIV (uid) : -1;
1701 req->int3 = SvOK (gid) ? SvIV (gid) : -1;
1702 req->sv1 = newSVsv (fh_or_path);
1703
1704 if (SvPOK (fh_or_path))
1705 {
1706 req->type = REQ_CHOWN;
1707 req->ptr1 = SvPVbyte_nolen (req->sv1);
1708 }
1709 else
1710 {
1711 req->type = REQ_FCHOWN;
1712 req->int1 = PerlIO_fileno (IoIFP (sv_2io (fh_or_path)));
1713 }
1714
1715 REQ_SEND;
1716 }
1717
1718 void
1719 aio_unlink (SV8 *pathname, SV *callback=&PL_sv_undef)
1720 ALIAS:
1721 aio_unlink = REQ_UNLINK
1722 aio_rmdir = REQ_RMDIR
1723 aio_readdir = REQ_READDIR
1724 PPCODE:
1725 {
1726 dREQ;
1727
1728 req->type = ix;
1729 req->sv1 = newSVsv (pathname);
1730 req->ptr1 = SvPVbyte_nolen (req->sv1);
1731
1732 REQ_SEND;
1733 }
1734
1735 void
1736 aio_mkdir (SV8 *pathname, int mode, SV *callback=&PL_sv_undef)
1737 PPCODE:
1738 {
1739 dREQ;
1740
1741 req->type = REQ_MKDIR;
1742 req->sv1 = newSVsv (pathname);
1743 req->ptr1 = SvPVbyte_nolen (req->sv1);
1744 req->mode = mode;
1745
1746 REQ_SEND;
1747 }
1748
1749 void
1750 aio_link (SV8 *oldpath, SV8 *newpath, SV *callback=&PL_sv_undef)
1751 ALIAS:
1752 aio_link = REQ_LINK
1753 aio_symlink = REQ_SYMLINK
1754 aio_rename = REQ_RENAME
1755 PPCODE:
1756 {
1757 dREQ;
1758
1759 req->type = ix;
1760 req->sv2 = newSVsv (oldpath);
1761 req->ptr2 = SvPVbyte_nolen (req->sv2);
1762 req->sv1 = newSVsv (newpath);
1763 req->ptr1 = SvPVbyte_nolen (req->sv1);
1764
1765 REQ_SEND;
1766 }
1767
1768 void
1769 aio_mknod (SV8 *pathname, int mode, UV dev, SV *callback=&PL_sv_undef)
1770 PPCODE:
1771 {
1772 dREQ;
1773
1774 req->type = REQ_MKNOD;
1775 req->sv1 = newSVsv (pathname);
1776 req->ptr1 = SvPVbyte_nolen (req->sv1);
1777 req->mode = (mode_t)mode;
1778 req->offs = dev;
1779
1780 REQ_SEND;
1781 }
1782
1783 void
1784 aio_busy (double delay, SV *callback=&PL_sv_undef)
1785 PPCODE:
1786 {
1787 dREQ;
1788
1789 req->type = REQ_BUSY;
1790 req->nv1 = delay < 0. ? 0. : delay;
1791
1792 REQ_SEND;
1793 }
1794
1795 void
1796 aio_group (SV *callback=&PL_sv_undef)
1797 PROTOTYPE: ;$
1798 PPCODE:
1799 {
1800 dREQ;
1801
1802 req->type = REQ_GROUP;
1803
1804 req_send (req);
1805 XPUSHs (req_sv (req, AIO_GRP_KLASS));
1806 }
1807
1808 void
1809 aio_nop (SV *callback=&PL_sv_undef)
1810 PPCODE:
1811 {
1812 dREQ;
1813
1814 req->type = REQ_NOP;
1815
1816 REQ_SEND;
1817 }
1818
1819 int
1820 aioreq_pri (int pri = 0)
1821 PROTOTYPE: ;$
1822 CODE:
1823 RETVAL = next_pri - PRI_BIAS;
1824 if (items > 0)
1825 {
1826 if (pri < PRI_MIN) pri = PRI_MIN;
1827 if (pri > PRI_MAX) pri = PRI_MAX;
1828 next_pri = pri + PRI_BIAS;
1829 }
1830 OUTPUT:
1831 RETVAL
1832
1833 void
1834 aioreq_nice (int nice = 0)
1835 CODE:
1836 nice = next_pri - PRI_BIAS - nice; /* unbias before clamping to PRI_MIN..PRI_MAX */
1837 if (nice < PRI_MIN) nice = PRI_MIN;
1838 if (nice > PRI_MAX) nice = PRI_MAX;
1839 next_pri = nice + PRI_BIAS;
1840
1841 void
1842 flush ()
1843 PROTOTYPE:
1844 CODE:
1845 while (nreqs)
1846 {
1847 poll_wait ();
1848 poll_cb ();
1849 }
1850
1851 int
1852 poll()
1853 PROTOTYPE:
1854 CODE:
1855 poll_wait ();
1856 RETVAL = poll_cb ();
1857 OUTPUT:
1858 RETVAL
1859
1860 int
1861 poll_fileno()
1862 PROTOTYPE:
1863 CODE:
1864 RETVAL = respipe [0];
1865 OUTPUT:
1866 RETVAL
1867
1868 int
1869 poll_cb(...)
1870 PROTOTYPE:
1871 CODE:
1872 RETVAL = poll_cb ();
1873 OUTPUT:
1874 RETVAL
1875
1876 void
1877 poll_wait()
1878 PROTOTYPE:
1879 CODE:
1880 poll_wait ();
1881
1882 void
1883 setsig (int signum = SIGIO)
1884 PROTOTYPE: ;$
1885 CODE:
1886 {
1887 if (block_sig_level)
1888 croak ("cannot call IO::AIO::setsig from within aio_block/callback");
1889
1890 X_LOCK (reslock);
1891 main_tid = pthread_self ();
1892 main_sig = signum;
1893 X_UNLOCK (reslock);
1894
1895 if (main_sig && npending)
1896 pthread_kill (main_tid, main_sig);
1897 }
1898
1899 void
1900 aio_block (SV *cb)
1901 PROTOTYPE: &
1902 PPCODE:
1903 {
1904 int count;
1905
1906 block_sig ();
1907 PUSHMARK (SP);
1908 PUTBACK;
1909 count = call_sv (cb, GIMME_V | G_NOARGS | G_EVAL);
1910 SPAGAIN;
1911 unblock_sig ();
1912
1913 if (SvTRUE (ERRSV))
1914 croak (0);
1915
1916 XSRETURN (count);
1917 }
1918
1919 int
1920 nreqs()
1921 PROTOTYPE:
1922 CODE:
1923 RETVAL = nreqs;
1924 OUTPUT:
1925 RETVAL
1926
1927 int
1928 nready()
1929 PROTOTYPE:
1930 CODE:
1931 RETVAL = get_nready ();
1932 OUTPUT:
1933 RETVAL
1934
1935 int
1936 npending()
1937 PROTOTYPE:
1938 CODE:
1939 RETVAL = get_npending ();
1940 OUTPUT:
1941 RETVAL
1942
1943 int
1944 nthreads()
1945 PROTOTYPE:
1946 CODE:
1947 if (WORDACCESS_UNSAFE) X_LOCK (wrklock);
1948 RETVAL = started;
1949 if (WORDACCESS_UNSAFE) X_UNLOCK (wrklock);
1950 OUTPUT:
1951 RETVAL
1952
1953 PROTOTYPES: DISABLE
1954
1955 MODULE = IO::AIO PACKAGE = IO::AIO::REQ
1956
1957 void
1958 cancel (aio_req_ornot req)
1959 CODE:
1960 req_cancel (req);
1961
1962 void
1963 cb (aio_req_ornot req, SV *callback=&PL_sv_undef)
1964 CODE:
1965 SvREFCNT_dec (req->callback);
1966 req->callback = newSVsv (callback);
1967
1968 MODULE = IO::AIO PACKAGE = IO::AIO::GRP
1969
1970 void
1971 add (aio_req grp, ...)
1972 PPCODE:
1973 {
1974 int i;
1975 aio_req req;
1976
1977 if (main_sig && !block_sig_level)
1978 croak ("aio_group->add called outside aio_block/callback context while IO::AIO::setsig is in use");
1979
1980 if (grp->int1 == 2)
1981 croak ("cannot add requests to IO::AIO::GRP after the group finished");
1982
1983 for (i = 1; i < items; ++i )
1984 {
1985 if (GIMME_V != G_VOID)
1986 XPUSHs (sv_2mortal (newSVsv (ST (i))));
1987
1988 req = SvAIO_REQ (ST (i));
1989
1990 if (req)
1991 {
1992 ++grp->size;
1993 req->grp = grp;
1994
1995 req->grp_prev = 0;
1996 req->grp_next = grp->grp_first;
1997
1998 if (grp->grp_first)
1999 grp->grp_first->grp_prev = req;
2000
2001 grp->grp_first = req;
2002 }
2003 }
2004 }
2005
2006 void
2007 cancel_subs (aio_req_ornot req)
2008 CODE:
2009 req_cancel_subs (req);
2010
2011 void
2012 result (aio_req grp, ...)
2013 CODE:
2014 {
2015 int i;
2016 AV *av;
2017
2018 grp->errorno = errno;
2019
2020 av = newAV ();
2021
2022 for (i = 1; i < items; ++i )
2023 av_push (av, newSVsv (ST (i)));
2024
2025 SvREFCNT_dec (grp->sv1);
2026 grp->sv1 = (SV *)av;
2027 }
2028
2029 void
2030 errno (aio_req grp, int errorno = errno)
2031 CODE:
2032 grp->errorno = errorno;
2033
2034 void
2035 limit (aio_req grp, int limit)
2036 CODE:
2037 grp->int2 = limit;
2038 aio_grp_feed (grp);
2039
2040 void
2041 feed (aio_req grp, SV *callback=&PL_sv_undef)
2042 CODE:
2043 {
2044 SvREFCNT_dec (grp->sv2);
2045 grp->sv2 = newSVsv (callback);
2046
2047 if (grp->int2 <= 0)
2048 grp->int2 = 2;
2049
2050 aio_grp_feed (grp);
2051 }
2052