/cvs/IO-AIO/AIO.xs
Revision: 1.105
Committed: Sun Jul 8 11:12:15 2007 UTC by root
Branch: MAIN
CVS Tags: rel-2_4
Changes since 1.104: +4 -4 lines
Log Message:
*** empty log message ***

File Contents

1 #include "xthread.h"
2
3 #include <errno.h>
4
5 #include "EXTERN.h"
6 #include "perl.h"
7 #include "XSUB.h"
8
9 #include <stddef.h>
10 #include <stdlib.h>
11 #include <errno.h>
12 #include <sys/types.h>
13 #include <sys/stat.h>
14 #include <limits.h>
15 #include <fcntl.h>
16 #include <sched.h>
17
18 #ifdef _WIN32
19
20 # define SIGIO 0
21 typedef Direntry_t X_DIRENT;
22 #undef malloc
23 #undef free
24
25 // perl overrides all those nice win32 functions
26 # undef open
27 # undef read
28 # undef write
29 # undef send
30 # undef recv
31 # undef stat
32 # undef fstat
33 # define lstat stat
34 # undef truncate
35 # undef ftruncate
36 # undef open
37 # undef close
38 # undef unlink
39 # undef rmdir
40 # undef rename
41 # undef lseek
42
43 # define chown(a,b,c) (errno = ENOSYS, -1)
44 # define fchown(a,b,c) (errno = ENOSYS, -1)
45 # define fchmod(a,b) (errno = ENOSYS, -1)
46 # define symlink(a,b) (errno = ENOSYS, -1)
47 # define readlink(a,b,c) (errno = ENOSYS, -1)
48 # define mknod(a,b,c) (errno = ENOSYS, -1)
49 # define truncate(a,b) (errno = ENOSYS, -1)
50 # define ftruncate(fd,o) chsize ((fd), (o))
51 # define fsync(fd) _commit (fd)
52 # define opendir(fd) (errno = ENOSYS, 0)
53 # define readdir(fd) (errno = ENOSYS, -1)
54 # define closedir(fd) (errno = ENOSYS, -1)
55 # define mkdir(a,b) mkdir (a)
56
57 #else
58
59 # include "autoconf/config.h"
60 # include <sys/time.h>
61 # include <sys/select.h>
62 # include <unistd.h>
63 # include <utime.h>
64 # include <signal.h>
65 typedef struct dirent X_DIRENT;
66
67 #endif
68
69 #if HAVE_SENDFILE
70 # if __linux
71 # include <sys/sendfile.h>
72 # elif __freebsd
73 # include <sys/socket.h>
74 # include <sys/uio.h>
75 # elif __hpux
76 # include <sys/socket.h>
77 # elif __solaris /* not yet */
78 # include <sys/sendfile.h>
79 # else
80 # error sendfile support requested but not available
81 # endif
82 #endif
83
84 /* number of seconds after which idle threads exit */
85 #define IDLE_TIMEOUT 10
86
87 /* used for struct dirent, AIX doesn't provide it */
88 #ifndef NAME_MAX
89 # define NAME_MAX 4096
90 #endif
91
92 /* buffer size for various temporary buffers */
93 #define AIO_BUFSIZE 65536
94
95 /* use NV for 32 bit perls as it allows larger offsets */
96 #if IVSIZE >= 8
97 # define SvVAL64 SvIV
98 #else
99 # define SvVAL64 SvNV
100 #endif
101
102 #define dBUF \
103 char *aio_buf; \
104 X_LOCK (wrklock); \
105 self->dbuf = aio_buf = malloc (AIO_BUFSIZE); \
106 X_UNLOCK (wrklock); \
107 if (!aio_buf) \
108 return -1;
109
110 typedef SV SV8; /* byte-sv, used for argument-checking */
111
112 enum {
113 REQ_QUIT,
114 REQ_OPEN, REQ_CLOSE,
115 REQ_READ, REQ_WRITE,
116 REQ_READAHEAD, REQ_SENDFILE,
117 REQ_STAT, REQ_LSTAT, REQ_FSTAT,
118 REQ_TRUNCATE, REQ_FTRUNCATE,
119 REQ_UTIME, REQ_FUTIME,
120 REQ_CHMOD, REQ_FCHMOD,
121 REQ_CHOWN, REQ_FCHOWN,
122 REQ_FSYNC, REQ_FDATASYNC,
123 REQ_UNLINK, REQ_RMDIR, REQ_MKDIR, REQ_RENAME,
124 REQ_MKNOD, REQ_READDIR,
125 REQ_LINK, REQ_SYMLINK, REQ_READLINK,
126 REQ_GROUP, REQ_NOP,
127 REQ_BUSY,
128 };
129
130 #define AIO_REQ_KLASS "IO::AIO::REQ"
131 #define AIO_GRP_KLASS "IO::AIO::GRP"
132
133 typedef struct aio_cb
134 {
135 struct aio_cb *volatile next;
136
137 SV *callback;
138 SV *sv1, *sv2;
139 void *ptr1, *ptr2;
140 off_t offs;
141 size_t size;
142 ssize_t result;
143 double nv1, nv2;
144
145 STRLEN stroffset;
146 int type;
147 int int1, int2, int3;
148 int errorno;
149 mode_t mode; /* open */
150
151 unsigned char flags;
152 unsigned char pri;
153
154 SV *self; /* the perl counterpart of this request, if any */
155 struct aio_cb *grp, *grp_prev, *grp_next, *grp_first;
156 } aio_cb;
157
158 enum {
159 FLAG_CANCELLED = 0x01, /* request was cancelled */
160 FLAG_SV2_RO_OFF = 0x40, /* data was set readonly */
161 FLAG_PTR2_FREE = 0x80, /* need to free(ptr2) */
162 };
163
164 typedef aio_cb *aio_req;
165 typedef aio_cb *aio_req_ornot;
166
167 enum {
168 PRI_MIN = -4,
169 PRI_MAX = 4,
170
171 DEFAULT_PRI = 0,
172 PRI_BIAS = -PRI_MIN,
173 NUM_PRI = PRI_MAX + PRI_BIAS + 1,
174 };
175
176 #define AIO_TICKS ((1000000 + 1023) >> 10)
177
178 static unsigned int max_poll_time = 0;
179 static unsigned int max_poll_reqs = 0;
180
181 /* calculate time difference in ~1/AIO_TICKS of a second */
182 static int tvdiff (struct timeval *tv1, struct timeval *tv2)
183 {
184 return (tv2->tv_sec - tv1->tv_sec ) * AIO_TICKS
185 + ((tv2->tv_usec - tv1->tv_usec) >> 10);
186 }
187
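As a side note on the tick arithmetic above: AIO_TICKS is (1000000 + 1023) >> 10 = 977, so tvdiff () reports elapsed time in units of roughly 1/1024 of a second, and max_poll_time (set further down from IO::AIO::max_poll_time) is simply seconds multiplied by that constant. A small standalone sketch of the conversion, not part of this file; the 0.01 second value is only illustrative:

/* sketch: how a poll-time limit in seconds maps onto AIO_TICKS units */
#include <stdio.h>

#define AIO_TICKS ((1000000 + 1023) >> 10)   /* = 977 ticks per second */

int main (void)
{
  double seconds = 0.01;                     /* e.g. IO::AIO::max_poll_time 0.01 */
  unsigned int ticks = seconds * AIO_TICKS;  /* 0.01 * 977 -> 9 ticks */

  printf ("%g s -> %u ticks (one tick is about %.2f ms)\n",
          seconds, ticks, 1000.0 / AIO_TICKS);
  return 0;
}
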
188 static thread_t main_tid;
189 static int main_sig;
190 static int block_sig_level;
191
192 void block_sig ()
193 {
194 sigset_t ss;
195
196 if (block_sig_level++)
197 return;
198
199 if (!main_sig)
200 return;
201
202 sigemptyset (&ss);
203 sigaddset (&ss, main_sig);
204 pthread_sigmask (SIG_BLOCK, &ss, 0);
205 }
206
207 void unblock_sig ()
208 {
209 sigset_t ss;
210
211 if (--block_sig_level)
212 return;
213
214 if (!main_sig)
215 return;
216
217 sigemptyset (&ss);
218 sigaddset (&ss, main_sig);
219 pthread_sigmask (SIG_UNBLOCK, &ss, 0);
220 }
221
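block_sig () and unblock_sig () above nest through block_sig_level, so only the outermost pair actually changes the signal mask, and only when a signal was installed via IO::AIO::setsig (the main_sig check). A minimal standalone model of just the counting behaviour, with the pthread_sigmask calls replaced by printfs:

/* sketch: nesting behaviour of block_sig/unblock_sig */
#include <stdio.h>

static int block_sig_level;

static void block_sig (void)
{
  if (block_sig_level++)
    return;                        /* an outer caller already blocked */

  printf ("pthread_sigmask (SIG_BLOCK)\n");
}

static void unblock_sig (void)
{
  if (--block_sig_level)
    return;                        /* an outer caller still needs it blocked */

  printf ("pthread_sigmask (SIG_UNBLOCK)\n");
}

int main (void)
{
  block_sig ();
  block_sig ();                    /* nested: no further mask change */
  unblock_sig ();
  unblock_sig ();                  /* outermost: mask restored here */
  return 0;
}
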
222 static int next_pri = DEFAULT_PRI + PRI_BIAS;
223
224 static unsigned int started, idle, wanted;
225
226 /* worker threads management */
227 static mutex_t wrklock = X_MUTEX_INIT;
228
229 typedef struct worker {
230 /* locked by wrklock */
231 struct worker *prev, *next;
232
233 thread_t tid;
234
235 /* locked by reslock, reqlock or wrklock */
236 aio_req req; /* currently processed request */
237 void *dbuf;
238 DIR *dirp;
239 } worker;
240
241 static worker wrk_first = { &wrk_first, &wrk_first, 0 };
242
243 static void worker_clear (worker *wrk)
244 {
245 if (wrk->dirp)
246 {
247 closedir (wrk->dirp);
248 wrk->dirp = 0;
249 }
250
251 if (wrk->dbuf)
252 {
253 free (wrk->dbuf);
254 wrk->dbuf = 0;
255 }
256 }
257
258 static void worker_free (worker *wrk)
259 {
260 wrk->next->prev = wrk->prev;
261 wrk->prev->next = wrk->next;
262
263 free (wrk);
264 }
265
266 static volatile unsigned int nreqs, nready, npending;
267 static volatile unsigned int max_idle = 4;
268 static volatile unsigned int max_outstanding = 0xffffffff;
269 static int respipe [2], respipe_osf [2];
270
271 static mutex_t reslock = X_MUTEX_INIT;
272 static mutex_t reqlock = X_MUTEX_INIT;
273 static cond_t reqwait = X_COND_INIT;
274
275 #if WORDACCESS_UNSAFE
276
277 static unsigned int get_nready ()
278 {
279 unsigned int retval;
280
281 X_LOCK (reqlock);
282 retval = nready;
283 X_UNLOCK (reqlock);
284
285 return retval;
286 }
287
288 static unsigned int get_npending ()
289 {
290 unsigned int retval;
291
292 X_LOCK (reslock);
293 retval = npending;
294 X_UNLOCK (reslock);
295
296 return retval;
297 }
298
299 static unsigned int get_nthreads ()
300 {
301 unsigned int retval;
302
303 X_LOCK (wrklock);
304 retval = started;
305 X_UNLOCK (wrklock);
306
307 return retval;
308 }
309
310 #else
311
312 # define get_nready() nready
313 # define get_npending() npending
314 # define get_nthreads() started
315
316 #endif
317
318 /*
319 * a somewhat faster data structure might be nice, but
320 * with 8 priorities this actually needs <20 insns
321 * per shift, the most expensive operation.
322 */
323 typedef struct {
324 aio_req qs[NUM_PRI], qe[NUM_PRI]; /* qstart, qend */
325 int size;
326 } reqq;
327
328 static reqq req_queue;
329 static reqq res_queue;
330
331 int reqq_push (reqq *q, aio_req req)
332 {
333 int pri = req->pri;
334 req->next = 0;
335
336 if (q->qe[pri])
337 {
338 q->qe[pri]->next = req;
339 q->qe[pri] = req;
340 }
341 else
342 q->qe[pri] = q->qs[pri] = req;
343
344 return q->size++;
345 }
346
347 aio_req reqq_shift (reqq *q)
348 {
349 int pri;
350
351 if (!q->size)
352 return 0;
353
354 --q->size;
355
356 for (pri = NUM_PRI; pri--; )
357 {
358 aio_req req = q->qs[pri];
359
360 if (req)
361 {
362 if (!(q->qs[pri] = req->next))
363 q->qe[pri] = 0;
364
365 return req;
366 }
367 }
368
369 abort ();
370 }
371
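The two functions above dequeue strictly by priority, FIFO within one priority. A simplified, self-contained model of that ordering (the node type, the id field and the priority values are invented for illustration; the real code works on aio_req structures with PRI_BIAS-shifted priorities):

/* sketch: standalone model of reqq_push/reqq_shift ordering */
#include <stdio.h>
#include <stdlib.h>

#define NUM_PRI 9                  /* PRI_MAX + PRI_BIAS + 1 in the real code */

typedef struct node
{
  struct node *next;
  int pri, id;
} node;

typedef struct
{
  node *qs[NUM_PRI], *qe[NUM_PRI];
  int size;
} reqq;

static int reqq_push (reqq *q, node *req)
{
  int pri = req->pri;
  req->next = 0;

  if (q->qe[pri])
    {
      q->qe[pri]->next = req;
      q->qe[pri] = req;
    }
  else
    q->qe[pri] = q->qs[pri] = req;

  return q->size++;
}

static node *reqq_shift (reqq *q)
{
  int pri;

  if (!q->size)
    return 0;

  --q->size;

  for (pri = NUM_PRI; pri--; )
    if (q->qs[pri])
      {
        node *req = q->qs[pri];

        if (!(q->qs[pri] = req->next))
          q->qe[pri] = 0;

        return req;
      }

  abort ();
}

int main (void)
{
  reqq q = { { 0 }, { 0 }, 0 };
  node a = { 0, 4, 1 }, b = { 0, 8, 2 }, c = { 0, 4, 3 };
  node *r;

  reqq_push (&q, &a);              /* default priority */
  reqq_push (&q, &b);              /* highest priority */
  reqq_push (&q, &c);              /* default priority */

  while ((r = reqq_shift (&q)))
    printf ("id %d (pri %d)\n", r->id, r->pri);   /* prints 2, 1, 3 */

  return 0;
}
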
372 static int poll_cb ();
373 static int req_invoke (aio_req req);
374 static void req_destroy (aio_req req);
375 static void req_cancel (aio_req req);
376
377 /* must be called at most once */
378 static SV *req_sv (aio_req req, const char *klass)
379 {
380 if (!req->self)
381 {
382 req->self = (SV *)newHV ();
383 sv_magic (req->self, 0, PERL_MAGIC_ext, (char *)req, 0);
384 }
385
386 return sv_2mortal (sv_bless (newRV_inc (req->self), gv_stashpv (klass, 1)));
387 }
388
389 static aio_req SvAIO_REQ (SV *sv)
390 {
391 MAGIC *mg;
392
393 if (!sv_derived_from (sv, AIO_REQ_KLASS) || !SvROK (sv))
394 croak ("object of class " AIO_REQ_KLASS " expected");
395
396 mg = mg_find (SvRV (sv), PERL_MAGIC_ext);
397
398 return mg ? (aio_req)mg->mg_ptr : 0;
399 }
400
401 static void aio_grp_feed (aio_req grp)
402 {
403 block_sig ();
404
405 while (grp->size < grp->int2 && !(grp->flags & FLAG_CANCELLED))
406 {
407 int old_len = grp->size;
408
409 if (grp->sv2 && SvOK (grp->sv2))
410 {
411 dSP;
412
413 ENTER;
414 SAVETMPS;
415 PUSHMARK (SP);
416 XPUSHs (req_sv (grp, AIO_GRP_KLASS));
417 PUTBACK;
418 call_sv (grp->sv2, G_VOID | G_EVAL | G_KEEPERR);
419 SPAGAIN;
420 FREETMPS;
421 LEAVE;
422 }
423
424 /* stop if no progress has been made */
425 if (old_len == grp->size)
426 {
427 SvREFCNT_dec (grp->sv2);
428 grp->sv2 = 0;
429 break;
430 }
431 }
432
433 unblock_sig ();
434 }
435
436 static void aio_grp_dec (aio_req grp)
437 {
438 --grp->size;
439
440 /* call feeder, if applicable */
441 aio_grp_feed (grp);
442
443 /* finish, if done */
444 if (!grp->size && grp->int1)
445 {
446 block_sig ();
447
448 if (!req_invoke (grp))
449 {
450 req_destroy (grp);
451 unblock_sig ();
452 croak (0);
453 }
454
455 req_destroy (grp);
456 unblock_sig ();
457 }
458 }
459
460 static int req_invoke (aio_req req)
461 {
462 dSP;
463
464 if (req->flags & FLAG_SV2_RO_OFF)
465 SvREADONLY_off (req->sv2);
466
467 if (!(req->flags & FLAG_CANCELLED) && SvOK (req->callback))
468 {
469 ENTER;
470 SAVETMPS;
471 PUSHMARK (SP);
472 EXTEND (SP, 1);
473
474 switch (req->type)
475 {
476 case REQ_READDIR:
477 {
478 SV *rv = &PL_sv_undef;
479
480 if (req->result >= 0)
481 {
482 int i;
483 char *buf = req->ptr2;
484 AV *av = newAV ();
485
486 av_extend (av, req->result - 1);
487
488 for (i = 0; i < req->result; ++i)
489 {
490 SV *sv = newSVpv (buf, 0);
491
492 av_store (av, i, sv);
493 buf += SvCUR (sv) + 1;
494 }
495
496 rv = sv_2mortal (newRV_noinc ((SV *)av));
497 }
498
499 PUSHs (rv);
500 }
501 break;
502
503 case REQ_OPEN:
504 {
505 /* convert fd to fh */
506 SV *fh;
507
508 PUSHs (sv_2mortal (newSViv (req->result)));
509 PUTBACK;
510 call_pv ("IO::AIO::_fd2fh", G_SCALAR | G_EVAL);
511 SPAGAIN;
512
513 fh = POPs;
514 PUSHMARK (SP);
515 XPUSHs (fh);
516 }
517 break;
518
519 case REQ_GROUP:
520 req->int1 = 2; /* mark group as finished */
521
522 if (req->sv1)
523 {
524 int i;
525 AV *av = (AV *)req->sv1;
526
527 EXTEND (SP, AvFILL (av) + 1);
528 for (i = 0; i <= AvFILL (av); ++i)
529 PUSHs (*av_fetch (av, i, 0));
530 }
531 break;
532
533 case REQ_NOP:
534 case REQ_BUSY:
535 break;
536
537 case REQ_READLINK:
538 if (req->result > 0)
539 {
540 SvCUR_set (req->sv2, req->result);
541 *SvEND (req->sv2) = 0;
542 PUSHs (req->sv2);
543 }
544 break;
545
546 case REQ_STAT:
547 case REQ_LSTAT:
548 case REQ_FSTAT:
549 PL_laststype = req->type == REQ_LSTAT ? OP_LSTAT : OP_STAT;
550 PL_laststatval = req->result;
551 PL_statcache = *(Stat_t *)(req->ptr2);
552 PUSHs (sv_2mortal (newSViv (req->result)));
553 break;
554
555 case REQ_READ:
556 SvCUR_set (req->sv2, req->stroffset + (req->result > 0 ? req->result : 0));
557 *SvEND (req->sv2) = 0;
558 PUSHs (sv_2mortal (newSViv (req->result)));
559 break;
560
561 default:
562 PUSHs (sv_2mortal (newSViv (req->result)));
563 break;
564 }
565
566 errno = req->errorno;
567
568 PUTBACK;
569 call_sv (req->callback, G_VOID | G_EVAL);
570 SPAGAIN;
571
572 FREETMPS;
573 LEAVE;
574 }
575
576 if (req->grp)
577 {
578 aio_req grp = req->grp;
579
580 /* unlink request */
581 if (req->grp_next) req->grp_next->grp_prev = req->grp_prev;
582 if (req->grp_prev) req->grp_prev->grp_next = req->grp_next;
583
584 if (grp->grp_first == req)
585 grp->grp_first = req->grp_next;
586
587 aio_grp_dec (grp);
588 }
589
590 return !SvTRUE (ERRSV);
591 }
592
593 static void req_destroy (aio_req req)
594 {
595 if (req->self)
596 {
597 sv_unmagic (req->self, PERL_MAGIC_ext);
598 SvREFCNT_dec (req->self);
599 }
600
601 SvREFCNT_dec (req->sv1);
602 SvREFCNT_dec (req->sv2);
603 SvREFCNT_dec (req->callback);
604
605 if (req->flags & FLAG_PTR2_FREE)
606 free (req->ptr2);
607
608 Safefree (req);
609 }
610
611 static void req_cancel_subs (aio_req grp)
612 {
613 aio_req sub;
614
615 if (grp->type != REQ_GROUP)
616 return;
617
618 SvREFCNT_dec (grp->sv2);
619 grp->sv2 = 0;
620
621 for (sub = grp->grp_first; sub; sub = sub->grp_next)
622 req_cancel (sub);
623 }
624
625 static void req_cancel (aio_req req)
626 {
627 req->flags |= FLAG_CANCELLED;
628
629 req_cancel_subs (req);
630 }
631
632 #ifdef USE_SOCKETS_AS_HANDLES
633 # define TO_SOCKET(x) (win32_get_osfhandle (x))
634 #else
635 # define TO_SOCKET(x) (x)
636 #endif
637
638 static void
639 create_pipe (int fd[2])
640 {
641 #ifdef _WIN32
642 int arg = 1;
643 if (PerlSock_socketpair (AF_UNIX, SOCK_STREAM, 0, fd)
644 || ioctlsocket (TO_SOCKET (fd [0]), FIONBIO, &arg)
645 || ioctlsocket (TO_SOCKET (fd [1]), FIONBIO, &arg))
646 #else
647 if (pipe (fd)
648 || fcntl (fd [0], F_SETFL, O_NONBLOCK)
649 || fcntl (fd [1], F_SETFL, O_NONBLOCK))
650 #endif
651 croak ("unable to initialize result pipe");
652
653 respipe_osf [0] = TO_SOCKET (respipe [0]);
654 respipe_osf [1] = TO_SOCKET (respipe [1]);
655 }
656
657 X_THREAD_PROC (aio_proc);
658
659 static void start_thread (void)
660 {
661 worker *wrk = calloc (1, sizeof (worker));
662
663 if (!wrk)
664 croak ("unable to allocate worker thread data");
665
666 X_LOCK (wrklock);
667
668 if (thread_create (&wrk->tid, aio_proc, (void *)wrk))
669 {
670 wrk->prev = &wrk_first;
671 wrk->next = wrk_first.next;
672 wrk_first.next->prev = wrk;
673 wrk_first.next = wrk;
674 ++started;
675 }
676 else
677 free (wrk);
678
679 X_UNLOCK (wrklock);
680 }
681
682 static void maybe_start_thread ()
683 {
684 if (get_nthreads () >= wanted)
685 return;
686
687 /* todo: maybe use idle here, but might be less exact */
688 if (0 <= (int)get_nthreads () + (int)get_npending () - (int)nreqs)
689 return;
690
691 start_thread ();
692 }
693
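The heuristic above only starts a new worker while the configured limit (wanted) has not been reached and the outstanding requests outnumber the existing threads plus the results that are already finished but not yet reported back. A small standalone sketch of that condition; should_start and the literal numbers below are illustrative only:

/* sketch: when maybe_start_thread would spawn a new worker */
#include <stdio.h>

static int should_start (int nthreads, int npending, int nreqs, int wanted)
{
  if (nthreads >= wanted)
    return 0;                      /* already at the configured limit */

  /* start only when outstanding requests outnumber the threads plus
     the results that merely wait to be polled */
  return nthreads + npending < nreqs;
}

int main (void)
{
  printf ("%d\n", should_start (4, 0, 4, 8));   /* 0: every request has a thread */
  printf ("%d\n", should_start (4, 0, 9, 8));   /* 1: more work than threads     */
  printf ("%d\n", should_start (4, 5, 9, 8));   /* 0: results just need polling  */
  return 0;
}
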
694 static void req_send (aio_req req)
695 {
696 block_sig ();
697
698 ++nreqs;
699
700 X_LOCK (reqlock);
701 ++nready;
702 reqq_push (&req_queue, req);
703 X_COND_SIGNAL (reqwait);
704 X_UNLOCK (reqlock);
705
706 unblock_sig ();
707
708 maybe_start_thread ();
709 }
710
711 static void end_thread (void)
712 {
713 aio_req req;
714
715 Newz (0, req, 1, aio_cb);
716
717 req->type = REQ_QUIT;
718 req->pri = PRI_MAX + PRI_BIAS;
719
720 X_LOCK (reqlock);
721 reqq_push (&req_queue, req);
722 X_COND_SIGNAL (reqwait);
723 X_UNLOCK (reqlock);
724
725 X_LOCK (wrklock);
726 --started;
727 X_UNLOCK (wrklock);
728 }
729
730 static void set_max_idle (int nthreads)
731 {
732 if (WORDACCESS_UNSAFE) X_LOCK (reqlock);
733 max_idle = nthreads <= 0 ? 1 : nthreads;
734 if (WORDACCESS_UNSAFE) X_UNLOCK (reqlock);
735 }
736
737 static void min_parallel (int nthreads)
738 {
739 if (wanted < nthreads)
740 wanted = nthreads;
741 }
742
743 static void max_parallel (int nthreads)
744 {
745 if (wanted > nthreads)
746 wanted = nthreads;
747
748 while (started > wanted)
749 end_thread ();
750 }
751
752 static void poll_wait ()
753 {
754 fd_set rfd;
755
756 while (nreqs)
757 {
758 int size;
759 if (WORDACCESS_UNSAFE) X_LOCK (reslock);
760 size = res_queue.size;
761 if (WORDACCESS_UNSAFE) X_UNLOCK (reslock);
762
763 if (size)
764 return;
765
766 maybe_start_thread ();
767
768 FD_ZERO (&rfd);
769 FD_SET (respipe [0], &rfd);
770
771 PerlSock_select (respipe [0] + 1, &rfd, 0, 0, 0);
772 }
773 }
774
775 static int poll_cb ()
776 {
777 dSP;
778 int count = 0;
779 int maxreqs = max_poll_reqs;
780 int do_croak = 0;
781 struct timeval tv_start, tv_now;
782 aio_req req;
783
784 if (max_poll_time)
785 gettimeofday (&tv_start, 0);
786
787 block_sig ();
788
789 for (;;)
790 {
791 for (;;)
792 {
793 maybe_start_thread ();
794
795 X_LOCK (reslock);
796 req = reqq_shift (&res_queue);
797
798 if (req)
799 {
800 --npending;
801
802 if (!res_queue.size)
803 {
804 /* read any signals sent by the worker threads */
805 char buf [4];
806 while (respipe_read (respipe [0], buf, 4) == 4)
807 ;
808 }
809 }
810
811 X_UNLOCK (reslock);
812
813 if (!req)
814 break;
815
816 --nreqs;
817
818 if (req->type == REQ_GROUP && req->size)
819 {
820 req->int1 = 1; /* mark request as delayed */
821 continue;
822 }
823 else
824 {
825 if (!req_invoke (req))
826 {
827 req_destroy (req);
828 unblock_sig ();
829 croak (0);
830 }
831
832 count++;
833 }
834
835 req_destroy (req);
836
837 if (maxreqs && !--maxreqs)
838 break;
839
840 if (max_poll_time)
841 {
842 gettimeofday (&tv_now, 0);
843
844 if (tvdiff (&tv_start, &tv_now) >= max_poll_time)
845 break;
846 }
847 }
848
849 if (nreqs <= max_outstanding)
850 break;
851
852 poll_wait ();
853
854 ++maxreqs;
855 }
856
857 unblock_sig ();
858 return count;
859 }
860
861 /*****************************************************************************/
862 /* work around various missing functions */
863
864 #if !HAVE_PREADWRITE
865 # define pread aio_pread
866 # define pwrite aio_pwrite
867
868 /*
869 * make our pread/pwrite safe against themselves, but not against
870 * normal read/write by using a mutex. slows down execution a lot,
871 * but that's your problem, not mine.
872 */
873 static mutex_t preadwritelock = X_MUTEX_INIT;
874
875 static ssize_t pread (int fd, void *buf, size_t count, off_t offset)
876 {
877 ssize_t res;
878 off_t ooffset;
879
880 X_LOCK (preadwritelock);
881 ooffset = lseek (fd, 0, SEEK_CUR);
882 lseek (fd, offset, SEEK_SET);
883 res = read (fd, buf, count);
884 lseek (fd, ooffset, SEEK_SET);
885 X_UNLOCK (preadwritelock);
886
887 return res;
888 }
889
890 static ssize_t pwrite (int fd, void *buf, size_t count, off_t offset)
891 {
892 ssize_t res;
893 off_t ooffset;
894
895 X_LOCK (preadwritelock);
896 ooffset = lseek (fd, 0, SEEK_CUR);
897 lseek (fd, offset, SEEK_SET);
898 res = write (fd, buf, count);
899 lseek (fd, ooffset, SEEK_SET); /* restore the saved offset, as pread does */
900 X_UNLOCK (preadwritelock);
901
902 return res;
903 }
904 #endif
905
906 #ifndef HAVE_FUTIMES
907
908 # define utimes(path,times) aio_utimes (path, times)
909 # define futimes(fd,times) aio_futimes (fd, times)
910
911 int aio_utimes (const char *filename, const struct timeval times[2])
912 {
913 if (times)
914 {
915 struct utimbuf buf;
916
917 buf.actime = times[0].tv_sec;
918 buf.modtime = times[1].tv_sec;
919
920 return utime (filename, &buf);
921 }
922 else
923 return utime (filename, 0);
924 }
925
926 int aio_futimes (int fd, const struct timeval tv[2])
927 {
928 errno = ENOSYS;
929 return -1;
930 }
931
932 #endif
933
934 #if !HAVE_FDATASYNC
935 # define fdatasync fsync
936 #endif
937
938 #if !HAVE_READAHEAD
939 # define readahead(fd,offset,count) aio_readahead (fd, offset, count, self)
940
941 static ssize_t aio_readahead (int fd, off_t offset, size_t count, worker *self)
942 {
943 size_t todo = count;
944 dBUF;
945
946 while (todo > 0)
947 {
948 size_t len = todo < AIO_BUFSIZE ? todo : AIO_BUFSIZE;
949
950 pread (fd, aio_buf, len, offset);
951 offset += len;
952 todo -= len;
953 }
954
955 errno = 0;
956 return count;
957 }
958
959 #endif
960
961 #if !HAVE_READDIR_R
962 # define readdir_r aio_readdir_r
963
964 static mutex_t readdirlock = X_MUTEX_INIT;
965
966 static int readdir_r (DIR *dirp, X_DIRENT *ent, X_DIRENT **res)
967 {
968 X_DIRENT *e;
969 int errorno;
970
971 X_LOCK (readdirlock);
972
973 e = readdir (dirp);
974 errorno = errno;
975
976 if (e)
977 {
978 *res = ent;
979 strcpy (ent->d_name, e->d_name);
980 }
981 else
982 *res = 0;
983
984 X_UNLOCK (readdirlock);
985
986 errno = errorno;
987 return e ? 0 : -1;
988 }
989 #endif
990
991 /* sendfile always needs emulation */
992 static ssize_t sendfile_ (int ofd, int ifd, off_t offset, size_t count, worker *self)
993 {
994 ssize_t res;
995
996 if (!count)
997 return 0;
998
999 #if HAVE_SENDFILE
1000 # if __linux
1001 res = sendfile (ofd, ifd, &offset, count);
1002
1003 # elif __freebsd
1004 /*
1005 * Of course, the freebsd sendfile is a dire hack with no thoughts
1006 * wasted on making it similar to other I/O functions.
1007 */
1008 {
1009 off_t sbytes;
1010 res = sendfile (ifd, ofd, offset, count, 0, &sbytes, 0);
1011
1012 if (res < 0 && sbytes)
1013 /* maybe only on EAGAIN: as usual, the manpage leaves you guessing */
1014 res = sbytes;
1015 }
1016
1017 # elif __hpux
1018 res = sendfile (ofd, ifd, offset, count, 0, 0);
1019
1020 # elif __solaris
1021 {
1022 struct sendfilevec vec;
1023 size_t sbytes;
1024
1025 vec.sfv_fd = ifd;
1026 vec.sfv_flag = 0;
1027 vec.sfv_off = offset;
1028 vec.sfv_len = count;
1029
1030 res = sendfilev (ofd, &vec, 1, &sbytes);
1031
1032 if (res < 0 && sbytes)
1033 res = sbytes;
1034 }
1035
1036 # endif
1037 #else
1038 res = -1;
1039 errno = ENOSYS;
1040 #endif
1041
1042 if (res < 0
1043 && (errno == ENOSYS || errno == EINVAL || errno == ENOTSOCK
1044 #if __solaris
1045 || errno == EAFNOSUPPORT || errno == EPROTOTYPE
1046 #endif
1047 )
1048 )
1049 {
1050 /* emulate sendfile. this is a major pain in the ass */
1051 dBUF;
1052
1053 res = 0;
1054
1055 while (count)
1056 {
1057 ssize_t cnt;
1058
1059 cnt = pread (ifd, aio_buf, count > AIO_BUFSIZE ? AIO_BUFSIZE : count, offset);
1060
1061 if (cnt <= 0)
1062 {
1063 if (cnt && !res) res = -1;
1064 break;
1065 }
1066
1067 cnt = write (ofd, aio_buf, cnt);
1068
1069 if (cnt <= 0)
1070 {
1071 if (cnt && !res) res = -1;
1072 break;
1073 }
1074
1075 offset += cnt;
1076 res += cnt;
1077 count -= cnt;
1078 }
1079 }
1080
1081 return res;
1082 }
1083
1084 /* read a full directory */
1085 static void scandir_ (aio_req req, worker *self)
1086 {
1087 DIR *dirp;
1088 union
1089 {
1090 X_DIRENT d;
1091 char b [offsetof (X_DIRENT, d_name) + NAME_MAX + 1];
1092 } *u;
1093 X_DIRENT *entp;
1094 char *name, *names;
1095 int memlen = 4096;
1096 int memofs = 0;
1097 int res = 0;
1098
1099 X_LOCK (wrklock);
1100 self->dirp = dirp = opendir (req->ptr1);
1101 self->dbuf = u = malloc (sizeof (*u));
1102 req->flags |= FLAG_PTR2_FREE;
1103 req->ptr2 = names = malloc (memlen);
1104 X_UNLOCK (wrklock);
1105
1106 if (dirp && u && names)
1107 for (;;)
1108 {
1109 errno = 0;
1110 readdir_r (dirp, &u->d, &entp);
1111
1112 if (!entp)
1113 break;
1114
1115 name = entp->d_name;
1116
1117 if (name [0] != '.' || (name [1] && (name [1] != '.' || name [2])))
1118 {
1119 int len = strlen (name) + 1;
1120
1121 res++;
1122
1123 while (memofs + len > memlen)
1124 {
1125 memlen *= 2;
1126 X_LOCK (wrklock);
1127 req->ptr2 = names = realloc (names, memlen);
1128 X_UNLOCK (wrklock);
1129
1130 if (!names)
1131 break;
1132 }
1133
1134 memcpy (names + memofs, name, len);
1135 memofs += len;
1136 }
1137 }
1138
1139 if (errno)
1140 res = -1;
1141
1142 req->result = res;
1143 }
1144
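scandir_ () above stores the directory entries as consecutive NUL-terminated names in req->ptr2 and the entry count in req->result; the REQ_READDIR branch of req_invoke () earlier walks the buffer the same way. A tiny standalone sketch of that layout (the names and the count are made up):

/* sketch: walking the packed, NUL-separated name buffer */
#include <stdio.h>
#include <string.h>

int main (void)
{
  /* what scandir_ would leave in req->ptr2, with req->result == 3 */
  const char names[] = "foo\0bar.txt\0baz";
  int count = 3;
  const char *p = names;
  int i;

  for (i = 0; i < count; ++i)
    {
      printf ("entry %d: %s\n", i, p);
      p += strlen (p) + 1;           /* advance past the trailing NUL */
    }

  return 0;
}
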
1145 /*****************************************************************************/
1146
1147 X_THREAD_PROC (aio_proc)
1148 {
1149 {//D
1150 aio_req req;
1151 struct timespec ts;
1152 worker *self = (worker *)thr_arg;
1153
1154 /* try to distribute timeouts somewhat randomly */
1155 ts.tv_nsec = ((unsigned long)self & 1023UL) * (1000000000UL / 1024UL);
1156
1157 for (;;)
1158 {
1159 ts.tv_sec = time (0) + IDLE_TIMEOUT;
1160
1161 X_LOCK (reqlock);
1162
1163 for (;;)
1164 {
1165 self->req = req = reqq_shift (&req_queue);
1166
1167 if (req)
1168 break;
1169
1170 ++idle;
1171
1172 if (X_COND_TIMEDWAIT (reqwait, reqlock, ts)
1173 == ETIMEDOUT)
1174 {
1175 if (idle > max_idle)
1176 {
1177 --idle;
1178 X_UNLOCK (reqlock);
1179 X_LOCK (wrklock);
1180 --started;
1181 X_UNLOCK (wrklock);
1182 goto quit;
1183 }
1184
1185 /* we are allowed to idle, so do so without any timeout */
1186 X_COND_WAIT (reqwait, reqlock);
1187 ts.tv_sec = time (0) + IDLE_TIMEOUT;
1188 }
1189
1190 --idle;
1191 }
1192
1193 --nready;
1194
1195 X_UNLOCK (reqlock);
1196
1197 errno = 0; /* strictly unnecessary */
1198
1199 if (!(req->flags & FLAG_CANCELLED))
1200 switch (req->type)
1201 {
1202 case REQ_READ: req->result = req->offs >= 0
1203 ? pread (req->int1, req->ptr1, req->size, req->offs)
1204 : read (req->int1, req->ptr1, req->size); break;
1205 case REQ_WRITE: req->result = req->offs >= 0
1206 ? pwrite (req->int1, req->ptr1, req->size, req->offs)
1207 : write (req->int1, req->ptr1, req->size); break;
1208
1209 case REQ_READAHEAD: req->result = readahead (req->int1, req->offs, req->size); break;
1210 case REQ_SENDFILE: req->result = sendfile_ (req->int1, req->int2, req->offs, req->size, self); break;
1211
1212 case REQ_STAT: req->result = stat (req->ptr1, (Stat_t *)req->ptr2); break;
1213 case REQ_LSTAT: req->result = lstat (req->ptr1, (Stat_t *)req->ptr2); break;
1214 case REQ_FSTAT: req->result = fstat (req->int1, (Stat_t *)req->ptr2); break;
1215
1216 case REQ_CHOWN: req->result = chown (req->ptr1, req->int2, req->int3); break;
1217 case REQ_FCHOWN: req->result = fchown (req->int1, req->int2, req->int3); break;
1218 case REQ_CHMOD: req->result = chmod (req->ptr1, req->mode); break;
1219 case REQ_FCHMOD: req->result = fchmod (req->int1, req->mode); break;
1220 case REQ_TRUNCATE: req->result = truncate (req->ptr1, req->offs); break;
1221 case REQ_FTRUNCATE: req->result = ftruncate (req->int1, req->offs); break;
1222
1223 case REQ_OPEN: req->result = open (req->ptr1, req->int1, req->mode); break;
1224 case REQ_CLOSE: req->result = close (req->int1); break;
1225 case REQ_UNLINK: req->result = unlink (req->ptr1); break;
1226 case REQ_RMDIR: req->result = rmdir (req->ptr1); break;
1227 case REQ_MKDIR: req->result = mkdir (req->ptr1, req->mode); break;
1228 case REQ_RENAME: req->result = rename (req->ptr2, req->ptr1); break;
1229 case REQ_LINK: req->result = link (req->ptr2, req->ptr1); break;
1230 case REQ_SYMLINK: req->result = symlink (req->ptr2, req->ptr1); break;
1231 case REQ_MKNOD: req->result = mknod (req->ptr2, req->mode, (dev_t)req->offs); break;
1232 case REQ_READLINK: req->result = readlink (req->ptr2, req->ptr1, NAME_MAX); break;
1233
1234 case REQ_FDATASYNC: req->result = fdatasync (req->int1); break;
1235 case REQ_FSYNC: req->result = fsync (req->int1); break;
1236 case REQ_READDIR: scandir_ (req, self); break;
1237
1238 case REQ_BUSY:
1239 #ifdef _WIN32
1240 Sleep (req->nv1 * 1000.);
1241 #else
1242 {
1243 struct timeval tv;
1244
1245 tv.tv_sec = req->nv1;
1246 tv.tv_usec = (req->nv1 - tv.tv_sec) * 1000000.;
1247
1248 req->result = select (0, 0, 0, 0, &tv);
1249 }
1250 #endif
1251 break;
1252
1253 case REQ_UTIME:
1254 case REQ_FUTIME:
1255 {
1256 struct timeval tv[2];
1257 struct timeval *times;
1258
1259 if (req->nv1 != -1. || req->nv2 != -1.)
1260 {
1261 tv[0].tv_sec = req->nv1;
1262 tv[0].tv_usec = (req->nv1 - tv[0].tv_sec) * 1000000.;
1263 tv[1].tv_sec = req->nv2;
1264 tv[1].tv_usec = (req->nv2 - tv[1].tv_sec) * 1000000.;
1265
1266 times = tv;
1267 }
1268 else
1269 times = 0;
1270
1271
1272 req->result = req->type == REQ_FUTIME
1273 ? futimes (req->int1, times)
1274 : utimes (req->ptr1, times);
1275 }
1276 break;
1277 case REQ_GROUP:
1278 case REQ_NOP:
1279 break;
1280
1281 case REQ_QUIT:
1282 goto quit;
1283
1284 default:
1285 req->result = -1;
1286 break;
1287 }
1288
1289 req->errorno = errno;
1290
1291 X_LOCK (reslock);
1292
1293 ++npending;
1294
1295 if (!reqq_push (&res_queue, req))
1296 {
1297 /* write a dummy byte to the pipe so fh becomes ready */
1298 respipe_write (respipe_osf [1], (const void *)&respipe_osf, 1);
1299
1300 /* optionally signal the main thread asynchronously */
1301 if (main_sig)
1302 pthread_kill (main_tid, main_sig);
1303 }
1304
1305 self->req = 0;
1306 worker_clear (self);
1307
1308 X_UNLOCK (reslock);
1309 }
1310
1311 quit:
1312 X_LOCK (wrklock);
1313 worker_free (self);
1314 X_UNLOCK (wrklock);
1315
1316 return 0;
1317 }//D
1318 }
1319
1320 /*****************************************************************************/
1321
1322 static void atfork_prepare (void)
1323 {
1324 X_LOCK (wrklock);
1325 X_LOCK (reqlock);
1326 X_LOCK (reslock);
1327 #if !HAVE_PREADWRITE
1328 X_LOCK (preadwritelock);
1329 #endif
1330 #if !HAVE_READDIR_R
1331 X_LOCK (readdirlock);
1332 #endif
1333 }
1334
1335 static void atfork_parent (void)
1336 {
1337 #if !HAVE_READDIR_R
1338 X_UNLOCK (readdirlock);
1339 #endif
1340 #if !HAVE_PREADWRITE
1341 X_UNLOCK (preadwritelock);
1342 #endif
1343 X_UNLOCK (reslock);
1344 X_UNLOCK (reqlock);
1345 X_UNLOCK (wrklock);
1346 }
1347
1348 static void atfork_child (void)
1349 {
1350 aio_req prv;
1351
1352 while (prv = reqq_shift (&req_queue))
1353 req_destroy (prv);
1354
1355 while (prv = reqq_shift (&res_queue))
1356 req_destroy (prv);
1357
1358 while (wrk_first.next != &wrk_first)
1359 {
1360 worker *wrk = wrk_first.next;
1361
1362 if (wrk->req)
1363 req_destroy (wrk->req);
1364
1365 worker_clear (wrk);
1366 worker_free (wrk);
1367 }
1368
1369 started = 0;
1370 idle = 0;
1371 nreqs = 0;
1372 nready = 0;
1373 npending = 0;
1374
1375 respipe_close (respipe [0]);
1376 respipe_close (respipe [1]);
1377
1378 create_pipe (respipe);
1379
1380 atfork_parent ();
1381 }
1382
1383 #define dREQ \
1384 aio_req req; \
1385 int req_pri = next_pri; \
1386 next_pri = DEFAULT_PRI + PRI_BIAS; \
1387 \
1388 if (SvOK (callback) && !SvROK (callback)) \
1389 croak ("callback must be undef or of reference type"); \
1390 \
1391 Newz (0, req, 1, aio_cb); \
1392 if (!req) \
1393 croak ("out of memory during aio_req allocation"); \
1394 \
1395 req->callback = newSVsv (callback); \
1396 req->pri = req_pri
1397
1398 #define REQ_SEND \
1399 req_send (req); \
1400 \
1401 if (GIMME_V != G_VOID) \
1402 XPUSHs (req_sv (req, AIO_REQ_KLASS));
1403
1404 MODULE = IO::AIO PACKAGE = IO::AIO
1405
1406 PROTOTYPES: ENABLE
1407
1408 BOOT:
1409 {
1410 HV *stash = gv_stashpv ("IO::AIO", 1);
1411
1412 newCONSTSUB (stash, "EXDEV", newSViv (EXDEV));
1413 newCONSTSUB (stash, "O_RDONLY", newSViv (O_RDONLY));
1414 newCONSTSUB (stash, "O_WRONLY", newSViv (O_WRONLY));
1415 newCONSTSUB (stash, "O_CREAT", newSViv (O_CREAT));
1416 newCONSTSUB (stash, "O_TRUNC", newSViv (O_TRUNC));
1417 #ifdef _WIN32
1418 X_MUTEX_CHECK (wrklock);
1419 X_MUTEX_CHECK (reslock);
1420 X_MUTEX_CHECK (reqlock);
1421 X_MUTEX_CHECK (reqwait);
1422 X_MUTEX_CHECK (preadwritelock);
1423 X_MUTEX_CHECK (readdirlock);
1424
1425 X_COND_CHECK (reqwait);
1426 #else
1427 newCONSTSUB (stash, "S_IFIFO", newSViv (S_IFIFO));
1428 newCONSTSUB (stash, "SIGIO", newSViv (SIGIO));
1429 #endif
1430
1431 create_pipe (respipe);
1432
1433 X_THREAD_ATFORK (atfork_prepare, atfork_parent, atfork_child);
1434 }
1435
1436 void
1437 max_poll_reqs (int nreqs)
1438 PROTOTYPE: $
1439 CODE:
1440 max_poll_reqs = nreqs;
1441
1442 void
1443 max_poll_time (double nseconds)
1444 PROTOTYPE: $
1445 CODE:
1446 max_poll_time = nseconds * AIO_TICKS;
1447
1448 void
1449 min_parallel (int nthreads)
1450 PROTOTYPE: $
1451
1452 void
1453 max_parallel (int nthreads)
1454 PROTOTYPE: $
1455
1456 void
1457 max_idle (int nthreads)
1458 PROTOTYPE: $
1459 CODE:
1460 set_max_idle (nthreads);
1461
1462 int
1463 max_outstanding (int maxreqs)
1464 PROTOTYPE: $
1465 CODE:
1466 RETVAL = max_outstanding;
1467 max_outstanding = maxreqs;
1468 OUTPUT:
1469 RETVAL
1470
1471 void
1472 aio_open (SV8 *pathname, int flags, int mode, SV *callback=&PL_sv_undef)
1473 PROTOTYPE: $$$;$
1474 PPCODE:
1475 {
1476 dREQ;
1477
1478 req->type = REQ_OPEN;
1479 req->sv1 = newSVsv (pathname);
1480 req->ptr1 = SvPVbyte_nolen (req->sv1);
1481 req->int1 = flags;
1482 req->mode = mode;
1483
1484 REQ_SEND;
1485 }
1486
1487 void
1488 aio_close (SV *fh, SV *callback=&PL_sv_undef)
1489 PROTOTYPE: $;$
1490 ALIAS:
1491 aio_close = REQ_CLOSE
1492 aio_fsync = REQ_FSYNC
1493 aio_fdatasync = REQ_FDATASYNC
1494 PPCODE:
1495 {
1496 dREQ;
1497
1498 req->type = ix;
1499 req->sv1 = newSVsv (fh);
1500 req->int1 = PerlIO_fileno (IoIFP (sv_2io (fh)));
1501
1502 REQ_SEND (req);
1503 }
1504
1505 void
1506 aio_read (SV *fh, SV *offset, SV *length, SV8 *data, IV dataoffset, SV *callback=&PL_sv_undef)
1507 ALIAS:
1508 aio_read = REQ_READ
1509 aio_write = REQ_WRITE
1510 PROTOTYPE: $$$$$;$
1511 PPCODE:
1512 {
1513 STRLEN svlen;
1514 char *svptr = SvPVbyte (data, svlen);
1515 UV len = SvUV (length);
1516
1517 SvUPGRADE (data, SVt_PV);
1518 SvPOK_on (data);
1519
1520 if (dataoffset < 0)
1521 dataoffset += svlen;
1522
1523 if (dataoffset < 0 || dataoffset > svlen)
1524 croak ("dataoffset outside of data scalar");
1525
1526 if (ix == REQ_WRITE)
1527 {
1528 /* write: check length and adjust. */
1529 if (!SvOK (length) || len + dataoffset > svlen)
1530 len = svlen - dataoffset;
1531 }
1532 else
1533 {
1534 /* read: grow scalar as necessary */
1535 svptr = SvGROW (data, len + dataoffset + 1);
1536 }
1537
1538 if (len < 0)
1539 croak ("length must not be negative");
1540
1541 {
1542 dREQ;
1543
1544 req->type = ix;
1545 req->sv1 = newSVsv (fh);
1546 req->int1 = PerlIO_fileno (ix == REQ_READ ? IoIFP (sv_2io (fh))
1547 : IoOFP (sv_2io (fh)));
1548 req->offs = SvOK (offset) ? SvVAL64 (offset) : -1;
1549 req->size = len;
1550 req->sv2 = SvREFCNT_inc (data);
1551 req->ptr1 = (char *)svptr + dataoffset;
1552 req->stroffset = dataoffset;
1553
1554 if (!SvREADONLY (data))
1555 {
1556 SvREADONLY_on (data);
1557 req->flags |= FLAG_SV2_RO_OFF;
1558 }
1559
1560 REQ_SEND;
1561 }
1562 }
1563
1564 void
1565 aio_readlink (SV8 *path, SV *callback=&PL_sv_undef)
1566 PROTOTYPE: $;$
1567 PPCODE:
1568 {
1569 SV *data;
1570 dREQ;
1571
1572 data = newSV (NAME_MAX);
1573 SvPOK_on (data);
1574
1575 req->type = REQ_READLINK;
1576 req->sv1 = newSVsv (path);
1577 req->ptr2 = SvPVbyte_nolen (req->sv1);
1578 req->sv2 = data;
1579 req->ptr1 = SvPVbyte_nolen (data);
1580
1581 REQ_SEND;
1582 }
1583
1584 void
1585 aio_sendfile (SV *out_fh, SV *in_fh, SV *in_offset, UV length, SV *callback=&PL_sv_undef)
1586 PROTOTYPE: $$$$;$
1587 PPCODE:
1588 {
1589 dREQ;
1590
1591 req->type = REQ_SENDFILE;
1592 req->sv1 = newSVsv (out_fh);
1593 req->int1 = PerlIO_fileno (IoIFP (sv_2io (out_fh)));
1594 req->sv2 = newSVsv (in_fh);
1595 req->int2 = PerlIO_fileno (IoIFP (sv_2io (in_fh)));
1596 req->offs = SvVAL64 (in_offset);
1597 req->size = length;
1598
1599 REQ_SEND;
1600 }
1601
1602 void
1603 aio_readahead (SV *fh, SV *offset, IV length, SV *callback=&PL_sv_undef)
1604 PROTOTYPE: $$$;$
1605 PPCODE:
1606 {
1607 dREQ;
1608
1609 req->type = REQ_READAHEAD;
1610 req->sv1 = newSVsv (fh);
1611 req->int1 = PerlIO_fileno (IoIFP (sv_2io (fh)));
1612 req->offs = SvVAL64 (offset);
1613 req->size = length;
1614
1615 REQ_SEND;
1616 }
1617
1618 void
1619 aio_stat (SV8 *fh_or_path, SV *callback=&PL_sv_undef)
1620 ALIAS:
1621 aio_stat = REQ_STAT
1622 aio_lstat = REQ_LSTAT
1623 PPCODE:
1624 {
1625 dREQ;
1626
1627 req->ptr2 = malloc (sizeof (Stat_t));
1628 if (!req->ptr2)
1629 {
1630 req_destroy (req);
1631 croak ("out of memory during aio_stat statdata allocation");
1632 }
1633
1634 req->flags |= FLAG_PTR2_FREE;
1635 req->sv1 = newSVsv (fh_or_path);
1636
1637 if (SvPOK (fh_or_path))
1638 {
1639 req->type = ix;
1640 req->ptr1 = SvPVbyte_nolen (req->sv1);
1641 }
1642 else
1643 {
1644 req->type = REQ_FSTAT;
1645 req->int1 = PerlIO_fileno (IoIFP (sv_2io (fh_or_path)));
1646 }
1647
1648 REQ_SEND;
1649 }
1650
1651 void
1652 aio_utime (SV8 *fh_or_path, SV *atime, SV *mtime, SV *callback=&PL_sv_undef)
1653 PPCODE:
1654 {
1655 dREQ;
1656
1657 req->nv1 = SvOK (atime) ? SvNV (atime) : -1.;
1658 req->nv2 = SvOK (mtime) ? SvNV (mtime) : -1.;
1659 req->sv1 = newSVsv (fh_or_path);
1660
1661 if (SvPOK (fh_or_path))
1662 {
1663 req->type = REQ_UTIME;
1664 req->ptr1 = SvPVbyte_nolen (req->sv1);
1665 }
1666 else
1667 {
1668 req->type = REQ_FUTIME;
1669 req->int1 = PerlIO_fileno (IoIFP (sv_2io (fh_or_path)));
1670 }
1671
1672 REQ_SEND;
1673 }
1674
1675 void
1676 aio_truncate (SV8 *fh_or_path, SV *offset, SV *callback=&PL_sv_undef)
1677 PPCODE:
1678 {
1679 dREQ;
1680
1681 req->sv1 = newSVsv (fh_or_path);
1682 req->offs = SvOK (offset) ? SvVAL64 (offset) : -1;
1683
1684 if (SvPOK (fh_or_path))
1685 {
1686 req->type = REQ_TRUNCATE;
1687 req->ptr1 = SvPVbyte_nolen (req->sv1);
1688 }
1689 else
1690 {
1691 req->type = REQ_FTRUNCATE;
1692 req->int1 = PerlIO_fileno (IoIFP (sv_2io (fh_or_path)));
1693 }
1694
1695 REQ_SEND;
1696 }
1697
1698 void
1699 aio_chmod (SV8 *fh_or_path, int mode, SV *callback=&PL_sv_undef)
1700 PPCODE:
1701 {
1702 dREQ;
1703
1704 req->mode = mode;
1705 req->sv1 = newSVsv (fh_or_path);
1706
1707 if (SvPOK (fh_or_path))
1708 {
1709 req->type = REQ_CHMOD;
1710 req->ptr1 = SvPVbyte_nolen (req->sv1);
1711 }
1712 else
1713 {
1714 req->type = REQ_FCHMOD;
1715 req->int1 = PerlIO_fileno (IoIFP (sv_2io (fh_or_path)));
1716 }
1717
1718 REQ_SEND;
1719 }
1720
1721 void
1722 aio_chown (SV8 *fh_or_path, SV *uid, SV *gid, SV *callback=&PL_sv_undef)
1723 PPCODE:
1724 {
1725 dREQ;
1726
1727 req->int2 = SvOK (uid) ? SvIV (uid) : -1;
1728 req->int3 = SvOK (gid) ? SvIV (gid) : -1;
1729 req->sv1 = newSVsv (fh_or_path);
1730
1731 if (SvPOK (fh_or_path))
1732 {
1733 req->type = REQ_CHOWN;
1734 req->ptr1 = SvPVbyte_nolen (req->sv1);
1735 }
1736 else
1737 {
1738 req->type = REQ_FCHOWN;
1739 req->int1 = PerlIO_fileno (IoIFP (sv_2io (fh_or_path)));
1740 }
1741
1742 REQ_SEND;
1743 }
1744
1745 void
1746 aio_unlink (SV8 *pathname, SV *callback=&PL_sv_undef)
1747 ALIAS:
1748 aio_unlink = REQ_UNLINK
1749 aio_rmdir = REQ_RMDIR
1750 aio_readdir = REQ_READDIR
1751 PPCODE:
1752 {
1753 dREQ;
1754
1755 req->type = ix;
1756 req->sv1 = newSVsv (pathname);
1757 req->ptr1 = SvPVbyte_nolen (req->sv1);
1758
1759 REQ_SEND;
1760 }
1761
1762 void
1763 aio_mkdir (SV8 *pathname, int mode, SV *callback=&PL_sv_undef)
1764 PPCODE:
1765 {
1766 dREQ;
1767
1768 req->type = REQ_MKDIR;
1769 req->sv1 = newSVsv (pathname);
1770 req->ptr1 = SvPVbyte_nolen (req->sv1);
1771 req->mode = mode;
1772
1773 REQ_SEND;
1774 }
1775
1776 void
1777 aio_link (SV8 *oldpath, SV8 *newpath, SV *callback=&PL_sv_undef)
1778 ALIAS:
1779 aio_link = REQ_LINK
1780 aio_symlink = REQ_SYMLINK
1781 aio_rename = REQ_RENAME
1782 PPCODE:
1783 {
1784 dREQ;
1785
1786 req->type = ix;
1787 req->sv2 = newSVsv (oldpath);
1788 req->ptr2 = SvPVbyte_nolen (req->sv2);
1789 req->sv1 = newSVsv (newpath);
1790 req->ptr1 = SvPVbyte_nolen (req->sv1);
1791
1792 REQ_SEND;
1793 }
1794
1795 void
1796 aio_mknod (SV8 *pathname, int mode, UV dev, SV *callback=&PL_sv_undef)
1797 PPCODE:
1798 {
1799 dREQ;
1800
1801 req->type = REQ_MKNOD;
1802 req->sv1 = newSVsv (pathname);
1803 req->ptr1 = SvPVbyte_nolen (req->sv1);
1804 req->mode = (mode_t)mode;
1805 req->offs = dev;
1806
1807 REQ_SEND;
1808 }
1809
1810 void
1811 aio_busy (double delay, SV *callback=&PL_sv_undef)
1812 PPCODE:
1813 {
1814 dREQ;
1815
1816 req->type = REQ_BUSY;
1817 req->nv1 = delay < 0. ? 0. : delay;
1818
1819 REQ_SEND;
1820 }
1821
1822 void
1823 aio_group (SV *callback=&PL_sv_undef)
1824 PROTOTYPE: ;$
1825 PPCODE:
1826 {
1827 dREQ;
1828
1829 req->type = REQ_GROUP;
1830
1831 req_send (req);
1832 XPUSHs (req_sv (req, AIO_GRP_KLASS));
1833 }
1834
1835 void
1836 aio_nop (SV *callback=&PL_sv_undef)
1837 PPCODE:
1838 {
1839 dREQ;
1840
1841 req->type = REQ_NOP;
1842
1843 REQ_SEND;
1844 }
1845
1846 int
1847 aioreq_pri (int pri = 0)
1848 PROTOTYPE: ;$
1849 CODE:
1850 RETVAL = next_pri - PRI_BIAS;
1851 if (items > 0)
1852 {
1853 if (pri < PRI_MIN) pri = PRI_MIN;
1854 if (pri > PRI_MAX) pri = PRI_MAX;
1855 next_pri = pri + PRI_BIAS;
1856 }
1857 OUTPUT:
1858 RETVAL
1859
1860 void
1861 aioreq_nice (int nice = 0)
1862 CODE:
1863 nice = next_pri - nice;
1864 if (nice < PRI_MIN) nice = PRI_MIN;
1865 if (nice > PRI_MAX) nice = PRI_MAX;
1866 next_pri = nice + PRI_BIAS;
1867
1868 void
1869 flush ()
1870 PROTOTYPE:
1871 CODE:
1872 while (nreqs)
1873 {
1874 poll_wait ();
1875 poll_cb ();
1876 }
1877
1878 int
1879 poll()
1880 PROTOTYPE:
1881 CODE:
1882 poll_wait ();
1883 RETVAL = poll_cb ();
1884 OUTPUT:
1885 RETVAL
1886
1887 int
1888 poll_fileno()
1889 PROTOTYPE:
1890 CODE:
1891 RETVAL = respipe [0];
1892 OUTPUT:
1893 RETVAL
1894
1895 int
1896 poll_cb(...)
1897 PROTOTYPE:
1898 CODE:
1899 RETVAL = poll_cb ();
1900 OUTPUT:
1901 RETVAL
1902
1903 void
1904 poll_wait()
1905 PROTOTYPE:
1906 CODE:
1907 poll_wait ();
1908
1909 void
1910 setsig (int signum = SIGIO)
1911 PROTOTYPE: ;$
1912 CODE:
1913 {
1914 if (block_sig_level)
1915 croak ("cannot call IO::AIO::setsig from within aio_block/callback");
1916
1917 X_LOCK (reslock);
1918 main_tid = pthread_self ();
1919 main_sig = signum;
1920 X_UNLOCK (reslock);
1921
1922 if (main_sig && npending)
1923 pthread_kill (main_tid, main_sig);
1924 }
1925
1926 void
1927 aio_block (SV *cb)
1928 PROTOTYPE: &
1929 PPCODE:
1930 {
1931 int count;
1932
1933 block_sig ();
1934 PUSHMARK (SP);
1935 PUTBACK;
1936 count = call_sv (cb, GIMME_V | G_NOARGS | G_EVAL);
1937 SPAGAIN;
1938 unblock_sig ();
1939
1940 if (SvTRUE (ERRSV))
1941 croak (0);
1942
1943 XSRETURN (count);
1944 }
1945
1946 int
1947 nreqs()
1948 PROTOTYPE:
1949 CODE:
1950 RETVAL = nreqs;
1951 OUTPUT:
1952 RETVAL
1953
1954 int
1955 nready()
1956 PROTOTYPE:
1957 CODE:
1958 RETVAL = get_nready ();
1959 OUTPUT:
1960 RETVAL
1961
1962 int
1963 npending()
1964 PROTOTYPE:
1965 CODE:
1966 RETVAL = get_npending ();
1967 OUTPUT:
1968 RETVAL
1969
1970 int
1971 nthreads()
1972 PROTOTYPE:
1973 CODE:
1974 if (WORDACCESS_UNSAFE) X_LOCK (wrklock);
1975 RETVAL = started;
1976 if (WORDACCESS_UNSAFE) X_UNLOCK (wrklock);
1977 OUTPUT:
1978 RETVAL
1979
1980 PROTOTYPES: DISABLE
1981
1982 MODULE = IO::AIO PACKAGE = IO::AIO::REQ
1983
1984 void
1985 cancel (aio_req_ornot req)
1986 CODE:
1987 req_cancel (req);
1988
1989 void
1990 cb (aio_req_ornot req, SV *callback=&PL_sv_undef)
1991 CODE:
1992 SvREFCNT_dec (req->callback);
1993 req->callback = newSVsv (callback);
1994
1995 MODULE = IO::AIO PACKAGE = IO::AIO::GRP
1996
1997 void
1998 add (aio_req grp, ...)
1999 PPCODE:
2000 {
2001 int i;
2002 aio_req req;
2003
2004 if (main_sig && !block_sig_level)
2005 croak ("aio_group->add called outside aio_block/callback context while IO::AIO::setsig is in use");
2006
2007 if (grp->int1 == 2)
2008 croak ("cannot add requests to IO::AIO::GRP after the group finished");
2009
2010 for (i = 1; i < items; ++i )
2011 {
2012 if (GIMME_V != G_VOID)
2013 XPUSHs (sv_2mortal (newSVsv (ST (i))));
2014
2015 req = SvAIO_REQ (ST (i));
2016
2017 if (req)
2018 {
2019 ++grp->size;
2020 req->grp = grp;
2021
2022 req->grp_prev = 0;
2023 req->grp_next = grp->grp_first;
2024
2025 if (grp->grp_first)
2026 grp->grp_first->grp_prev = req;
2027
2028 grp->grp_first = req;
2029 }
2030 }
2031 }
2032
2033 void
2034 cancel_subs (aio_req_ornot req)
2035 CODE:
2036 req_cancel_subs (req);
2037
2038 void
2039 result (aio_req grp, ...)
2040 CODE:
2041 {
2042 int i;
2043 AV *av;
2044
2045 grp->errorno = errno;
2046
2047 av = newAV ();
2048
2049 for (i = 1; i < items; ++i )
2050 av_push (av, newSVsv (ST (i)));
2051
2052 SvREFCNT_dec (grp->sv1);
2053 grp->sv1 = (SV *)av;
2054 }
2055
2056 void
2057 errno (aio_req grp, int errorno = errno)
2058 CODE:
2059 grp->errorno = errorno;
2060
2061 void
2062 limit (aio_req grp, int limit)
2063 CODE:
2064 grp->int2 = limit;
2065 aio_grp_feed (grp);
2066
2067 void
2068 feed (aio_req grp, SV *callback=&PL_sv_undef)
2069 CODE:
2070 {
2071 SvREFCNT_dec (grp->sv2);
2072 grp->sv2 = newSVsv (callback);
2073
2074 if (grp->int2 <= 0)
2075 grp->int2 = 2;
2076
2077 aio_grp_feed (grp);
2078 }
2079