/cvs/IO-AIO/AIO.xs
Revision: 1.75
Committed: Thu Oct 26 06:44:48 2006 UTC by root
Branch: MAIN
Changes since 1.74: +8 -2 lines
Log Message:
*** empty log message ***

File Contents

1 /* solaris */
2 #define _POSIX_PTHREAD_SEMANTICS 1
3
4 #if __linux
5 # define _GNU_SOURCE
6 #endif
7
8 #define _REENTRANT 1
9
10 #include <errno.h>
11
12 #include "EXTERN.h"
13 #include "perl.h"
14 #include "XSUB.h"
15
16 #include "autoconf/config.h"
17
18 #include <pthread.h>
19
20 #include <stddef.h>
21 #include <errno.h>
22 #include <sys/time.h>
23 #include <sys/select.h>
24 #include <sys/types.h>
25 #include <sys/stat.h>
26 #include <limits.h>
27 #include <unistd.h>
28 #include <fcntl.h>
29 #include <signal.h>
30 #include <sched.h>
31
32 #if HAVE_SENDFILE
33 # if __linux
34 # include <sys/sendfile.h>
35 # elif __freebsd
36 # include <sys/socket.h>
37 # include <sys/uio.h>
38 # elif __hpux
39 # include <sys/socket.h>
40 # elif __solaris /* not yet */
41 # include <sys/sendfile.h>
42 # else
43 # error sendfile support requested but not available
44 # endif
45 #endif
46
47 /* NAME_MAX is used to size the struct dirent buffer below; AIX doesn't provide it */
48 #ifndef NAME_MAX
49 # define NAME_MAX 4096
50 #endif
51
52 #ifndef PTHREAD_STACK_MIN
53 /* care for broken platforms, e.g. windows */
54 # define PTHREAD_STACK_MIN 16384
55 #endif
56
57 #if __ia64
58 # define STACKSIZE 65536
59 #elif __i386 || __x86_64 /* 16k is unreasonably high :( */
60 # define STACKSIZE PTHREAD_STACK_MIN
61 #else
62 # define STACKSIZE 16384
63 #endif
64
65 /* buffer size for various temporary buffers */
66 #define AIO_BUFSIZE 65536
67
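/* per-request temporary buffer: allocate AIO_BUFSIZE bytes and record the pointer in
   self->dbuf (under wrklock) so worker_clear () can free it; bail out with -1 on failure */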
68 #define dBUF \
69 char *aio_buf; \
70 LOCK (wrklock); \
71 self->dbuf = aio_buf = malloc (AIO_BUFSIZE); \
72 UNLOCK (wrklock); \
73 if (!aio_buf) \
74 return -1;
75
76 enum {
77 REQ_QUIT,
78 REQ_OPEN, REQ_CLOSE,
79 REQ_READ, REQ_WRITE, REQ_READAHEAD,
80 REQ_SENDFILE,
81 REQ_STAT, REQ_LSTAT, REQ_FSTAT,
82 REQ_FSYNC, REQ_FDATASYNC,
83 REQ_UNLINK, REQ_RMDIR, REQ_RENAME,
84 REQ_READDIR,
85 REQ_LINK, REQ_SYMLINK,
86 REQ_GROUP, REQ_NOP,
87 REQ_BUSY,
88 };
89
90 #define AIO_REQ_KLASS "IO::AIO::REQ"
91 #define AIO_GRP_KLASS "IO::AIO::GRP"
92
93 typedef struct aio_cb
94 {
95 struct aio_cb *volatile next;
96
97 SV *data, *callback;
98 SV *fh, *fh2;
99 void *dataptr, *data2ptr;
100 Stat_t *statdata;
101 off_t offset;
102 size_t length;
103 ssize_t result;
104
105 STRLEN dataoffset;
106 int type;
107 int fd, fd2;
108 int errorno;
109 mode_t mode; /* open */
110
111 unsigned char flags;
112 unsigned char pri;
113
114 SV *self; /* the perl counterpart of this request, if any */
115 struct aio_cb *grp, *grp_prev, *grp_next, *grp_first;
116 } aio_cb;
117
118 enum {
119 FLAG_CANCELLED = 0x01,
120 };
121
122 typedef aio_cb *aio_req;
123 typedef aio_cb *aio_req_ornot;
124
125 enum {
126 PRI_MIN = -4,
127 PRI_MAX = 4,
128
129 DEFAULT_PRI = 0,
130 PRI_BIAS = -PRI_MIN,
131 NUM_PRI = PRI_MAX + PRI_BIAS + 1,
132 };
133
134 static int next_pri = DEFAULT_PRI + PRI_BIAS;
135
136 static int started, wanted;
137 static volatile int nreqs;
138 static int respipe [2];
139
140 #if __linux && defined (PTHREAD_ADAPTIVE_MUTEX_INITIALIZER_NP)
141 # define AIO_MUTEX_INIT PTHREAD_ADAPTIVE_MUTEX_INITIALIZER_NP
142 #else
143 # define AIO_MUTEX_INIT PTHREAD_MUTEX_INITIALIZER
144 #endif
145
146 #define LOCK(mutex) pthread_mutex_lock (&(mutex))
147 #define UNLOCK(mutex) pthread_mutex_unlock (&(mutex))
148
149 /* worker thread management */
150 static pthread_mutex_t wrklock = AIO_MUTEX_INIT;
151
152 typedef struct worker {
153 /* locked by wrklock */
154 struct worker *prev, *next;
155
156 pthread_t tid;
157
158 /* locked by reslock, reqlock or wrklock */
159 aio_req req; /* currently processed request */
160 void *dbuf;
161 DIR *dirp;
162 } worker;
163
164 static worker wrk_first = { &wrk_first, &wrk_first, 0 };
165
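/* release per-request resources a worker may still hold (open DIR handle, temporary buffer) */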
166 static void worker_clear (worker *wrk)
167 {
168 if (wrk->dirp)
169 {
170 closedir (wrk->dirp);
171 wrk->dirp = 0;
172 }
173
174 if (wrk->dbuf)
175 {
176 free (wrk->dbuf);
177 wrk->dbuf = 0;
178 }
179 }
180
181 static void worker_free (worker *wrk)
182 {
183 wrk->next->prev = wrk->prev;
184 wrk->prev->next = wrk->next;
185
186 free (wrk);
187 }
188
189 static pthread_mutex_t reslock = AIO_MUTEX_INIT;
190 static pthread_mutex_t reqlock = AIO_MUTEX_INIT;
191 static pthread_cond_t reqwait = PTHREAD_COND_INITIALIZER;
192
193 /*
194 * a somewhat faster data structure might be nice, but
195 * with 8 priorities this actually needs <20 insns
196 * per shift, the most expensive operation.
197 */
198 typedef struct {
199 aio_req qs[NUM_PRI], qe[NUM_PRI]; /* qstart, qend */
200 int size;
201 } reqq;
202
203 static reqq req_queue;
204 static reqq res_queue;
205
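/* append req to the tail of its priority's FIFO; returns the queue size before the
   insert, so a return value of 0 means the queue was previously empty */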
206 int reqq_push (reqq *q, aio_req req)
207 {
208 int pri = req->pri;
209 req->next = 0;
210
211 if (q->qe[pri])
212 {
213 q->qe[pri]->next = req;
214 q->qe[pri] = req;
215 }
216 else
217 q->qe[pri] = q->qs[pri] = req;
218
219 return q->size++;
220 }
221
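/* dequeue the oldest request of the highest non-empty priority; returns 0 if the queue is empty */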
222 aio_req reqq_shift (reqq *q)
223 {
224 int pri;
225
226 if (!q->size)
227 return 0;
228
229 --q->size;
230
231 for (pri = NUM_PRI; pri--; )
232 {
233 aio_req req = q->qs[pri];
234
235 if (req)
236 {
237 if (!(q->qs[pri] = req->next))
238 q->qe[pri] = 0;
239
240 return req;
241 }
242 }
243
244 abort ();
245 }
246
247 static int poll_cb ();
248 static void req_invoke (aio_req req);
249 static void req_free (aio_req req);
250 static void req_cancel (aio_req req);
251
252 /* must be called at most once */
253 static SV *req_sv (aio_req req, const char *klass)
254 {
255 if (!req->self)
256 {
257 req->self = (SV *)newHV ();
258 sv_magic (req->self, 0, PERL_MAGIC_ext, (char *)req, 0);
259 }
260
261 return sv_2mortal (sv_bless (newRV_inc (req->self), gv_stashpv (klass, 1)));
262 }
263
264 static aio_req SvAIO_REQ (SV *sv)
265 {
266 MAGIC *mg;
267
268 if (!sv_derived_from (sv, AIO_REQ_KLASS) || !SvROK (sv))
269 croak ("object of class " AIO_REQ_KLASS " expected");
270
271 mg = mg_find (SvRV (sv), PERL_MAGIC_ext);
272
273 return mg ? (aio_req)mg->mg_ptr : 0;
274 }
275
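/* run the group's feeder callback (stored in fh2) until the group holds at least fd2 (the limit)
   subrequests, the group is cancelled, or the feeder stops adding requests */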
276 static void aio_grp_feed (aio_req grp)
277 {
278 while (grp->length < grp->fd2 && !(grp->flags & FLAG_CANCELLED))
279 {
280 int old_len = grp->length;
281
282 if (grp->fh2 && SvOK (grp->fh2))
283 {
284 dSP;
285
286 ENTER;
287 SAVETMPS;
288 PUSHMARK (SP);
289 XPUSHs (req_sv (grp, AIO_GRP_KLASS));
290 PUTBACK;
291 call_sv (grp->fh2, G_VOID | G_EVAL | G_KEEPERR);
292 SPAGAIN;
293 FREETMPS;
294 LEAVE;
295 }
296
297 /* stop if no progress has been made */
298 if (old_len == grp->length)
299 {
300 SvREFCNT_dec (grp->fh2);
301 grp->fh2 = 0;
302 break;
303 }
304 }
305 }
306
307 static void aio_grp_dec (aio_req grp)
308 {
309 --grp->length;
310
311 /* call feeder, if applicable */
312 aio_grp_feed (grp);
313
314 /* finish, if done */
315 if (!grp->length && grp->fd)
316 {
317 req_invoke (grp);
318 req_free (grp);
319 }
320 }
321
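/* wait until at least one result is pending: while requests are outstanding, return as soon as
   the result queue is non-empty, otherwise select () on the read end of the result pipe */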
322 static void poll_wait ()
323 {
324 fd_set rfd;
325
326 while (nreqs)
327 {
328 int size;
329 #if !(__i386 || __x86_64) /* safe without semaphore on these archs */
330 LOCK (reslock);
331 #endif
332 size = res_queue.size;
333 #if !(__i386 || __x86_64) /* safe without semaphore on these archs */
334 UNLOCK (reslock);
335 #endif
336
337 if (size)
338 return;
339
340 FD_ZERO(&rfd);
341 FD_SET(respipe [0], &rfd);
342
343 select (respipe [0] + 1, &rfd, 0, 0, 0);
344 }
345 }
346
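/* deliver a finished request to Perl: set errno, push the type-specific results (directory
   names, a file handle for REQ_OPEN, group results, or the plain numeric result) and call the
   callback; afterwards unlink the request from its group, if any, and re-throw if the callback died */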
347 static void req_invoke (aio_req req)
348 {
349 dSP;
350
351 if (!(req->flags & FLAG_CANCELLED) && SvOK (req->callback))
352 {
353 errno = req->errorno;
354
355 ENTER;
356 SAVETMPS;
357 PUSHMARK (SP);
358 EXTEND (SP, 1);
359
360 switch (req->type)
361 {
362 case REQ_READDIR:
363 {
364 SV *rv = &PL_sv_undef;
365
366 if (req->result >= 0)
367 {
368 int i;
369 char *buf = req->data2ptr;
370 AV *av = newAV ();
371
372 av_extend (av, req->result - 1);
373
374 for (i = 0; i < req->result; ++i)
375 {
376 SV *sv = newSVpv (buf, 0);
377
378 av_store (av, i, sv);
379 buf += SvCUR (sv) + 1;
380 }
381
382 rv = sv_2mortal (newRV_noinc ((SV *)av));
383 }
384
385 PUSHs (rv);
386 }
387 break;
388
389 case REQ_OPEN:
390 {
391 /* convert fd to fh */
392 SV *fh;
393
394 PUSHs (sv_2mortal (newSViv (req->result)));
395 PUTBACK;
396 call_pv ("IO::AIO::_fd2fh", G_SCALAR | G_EVAL);
397 SPAGAIN;
398
399 fh = SvREFCNT_inc (POPs);
400
401 PUSHMARK (SP);
402 XPUSHs (sv_2mortal (fh));
403 }
404 break;
405
406 case REQ_GROUP:
407 req->fd = 2; /* mark group as finished */
408
409 if (req->data)
410 {
411 int i;
412 AV *av = (AV *)req->data;
413
414 EXTEND (SP, AvFILL (av) + 1);
415 for (i = 0; i <= AvFILL (av); ++i)
416 PUSHs (*av_fetch (av, i, 0));
417 }
418 break;
419
420 case REQ_NOP:
421 case REQ_BUSY:
422 break;
423
424 default:
425 PUSHs (sv_2mortal (newSViv (req->result)));
426 break;
427 }
428
429
430 PUTBACK;
431 call_sv (req->callback, G_VOID | G_EVAL);
432 SPAGAIN;
433
434 FREETMPS;
435 LEAVE;
436 }
437
438 if (req->grp)
439 {
440 aio_req grp = req->grp;
441
442 /* unlink request */
443 if (req->grp_next) req->grp_next->grp_prev = req->grp_prev;
444 if (req->grp_prev) req->grp_prev->grp_next = req->grp_next;
445
446 if (grp->grp_first == req)
447 grp->grp_first = req->grp_next;
448
449 aio_grp_dec (grp);
450 }
451
452 if (SvTRUE (ERRSV))
453 {
454 req_free (req);
455 croak (0);
456 }
457 }
458
459 static void req_free (aio_req req)
460 {
461 if (req->self)
462 {
463 sv_unmagic (req->self, PERL_MAGIC_ext);
464 SvREFCNT_dec (req->self);
465 }
466
467 SvREFCNT_dec (req->data);
468 SvREFCNT_dec (req->fh);
469 SvREFCNT_dec (req->fh2);
470 SvREFCNT_dec (req->callback);
471 Safefree (req->statdata);
472
473 if (req->type == REQ_READDIR)
474 free (req->data2ptr);
475
476 Safefree (req);
477 }
478
479 static void req_cancel_subs (aio_req grp)
480 {
481 aio_req sub;
482
483 if (grp->type != REQ_GROUP)
484 return;
485
486 SvREFCNT_dec (grp->fh2);
487 grp->fh2 = 0;
488
489 for (sub = grp->grp_first; sub; sub = sub->grp_next)
490 req_cancel (sub);
491 }
492
493 static void req_cancel (aio_req req)
494 {
495 req->flags |= FLAG_CANCELLED;
496
497 req_cancel_subs (req);
498 }
499
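/* drain the result queue: account for terminating workers, re-delay unfinished groups, update
   Perl-visible state (read buffers, stat cache), invoke the callbacks and free the requests;
   returns the number of completed requests handled */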
500 static int poll_cb ()
501 {
502 dSP;
503 int count = 0;
504 int do_croak = 0;
505 aio_req req;
506
507 for (;;)
508 {
509 LOCK (reslock);
510 req = reqq_shift (&res_queue);
511
512 if (req)
513 {
514 if (!res_queue.size)
515 {
516 /* read any signals sent by the worker threads */
517 char buf [32];
518 while (read (respipe [0], buf, 32) == 32)
519 ;
520 }
521 }
522
523 UNLOCK (reslock);
524
525 if (!req)
526 break;
527
528 --nreqs;
529
530 if (req->type == REQ_QUIT)
531 started--;
532 else if (req->type == REQ_GROUP && req->length)
533 {
534 req->fd = 1; /* mark request as delayed */
535 continue;
536 }
537 else
538 {
539 if (req->type == REQ_READ)
540 SvCUR_set (req->data, req->dataoffset + (req->result > 0 ? req->result : 0));
541
542 if (req->data2ptr && (req->type == REQ_READ || req->type == REQ_WRITE))
543 SvREADONLY_off (req->data);
544
545 if (req->statdata)
546 {
547 PL_laststype = req->type == REQ_LSTAT ? OP_LSTAT : OP_STAT;
548 PL_laststatval = req->result;
549 PL_statcache = *(req->statdata);
550 }
551
552 req_invoke (req);
553
554 count++;
555 }
556
557 req_free (req);
558 }
559
560 return count;
561 }
562
563 static void *aio_proc(void *arg);
564
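/* spawn one detached worker thread with a small stack and all signals blocked, and link it
   into the wrk_first list (under wrklock); the worker record is freed if thread creation fails */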
565 static void start_thread (void)
566 {
567 sigset_t fullsigset, oldsigset;
568 pthread_attr_t attr;
569
570 worker *wrk = calloc (1, sizeof (worker));
571
572 if (!wrk)
573 croak ("unable to allocate worker thread data");
574
575 pthread_attr_init (&attr);
576 pthread_attr_setstacksize (&attr, STACKSIZE);
577 pthread_attr_setdetachstate (&attr, PTHREAD_CREATE_DETACHED);
578
579 sigfillset (&fullsigset);
580
581 LOCK (wrklock);
582 sigprocmask (SIG_SETMASK, &fullsigset, &oldsigset);
583
584 if (pthread_create (&wrk->tid, &attr, aio_proc, (void *)wrk) == 0)
585 {
586 wrk->prev = &wrk_first;
587 wrk->next = wrk_first.next;
588 wrk_first.next->prev = wrk;
589 wrk_first.next = wrk;
590 started++;
591 }
592 else
593 free (wrk);
594
595 sigprocmask (SIG_SETMASK, &oldsigset, 0);
596 UNLOCK (wrklock);
597 }
598
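/* queue a request for the worker threads, starting a new thread first if all started
   threads appear busy, and wake one waiting worker */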
599 static void req_send (aio_req req)
600 {
601 while (started < wanted && nreqs >= started)
602 start_thread ();
603
604 ++nreqs;
605
606 LOCK (reqlock);
607 reqq_push (&req_queue, req);
608 pthread_cond_signal (&reqwait);
609 UNLOCK (reqlock);
610 }
611
612 static void end_thread (void)
613 {
614 aio_req req;
615
616 Newz (0, req, 1, aio_cb);
617
618 req->type = REQ_QUIT;
619 req->pri = PRI_MAX + PRI_BIAS;
620
621 req_send (req);
622 }
623
624 static void min_parallel (int nthreads)
625 {
626 if (wanted < nthreads)
627 wanted = nthreads;
628 }
629
630 static void max_parallel (int nthreads)
631 {
632 int cur = started;
633
634 if (wanted > nthreads)
635 wanted = nthreads;
636
637 while (cur > wanted)
638 {
639 end_thread ();
640 cur--;
641 }
642
643 while (started > wanted)
644 {
645 poll_wait ();
646 poll_cb ();
647 }
648 }
649
650 static void create_pipe ()
651 {
652 if (pipe (respipe))
653 croak ("unable to initialize result pipe");
654
655 if (fcntl (respipe [0], F_SETFL, O_NONBLOCK))
656 croak ("cannot set result pipe to nonblocking mode");
657
658 if (fcntl (respipe [1], F_SETFL, O_NONBLOCK))
659 croak ("cannot set result pipe to nonblocking mode");
660 }
661
662 /*****************************************************************************/
663 /* work around various missing functions */
664
665 #if !HAVE_PREADWRITE
666 # define pread aio_pread
667 # define pwrite aio_pwrite
668
669 /*
670 * make our pread/pwrite safe against themselves, but not against
671 * normal read/write by using a mutex. slows down execution a lot,
672 * but that's your problem, not mine.
673 */
674 static pthread_mutex_t preadwritelock = PTHREAD_MUTEX_INITIALIZER;
675
676 static ssize_t pread (int fd, void *buf, size_t count, off_t offset)
677 {
678 ssize_t res;
679 off_t ooffset;
680
681 LOCK (preadwritelock);
682 ooffset = lseek (fd, 0, SEEK_CUR);
683 lseek (fd, offset, SEEK_SET);
684 res = read (fd, buf, count);
685 lseek (fd, ooffset, SEEK_SET);
686 UNLOCK (preadwritelock);
687
688 return res;
689 }
690
691 static ssize_t pwrite (int fd, void *buf, size_t count, off_t offset)
692 {
693 ssize_t res;
694 off_t ooffset;
695
696 LOCK (preadwritelock);
697 ooffset = lseek (fd, 0, SEEK_CUR);
698 lseek (fd, offset, SEEK_SET);
699 res = write (fd, buf, count);
700 lseek (fd, ooffset, SEEK_SET); /* restore the original file offset, as pread does */
701 UNLOCK (preadwritelock);
702
703 return res;
704 }
705 #endif
706
707 #if !HAVE_FDATASYNC
708 # define fdatasync fsync
709 #endif
710
711 #if !HAVE_READAHEAD
712 # define readahead(fd,offset,count) aio_readahead (fd, offset, count, self)
713
714 static ssize_t aio_readahead (int fd, off_t offset, size_t count, worker *self)
715 {
716 dBUF;
717
718 while (count > 0)
719 {
720 size_t len = count < AIO_BUFSIZE ? count : AIO_BUFSIZE;
721
722 pread (fd, aio_buf, len, offset);
723 offset += len;
724 count -= len;
725 }
726
727 errno = 0; return 0; /* the emulation has no meaningful return value, but must return one */
728 }
729
730 #endif
731
732 #if !HAVE_READDIR_R
733 # define readdir_r aio_readdir_r
734
735 static pthread_mutex_t readdirlock = PTHREAD_MUTEX_INITIALIZER;
736
737 static int readdir_r (DIR *dirp, struct dirent *ent, struct dirent **res)
738 {
739 struct dirent *e;
740 int errorno;
741
742 LOCK (readdirlock);
743
744 e = readdir (dirp);
745 errorno = errno;
746
747 if (e)
748 {
749 *res = ent;
750 strcpy (ent->d_name, e->d_name);
751 }
752 else
753 *res = 0;
754
755 UNLOCK (readdirlock);
756
757 errno = errorno;
758 return e ? 0 : -1;
759 }
760 #endif
761
762 /* sendfile always needs emulation */
763 static ssize_t sendfile_ (int ofd, int ifd, off_t offset, size_t count, worker *self)
764 {
765 ssize_t res;
766
767 if (!count)
768 return 0;
769
770 #if HAVE_SENDFILE
771 # if __linux
772 res = sendfile (ofd, ifd, &offset, count);
773
774 # elif __freebsd
775 /*
776 * Of course, the freebsd sendfile is a dire hack with no thoughts
777 * wasted on making it similar to other I/O functions.
778 */
779 {
780 off_t sbytes;
781 res = sendfile (ifd, ofd, offset, count, 0, &sbytes, 0);
782
783 if (res < 0 && sbytes)
784 /* maybe on EAGAIN only: as usual, the manpage leaves you guessing */
785 res = sbytes;
786 }
787
788 # elif __hpux
789 res = sendfile (ofd, ifd, offset, count, 0, 0);
790
791 # elif __solaris
792 {
793 struct sendfilevec vec;
794 size_t sbytes;
795
796 vec.sfv_fd = ifd;
797 vec.sfv_flag = 0;
798 vec.sfv_off = offset;
799 vec.sfv_len = count;
800
801 res = sendfilev (ofd, &vec, 1, &sbytes);
802
803 if (res < 0 && sbytes)
804 res = sbytes;
805 }
806
807 # endif
808 #else
809 res = -1;
810 errno = ENOSYS;
811 #endif
812
813 if (res < 0
814 && (errno == ENOSYS || errno == EINVAL || errno == ENOTSOCK
815 #if __solaris
816 || errno == EAFNOSUPPORT || errno == EPROTOTYPE
817 #endif
818 )
819 )
820 {
821 /* emulate sendfile. this is a major pain in the ass */
822 dBUF;
823
824 res = 0;
825
826 while (count)
827 {
828 ssize_t cnt;
829
830 cnt = pread (ifd, aio_buf, count > AIO_BUFSIZE ? AIO_BUFSIZE : count, offset);
831
832 if (cnt <= 0)
833 {
834 if (cnt && !res) res = -1;
835 break;
836 }
837
838 cnt = write (ofd, aio_buf, cnt);
839
840 if (cnt <= 0)
841 {
842 if (cnt && !res) res = -1;
843 break;
844 }
845
846 offset += cnt;
847 res += cnt;
848 count -= cnt;
849 }
850 }
851
852 return res;
853 }
854
855 /* read a full directory */
856 static void scandir_ (aio_req req, worker *self)
857 {
858 DIR *dirp;
859 union
860 {
861 struct dirent d;
862 char b [offsetof (struct dirent, d_name) + NAME_MAX + 1];
863 } *u;
864 struct dirent *entp;
865 char *name, *names;
866 int memlen = 4096;
867 int memofs = 0;
868 int res = 0;
869 int errorno;
870
871 LOCK (wrklock);
872 self->dirp = dirp = opendir (req->dataptr);
873 self->dbuf = u = malloc (sizeof (*u));
874 UNLOCK (wrklock);
875
876 req->data2ptr = names = malloc (memlen);
877
878 if (dirp && u && names)
879 for (;;)
880 {
881 errno = 0;
882 readdir_r (dirp, &u->d, &entp);
883
884 if (!entp)
885 break;
886
887 name = entp->d_name;
888
889 if (name [0] != '.' || (name [1] && (name [1] != '.' || name [2])))
890 {
891 int len = strlen (name) + 1;
892
893 res++;
894
895 while (memofs + len > memlen)
896 {
897 memlen *= 2;
898 LOCK (wrklock);
899 req->data2ptr = names = realloc (names, memlen);
900 UNLOCK (wrklock);
901
902 if (!names)
903 break;
904 }
if (!names)
break; /* realloc failed: stop scanning rather than memcpy into a null buffer */
905
906 memcpy (names + memofs, name, len);
907 memofs += len;
908 }
909 }
910
911 if (errno)
912 res = -1;
913
914 req->result = res;
915 }
916
917 /*****************************************************************************/
918
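/* worker thread main loop: wait for a request, execute it (unless it was cancelled), push the
   result onto res_queue and poke the result pipe if that queue was empty; terminates after
   executing a REQ_QUIT request */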
919 static void *aio_proc (void *thr_arg)
920 {
921 aio_req req;
922 int type;
923 worker *self = (worker *)thr_arg;
924
925 do
926 {
927 LOCK (reqlock);
928
929 for (;;)
930 {
931 self->req = req = reqq_shift (&req_queue);
932
933 if (req)
934 break;
935
936 pthread_cond_wait (&reqwait, &reqlock);
937 }
938
939 UNLOCK (reqlock);
940
941 errno = 0; /* strictly unnecessary */
942 type = req->type; /* remember type for QUIT check */
943
944 if (!(req->flags & FLAG_CANCELLED))
945 switch (type)
946 {
947 case REQ_READ: req->result = pread (req->fd, req->dataptr, req->length, req->offset); break;
948 case REQ_WRITE: req->result = pwrite (req->fd, req->dataptr, req->length, req->offset); break;
949
950 case REQ_READAHEAD: req->result = readahead (req->fd, req->offset, req->length); break;
951 case REQ_SENDFILE: req->result = sendfile_ (req->fd, req->fd2, req->offset, req->length, self); break;
952
953 case REQ_STAT: req->result = stat (req->dataptr, req->statdata); break;
954 case REQ_LSTAT: req->result = lstat (req->dataptr, req->statdata); break;
955 case REQ_FSTAT: req->result = fstat (req->fd , req->statdata); break;
956
957 case REQ_OPEN: req->result = open (req->dataptr, req->fd, req->mode); break;
958 case REQ_CLOSE: req->result = close (req->fd); break;
959 case REQ_UNLINK: req->result = unlink (req->dataptr); break;
960 case REQ_RMDIR: req->result = rmdir (req->dataptr); break;
961 case REQ_RENAME: req->result = rename (req->data2ptr, req->dataptr); break;
962 case REQ_LINK: req->result = link (req->data2ptr, req->dataptr); break;
963 case REQ_SYMLINK: req->result = symlink (req->data2ptr, req->dataptr); break;
964
965 case REQ_FDATASYNC: req->result = fdatasync (req->fd); break;
966 case REQ_FSYNC: req->result = fsync (req->fd); break;
967 case REQ_READDIR: scandir_ (req, self); break;
968
969 case REQ_BUSY:
970 {
971 struct timeval tv;
972
973 tv.tv_sec = req->fd;
974 tv.tv_usec = req->fd2;
975
976 req->result = select (0, 0, 0, 0, &tv);
977 }
978
979 case REQ_GROUP:
980 case REQ_NOP:
981 case REQ_QUIT:
982 break;
983
984 default:
985 req->result = ENOSYS;
986 break;
987 }
988
989 req->errorno = errno;
990
991 LOCK (reslock);
992
993 if (!reqq_push (&res_queue, req))
994 /* write a dummy byte to the pipe so fh becomes ready */
995 write (respipe [1], &respipe, 1);
996
997 self->req = 0;
998 worker_clear (self);
999
1000 UNLOCK (reslock);
1001 }
1002 while (type != REQ_QUIT);
1003
1004 LOCK (wrklock);
1005 worker_free (self);
1006 UNLOCK (wrklock);
1007
1008 return 0;
1009 }
1010
1011 /*****************************************************************************/
1012
1013 static void atfork_prepare (void)
1014 {
1015 LOCK (wrklock);
1016 LOCK (reqlock);
1017 LOCK (reslock);
1018 #if !HAVE_PREADWRITE
1019 LOCK (preadwritelock);
1020 #endif
1021 #if !HAVE_READDIR_R
1022 LOCK (readdirlock);
1023 #endif
1024 }
1025
1026 static void atfork_parent (void)
1027 {
1028 #if !HAVE_READDIR_R
1029 UNLOCK (readdirlock);
1030 #endif
1031 #if !HAVE_PREADWRITE
1032 UNLOCK (preadwritelock);
1033 #endif
1034 UNLOCK (reslock);
1035 UNLOCK (reqlock);
1036 UNLOCK (wrklock);
1037 }
1038
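/* in the child after fork: discard all queued requests and worker records, reset the counters,
   recreate the result pipe and release the locks taken in atfork_prepare */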
1039 static void atfork_child (void)
1040 {
1041 aio_req prv;
1042
1043 while (prv = reqq_shift (&req_queue))
1044 req_free (prv);
1045
1046 while (prv = reqq_shift (&res_queue))
1047 req_free (prv);
1048
1049 while (wrk_first.next != &wrk_first)
1050 {
1051 worker *wrk = wrk_first.next;
1052
1053 if (wrk->req)
1054 req_free (wrk->req);
1055
1056 worker_clear (wrk);
1057 worker_free (wrk);
1058 }
1059
1060 started = 0;
1061 nreqs = 0;
1062
1063 close (respipe [0]);
1064 close (respipe [1]);
1065 create_pipe ();
1066
1067 atfork_parent ();
1068 }
1069
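/* dREQ/REQ_SEND: common prologue/epilogue for the XS stubs below: allocate and initialise an
   aio_cb with the current priority and callback, then submit it and, unless called in void
   context, return its Perl wrapper object */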
1070 #define dREQ \
1071 aio_req req; \
1072 int req_pri = next_pri; \
1073 next_pri = DEFAULT_PRI + PRI_BIAS; \
1074 \
1075 if (SvOK (callback) && !SvROK (callback)) \
1076 croak ("callback must be undef or of reference type"); \
1077 \
1078 Newz (0, req, 1, aio_cb); \
1079 if (!req) \
1080 croak ("out of memory during aio_req allocation"); \
1081 \
1082 req->callback = newSVsv (callback); \
1083 req->pri = req_pri
1084
1085 #define REQ_SEND \
1086 req_send (req); \
1087 \
1088 if (GIMME_V != G_VOID) \
1089 XPUSHs (req_sv (req, AIO_REQ_KLASS));
1090
1091 MODULE = IO::AIO PACKAGE = IO::AIO
1092
1093 PROTOTYPES: ENABLE
1094
1095 BOOT:
1096 {
1097 HV *stash = gv_stashpv ("IO::AIO", 1);
1098 newCONSTSUB (stash, "EXDEV", newSViv (EXDEV));
1099 newCONSTSUB (stash, "O_RDONLY", newSViv (O_RDONLY));
1100 newCONSTSUB (stash, "O_WRONLY", newSViv (O_WRONLY));
1101
1102 create_pipe ();
1103 pthread_atfork (atfork_prepare, atfork_parent, atfork_child);
1104 }
1105
1106 void
1107 min_parallel (nthreads)
1108 int nthreads
1109 PROTOTYPE: $
1110
1111 void
1112 max_parallel (nthreads)
1113 int nthreads
1114 PROTOTYPE: $
1115
1116 void
1117 aio_open (pathname,flags,mode,callback=&PL_sv_undef)
1118 SV * pathname
1119 int flags
1120 int mode
1121 SV * callback
1122 PROTOTYPE: $$$;$
1123 PPCODE:
1124 {
1125 dREQ;
1126
1127 req->type = REQ_OPEN;
1128 req->data = newSVsv (pathname);
1129 req->dataptr = SvPVbyte_nolen (req->data);
1130 req->fd = flags;
1131 req->mode = mode;
1132
1133 REQ_SEND;
1134 }
1135
1136 void
1137 aio_close (fh,callback=&PL_sv_undef)
1138 SV * fh
1139 SV * callback
1140 PROTOTYPE: $;$
1141 ALIAS:
1142 aio_close = REQ_CLOSE
1143 aio_fsync = REQ_FSYNC
1144 aio_fdatasync = REQ_FDATASYNC
1145 PPCODE:
1146 {
1147 dREQ;
1148
1149 req->type = ix;
1150 req->fh = newSVsv (fh);
1151 req->fd = PerlIO_fileno (IoIFP (sv_2io (fh)));
1152
1153 REQ_SEND;
1154 }
1155
1156 void
1157 aio_read (fh,offset,length,data,dataoffset,callback=&PL_sv_undef)
1158 SV * fh
1159 UV offset
1160 IV length
1161 SV * data
1162 IV dataoffset
1163 SV * callback
1164 ALIAS:
1165 aio_read = REQ_READ
1166 aio_write = REQ_WRITE
1167 PROTOTYPE: $$$$$;$
1168 PPCODE:
1169 {
1170 aio_req req;
1171 STRLEN svlen;
1172 char *svptr = SvPVbyte (data, svlen);
1173
1174 SvUPGRADE (data, SVt_PV);
1175 SvPOK_on (data);
1176
1177 if (dataoffset < 0)
1178 dataoffset += svlen;
1179
1180 if (dataoffset < 0 || dataoffset > svlen)
1181 croak ("data offset outside of string");
1182
1183 if (ix == REQ_WRITE)
1184 {
1185 /* write: check length and adjust. */
1186 if (length < 0 || length + dataoffset > svlen)
1187 length = svlen - dataoffset;
1188 }
1189 else
1190 {
1191 /* read: grow scalar as necessary */
1192 svptr = SvGROW (data, length + dataoffset);
1193 }
1194
1195 if (length < 0)
1196 croak ("length must not be negative");
1197
1198 {
1199 dREQ;
1200
1201 req->type = ix;
1202 req->fh = newSVsv (fh);
1203 req->fd = PerlIO_fileno (ix == REQ_READ ? IoIFP (sv_2io (fh))
1204 : IoOFP (sv_2io (fh)));
1205 req->offset = offset;
1206 req->length = length;
1207 req->data = SvREFCNT_inc (data);
1208 req->dataptr = (char *)svptr + dataoffset;
1209
1210 if (!SvREADONLY (data))
1211 {
1212 SvREADONLY_on (data);
1213 req->data2ptr = (void *)data;
1214 }
1215
1216 REQ_SEND;
1217 }
1218 }
1219
1220 void
1221 aio_sendfile (out_fh,in_fh,in_offset,length,callback=&PL_sv_undef)
1222 SV * out_fh
1223 SV * in_fh
1224 UV in_offset
1225 UV length
1226 SV * callback
1227 PROTOTYPE: $$$$;$
1228 PPCODE:
1229 {
1230 dREQ;
1231
1232 req->type = REQ_SENDFILE;
1233 req->fh = newSVsv (out_fh);
1234 req->fd = PerlIO_fileno (IoIFP (sv_2io (out_fh)));
1235 req->fh2 = newSVsv (in_fh);
1236 req->fd2 = PerlIO_fileno (IoIFP (sv_2io (in_fh)));
1237 req->offset = in_offset;
1238 req->length = length;
1239
1240 REQ_SEND;
1241 }
1242
1243 void
1244 aio_readahead (fh,offset,length,callback=&PL_sv_undef)
1245 SV * fh
1246 UV offset
1247 IV length
1248 SV * callback
1249 PROTOTYPE: $$$;$
1250 PPCODE:
1251 {
1252 dREQ;
1253
1254 req->type = REQ_READAHEAD;
1255 req->fh = newSVsv (fh);
1256 req->fd = PerlIO_fileno (IoIFP (sv_2io (fh)));
1257 req->offset = offset;
1258 req->length = length;
1259
1260 REQ_SEND;
1261 }
1262
1263 void
1264 aio_stat (fh_or_path,callback=&PL_sv_undef)
1265 SV * fh_or_path
1266 SV * callback
1267 ALIAS:
1268 aio_stat = REQ_STAT
1269 aio_lstat = REQ_LSTAT
1270 PPCODE:
1271 {
1272 dREQ;
1273
1274 New (0, req->statdata, 1, Stat_t);
1275 if (!req->statdata)
1276 {
1277 req_free (req);
1278 croak ("out of memory during aio_req->statdata allocation");
1279 }
1280
1281 if (SvPOK (fh_or_path))
1282 {
1283 req->type = ix;
1284 req->data = newSVsv (fh_or_path);
1285 req->dataptr = SvPVbyte_nolen (req->data);
1286 }
1287 else
1288 {
1289 req->type = REQ_FSTAT;
1290 req->fh = newSVsv (fh_or_path);
1291 req->fd = PerlIO_fileno (IoIFP (sv_2io (fh_or_path)));
1292 }
1293
1294 REQ_SEND;
1295 }
1296
1297 void
1298 aio_unlink (pathname,callback=&PL_sv_undef)
1299 SV * pathname
1300 SV * callback
1301 ALIAS:
1302 aio_unlink = REQ_UNLINK
1303 aio_rmdir = REQ_RMDIR
1304 aio_readdir = REQ_READDIR
1305 PPCODE:
1306 {
1307 dREQ;
1308
1309 req->type = ix;
1310 req->data = newSVsv (pathname);
1311 req->dataptr = SvPVbyte_nolen (req->data);
1312
1313 REQ_SEND;
1314 }
1315
1316 void
1317 aio_link (oldpath,newpath,callback=&PL_sv_undef)
1318 SV * oldpath
1319 SV * newpath
1320 SV * callback
1321 ALIAS:
1322 aio_link = REQ_LINK
1323 aio_symlink = REQ_SYMLINK
1324 aio_rename = REQ_RENAME
1325 PPCODE:
1326 {
1327 dREQ;
1328
1329 req->type = ix;
1330 req->fh = newSVsv (oldpath);
1331 req->data2ptr = SvPVbyte_nolen (req->fh);
1332 req->data = newSVsv (newpath);
1333 req->dataptr = SvPVbyte_nolen (req->data);
1334
1335 REQ_SEND;
1336 }
1337
1338 void
1339 aio_busy (delay,callback=&PL_sv_undef)
1340 double delay
1341 SV * callback
1342 PPCODE:
1343 {
1344 dREQ;
1345
1346 req->type = REQ_BUSY;
1347 req->fd = delay < 0. ? 0 : delay;
1348 req->fd2 = delay < 0. ? 0 : 1000000. * (delay - req->fd); /* fractional part, in microseconds for tv_usec */
1349
1350 REQ_SEND;
1351 }
1352
1353 void
1354 aio_group (callback=&PL_sv_undef)
1355 SV * callback
1356 PROTOTYPE: ;$
1357 PPCODE:
1358 {
1359 dREQ;
1360
1361 req->type = REQ_GROUP;
1362 req_send (req);
1363
1364 XPUSHs (req_sv (req, AIO_GRP_KLASS));
1365 }
1366
1367 void
1368 aio_nop (callback=&PL_sv_undef)
1369 SV * callback
1370 PPCODE:
1371 {
1372 dREQ;
1373
1374 req->type = REQ_NOP;
1375
1376 REQ_SEND;
1377 }
1378
1379 void
1380 aioreq_pri (int pri = DEFAULT_PRI)
1381 CODE:
1382 if (pri < PRI_MIN) pri = PRI_MIN;
1383 if (pri > PRI_MAX) pri = PRI_MAX;
1384 next_pri = pri + PRI_BIAS;
1385
1386 void
1387 aioreq_nice (int nice = 0)
1388 CODE:
1389 nice = next_pri - PRI_BIAS - nice; /* unbias next_pri before applying the nice value */
1390 if (nice < PRI_MIN) nice = PRI_MIN;
1391 if (nice > PRI_MAX) nice = PRI_MAX;
1392 next_pri = nice + PRI_BIAS;
1393
1394 void
1395 flush ()
1396 PROTOTYPE:
1397 CODE:
1398 while (nreqs)
1399 {
1400 poll_wait ();
1401 poll_cb ();
1402 }
1403
1404 void
1405 poll()
1406 PROTOTYPE:
1407 CODE:
1408 if (nreqs)
1409 {
1410 poll_wait ();
1411 poll_cb ();
1412 }
1413
1414 int
1415 poll_fileno()
1416 PROTOTYPE:
1417 CODE:
1418 RETVAL = respipe [0];
1419 OUTPUT:
1420 RETVAL
1421
1422 int
1423 poll_cb(...)
1424 PROTOTYPE:
1425 CODE:
1426 RETVAL = poll_cb ();
1427 OUTPUT:
1428 RETVAL
1429
1430 void
1431 poll_wait()
1432 PROTOTYPE:
1433 CODE:
1434 if (nreqs)
1435 poll_wait ();
1436
1437 int
1438 nreqs()
1439 PROTOTYPE:
1440 CODE:
1441 RETVAL = nreqs;
1442 OUTPUT:
1443 RETVAL
1444
1445 PROTOTYPES: DISABLE
1446
1447 MODULE = IO::AIO PACKAGE = IO::AIO::REQ
1448
1449 void
1450 cancel (aio_req_ornot req)
1451 CODE:
1452 req_cancel (req);
1453
1454 void
1455 cb (aio_req_ornot req, SV *callback=&PL_sv_undef)
1456 CODE:
1457 SvREFCNT_dec (req->callback);
1458 req->callback = newSVsv (callback);
1459
1460 MODULE = IO::AIO PACKAGE = IO::AIO::GRP
1461
1462 void
1463 add (aio_req grp, ...)
1464 PPCODE:
1465 {
1466 int i;
1467 aio_req req;
1468
1469 if (grp->fd == 2)
1470 croak ("cannot add requests to IO::AIO::GRP after the group finished");
1471
1472 for (i = 1; i < items; ++i )
1473 {
1474 if (GIMME_V != G_VOID)
1475 XPUSHs (sv_2mortal (newSVsv (ST (i))));
1476
1477 req = SvAIO_REQ (ST (i));
1478
1479 if (req)
1480 {
1481 ++grp->length;
1482 req->grp = grp;
1483
1484 req->grp_prev = 0;
1485 req->grp_next = grp->grp_first;
1486
1487 if (grp->grp_first)
1488 grp->grp_first->grp_prev = req;
1489
1490 grp->grp_first = req;
1491 }
1492 }
1493 }
1494
1495 void
1496 cancel_subs (aio_req_ornot req)
1497 CODE:
1498 req_cancel_subs (req);
1499
1500 void
1501 result (aio_req grp, ...)
1502 CODE:
1503 {
1504 int i;
1505 AV *av = newAV ();
1506
1507 for (i = 1; i < items; ++i )
1508 av_push (av, newSVsv (ST (i)));
1509
1510 SvREFCNT_dec (grp->data);
1511 grp->data = (SV *)av;
1512 }
1513
1514 void
1515 limit (aio_req grp, int limit)
1516 CODE:
1517 grp->fd2 = limit;
1518 aio_grp_feed (grp);
1519
1520 void
1521 feed (aio_req grp, SV *callback=&PL_sv_undef)
1522 CODE:
1523 {
1524 SvREFCNT_dec (grp->fh2);
1525 grp->fh2 = newSVsv (callback);
1526
1527 if (grp->fd2 <= 0)
1528 grp->fd2 = 2;
1529
1530 aio_grp_feed (grp);
1531 }
1532