/cvs/IO-AIO/AIO.xs

Comparing IO-AIO/AIO.xs (file contents):
Revision 1.10 by root, Wed Jul 13 00:13:09 2005 UTC vs.
Revision 1.85 by root, Sat Oct 28 23:32:29 2006 UTC

1/* solaris */
2#define _POSIX_PTHREAD_SEMANTICS 1
3
4#if __linux && !defined(_GNU_SOURCE)
5# define _GNU_SOURCE
6#endif
7
8/* just in case */
9#define _REENTRANT 1
10
11#include <errno.h>
12
1#include "EXTERN.h" 13#include "EXTERN.h"
2#include "perl.h" 14#include "perl.h"
3#include "XSUB.h" 15#include "XSUB.h"
4 16
5#define _XOPEN_SOURCE 500 17#include "autoconf/config.h"
6 18
19#include <pthread.h>
20
21#include <stddef.h>
22#include <errno.h>
23#include <sys/time.h>
24#include <sys/select.h>
7#include <sys/types.h> 25#include <sys/types.h>
8#include <sys/stat.h> 26#include <sys/stat.h>
9 27#include <limits.h>
10#include <unistd.h> 28#include <unistd.h>
11#include <fcntl.h> 29#include <fcntl.h>
12#include <signal.h> 30#include <signal.h>
13#include <sched.h> 31#include <sched.h>
32
33#if HAVE_SENDFILE
14#if __linux 34# if __linux
35# include <sys/sendfile.h>
36# elif __freebsd
15#include <sys/syscall.h> 37# include <sys/socket.h>
38# include <sys/uio.h>
39# elif __hpux
40# include <sys/socket.h>
41# elif __solaris /* not yet */
42# include <sys/sendfile.h>
43# else
44# error sendfile support requested but not available
16#endif 45# endif
46#endif
17 47
18#include <pthread.h> 48/* number of seconds after which idle threads exit */
49#define IDLE_TIMEOUT 10
19 50
20typedef void *InputStream; /* hack, but 5.6.1 is simply toooo old ;) */ 51/* used for struct dirent, AIX doesn't provide it */
21typedef void *OutputStream; /* hack, but 5.6.1 is simply toooo old ;) */ 52#ifndef NAME_MAX
22typedef void *InOutStream; /* hack, but 5.6.1 is simply toooo old ;) */ 53# define NAME_MAX 4096
54#endif
55
56#ifndef PTHREAD_STACK_MIN
57/* care for broken platforms, e.g. windows */
58# define PTHREAD_STACK_MIN 16384
59#endif
23 60
24#if __ia64 61#if __ia64
25# define STACKSIZE 65536 62# define STACKSIZE 65536
63#elif __i386 || __x86_64 /* 16k is unreasonably high :( */
64# define STACKSIZE PTHREAD_STACK_MIN
26#else 65#else
27# define STACKSIZE 4096 66# define STACKSIZE 16384
67#endif
68
69/* whether word reads are potentially non-atomic.
70 * this is conservative, likely most arches this runs
71 * on have atomic word read/writes.
72 */
73#ifndef WORDACCESS_UNSAFE
74# if __i386 || __x86_64
75# define WORDACCESS_UNSAFE 0
76# else
77# define WORDACCESS_UNSAFE 1
28#endif 78# endif
79#endif
80
81/* buffer size for various temporary buffers */
82#define AIO_BUFSIZE 65536
83
84#define dBUF \
85 char *aio_buf; \
86 LOCK (wrklock); \
87 self->dbuf = aio_buf = malloc (AIO_BUFSIZE); \
88 UNLOCK (wrklock); \
89 if (!aio_buf) \
90 return -1;
29 91
30enum { 92enum {
31 REQ_QUIT, 93 REQ_QUIT,
32 REQ_OPEN, REQ_CLOSE, 94 REQ_OPEN, REQ_CLOSE,
33 REQ_READ, REQ_WRITE, REQ_READAHEAD, 95 REQ_READ, REQ_WRITE, REQ_READAHEAD,
96 REQ_SENDFILE,
34 REQ_STAT, REQ_LSTAT, REQ_FSTAT, REQ_UNLINK, 97 REQ_STAT, REQ_LSTAT, REQ_FSTAT,
35 REQ_FSYNC, REQ_FDATASYNC, 98 REQ_FSYNC, REQ_FDATASYNC,
99 REQ_UNLINK, REQ_RMDIR, REQ_RENAME,
100 REQ_MKNOD, REQ_READDIR,
101 REQ_LINK, REQ_SYMLINK,
102 REQ_GROUP, REQ_NOP,
103 REQ_BUSY,
36}; 104};
37 105
106#define AIO_REQ_KLASS "IO::AIO::REQ"
107#define AIO_GRP_KLASS "IO::AIO::GRP"
108
38typedef struct aio_cb { 109typedef struct aio_cb
110{
39 struct aio_cb *volatile next; 111 struct aio_cb *volatile next;
40 112
41 int type; 113 SV *data, *callback;
42 114 SV *fh, *fh2;
43 int fd; 115 void *dataptr, *data2ptr;
116 Stat_t *statdata;
44 off_t offset; 117 off_t offset;
45 size_t length; 118 size_t length;
46 ssize_t result; 119 ssize_t result;
120
121 STRLEN dataoffset;
122 int type;
123 int fd, fd2;
124 int errorno;
47 mode_t mode; /* open */ 125 mode_t mode; /* open */
48 int errorno;
49 SV *data, *callback;
50 void *dataptr;
51 STRLEN dataoffset;
52 126
53 Stat_t *statdata; 127 unsigned char flags;
128 unsigned char pri;
129
130 SV *self; /* the perl counterpart of this request, if any */
131 struct aio_cb *grp, *grp_prev, *grp_next, *grp_first;
54} aio_cb; 132} aio_cb;
55 133
134enum {
135 FLAG_CANCELLED = 0x01,
136};
137
56typedef aio_cb *aio_req; 138typedef aio_cb *aio_req;
139typedef aio_cb *aio_req_ornot;
57 140
58static int started; 141enum {
59static int nreqs; 142 PRI_MIN = -4,
60static int max_outstanding = 1<<30; 143 PRI_MAX = 4,
144
145 DEFAULT_PRI = 0,
146 PRI_BIAS = -PRI_MIN,
147 NUM_PRI = PRI_MAX + PRI_BIAS + 1,
148};
149
150#define AIO_TICKS ((1000000 + 1023) >> 10)
151
152static unsigned int max_poll_time = 0;
153static unsigned int max_poll_reqs = 0;
154
155 /* calculate time difference in ~1/AIO_TICKS of a second */
156static int tvdiff (struct timeval *tv1, struct timeval *tv2)
157{
158 return (tv2->tv_sec - tv1->tv_sec ) * AIO_TICKS
159 + ((tv2->tv_usec - tv1->tv_usec) >> 10);
160}
161
162static int next_pri = DEFAULT_PRI + PRI_BIAS;
163
164static unsigned int started, idle, wanted;
165
166#if __linux && defined (PTHREAD_ADAPTIVE_MUTEX_INITIALIZER_NP)
167# define AIO_MUTEX_INIT PTHREAD_ADAPTIVE_MUTEX_INITIALIZER_NP
168#else
169# define AIO_MUTEX_INIT PTHREAD_MUTEX_INITIALIZER
170#endif
171
172#define LOCK(mutex) pthread_mutex_lock (&(mutex))
173#define UNLOCK(mutex) pthread_mutex_unlock (&(mutex))
174
175/* worker threads management */
176static pthread_mutex_t wrklock = AIO_MUTEX_INIT;
177
178typedef struct worker {
179 /* locked by wrklock */
180 struct worker *prev, *next;
181
182 pthread_t tid;
183
184 /* locked by reslock, reqlock or wrklock */
185 aio_req req; /* currently processed request */
186 void *dbuf;
187 DIR *dirp;
188} worker;
189
190static worker wrk_first = { &wrk_first, &wrk_first, 0 };
191
192static void worker_clear (worker *wrk)
193{
194 if (wrk->dirp)
195 {
196 closedir (wrk->dirp);
197 wrk->dirp = 0;
198 }
199
200 if (wrk->dbuf)
201 {
202 free (wrk->dbuf);
203 wrk->dbuf = 0;
204 }
205}
206
207static void worker_free (worker *wrk)
208{
209 wrk->next->prev = wrk->prev;
210 wrk->prev->next = wrk->next;
211
212 free (wrk);
213}
214
215static volatile unsigned int nreqs, nready, npending;
216static volatile unsigned int max_idle = 4;
217static volatile unsigned int max_outstanding = 0xffffffff;
61static int respipe [2]; 218static int respipe [2];
62 219
63static pthread_mutex_t reslock = PTHREAD_MUTEX_INITIALIZER; 220static pthread_mutex_t reslock = AIO_MUTEX_INIT;
64static pthread_mutex_t reqlock = PTHREAD_MUTEX_INITIALIZER; 221static pthread_mutex_t reqlock = AIO_MUTEX_INIT;
65static pthread_cond_t reqwait = PTHREAD_COND_INITIALIZER; 222static pthread_cond_t reqwait = PTHREAD_COND_INITIALIZER;
66 223
67static volatile aio_req reqs, reqe; /* queue start, queue end */ 224#if WORDACCESS_UNSAFE
68static volatile aio_req ress, rese; /* queue start, queue end */
69 225
70static void 226static unsigned int get_nready ()
71poll_wait ()
72{ 227{
73 if (!nreqs) 228 unsigned int retval;
229
230 LOCK (reqlock);
231 retval = nready;
232 UNLOCK (reqlock);
233
234 return retval;
235}
236
237static unsigned int get_npending ()
238{
239 unsigned int retval;
240
241 LOCK (reslock);
242 retval = npending;
243 UNLOCK (reslock);
244
245 return retval;
246}
247
248static unsigned int get_nthreads ()
249{
250 unsigned int retval;
251
252 LOCK (wrklock);
253 retval = started;
254 UNLOCK (wrklock);
255
256 return retval;
257}
258
259#else
260
261# define get_nready() nready
262# define get_npending() npending
263# define get_nthreads() started
264
265#endif
266
267/*
268 * a somewhat faster data structure might be nice, but
269 * with 8 priorities this actually needs <20 insns
270 * per shift, the most expensive operation.
271 */
272typedef struct {
273 aio_req qs[NUM_PRI], qe[NUM_PRI]; /* qstart, qend */
274 int size;
275} reqq;
276
277static reqq req_queue;
278static reqq res_queue;
279
280int reqq_push (reqq *q, aio_req req)
281{
282 int pri = req->pri;
283 req->next = 0;
284
285 if (q->qe[pri])
286 {
287 q->qe[pri]->next = req;
288 q->qe[pri] = req;
289 }
290 else
291 q->qe[pri] = q->qs[pri] = req;
292
293 return q->size++;
294}
295
296aio_req reqq_shift (reqq *q)
297{
298 int pri;
299
300 if (!q->size)
74 return; 301 return 0;
75 302
76 fd_set rfd; 303 --q->size;
77 FD_ZERO(&rfd);
78 FD_SET(respipe [0], &rfd);
79 304
80 select (respipe [0] + 1, &rfd, 0, 0, 0); 305 for (pri = NUM_PRI; pri--; )
81} 306 {
307 aio_req req = q->qs[pri];
82 308
83static int 309 if (req)
84poll_cb () 310 {
311 if (!(q->qs[pri] = req->next))
312 q->qe[pri] = 0;
313
314 return req;
315 }
316 }
317
318 abort ();
319}
320
321static int poll_cb ();
322static void req_invoke (aio_req req);
323static void req_free (aio_req req);
324static void req_cancel (aio_req req);
325
326/* must be called at most once */
327static SV *req_sv (aio_req req, const char *klass)
328{
329 if (!req->self)
330 {
331 req->self = (SV *)newHV ();
332 sv_magic (req->self, 0, PERL_MAGIC_ext, (char *)req, 0);
333 }
334
335 return sv_2mortal (sv_bless (newRV_inc (req->self), gv_stashpv (klass, 1)));
336}
337
338static aio_req SvAIO_REQ (SV *sv)
339{
340 MAGIC *mg;
341
342 if (!sv_derived_from (sv, AIO_REQ_KLASS) || !SvROK (sv))
343 croak ("object of class " AIO_REQ_KLASS " expected");
344
345 mg = mg_find (SvRV (sv), PERL_MAGIC_ext);
346
347 return mg ? (aio_req)mg->mg_ptr : 0;
348}
349
350static void aio_grp_feed (aio_req grp)
351{
352 while (grp->length < grp->fd2 && !(grp->flags & FLAG_CANCELLED))
353 {
354 int old_len = grp->length;
355
356 if (grp->fh2 && SvOK (grp->fh2))
357 {
358 dSP;
359
360 ENTER;
361 SAVETMPS;
362 PUSHMARK (SP);
363 XPUSHs (req_sv (grp, AIO_GRP_KLASS));
364 PUTBACK;
365 call_sv (grp->fh2, G_VOID | G_EVAL | G_KEEPERR);
366 SPAGAIN;
367 FREETMPS;
368 LEAVE;
369 }
370
371 /* stop if no progress has been made */
372 if (old_len == grp->length)
373 {
374 SvREFCNT_dec (grp->fh2);
375 grp->fh2 = 0;
376 break;
377 }
378 }
379}
380
381static void aio_grp_dec (aio_req grp)
382{
383 --grp->length;
384
385 /* call feeder, if applicable */
386 aio_grp_feed (grp);
387
388 /* finish, if done */
389 if (!grp->length && grp->fd)
390 {
391 req_invoke (grp);
392 req_free (grp);
393 }
394}
395
396static void req_invoke (aio_req req)
85{ 397{
86 dSP; 398 dSP;
87 int count = 0; 399
88 aio_req req; 400 if (!(req->flags & FLAG_CANCELLED) && SvOK (req->callback))
89
90 { 401 {
91 /* read and signals sent by the worker threads */ 402 ENTER;
92 char buf [32]; 403 SAVETMPS;
93 while (read (respipe [0], buf, 32) > 0) 404 PUSHMARK (SP);
94 ; 405 EXTEND (SP, 1);
95 }
96 406
97 for (;;) 407 switch (req->type)
98 {
99 pthread_mutex_lock (&reslock);
100
101 req = ress;
102
103 if (ress)
104 { 408 {
105 ress = ress->next; 409 case REQ_READDIR:
106 if (!ress) rese = 0;
107 }
108
109 pthread_mutex_unlock (&reslock);
110
111 if (!req)
112 break;
113
114 nreqs--;
115
116 if (req->type == REQ_QUIT)
117 started--;
118 else
119 {
120 int errorno = errno;
121 errno = req->errorno;
122
123 if (req->type == REQ_READ)
124 SvCUR_set (req->data, req->dataoffset
125 + req->result > 0 ? req->result : 0);
126
127 if (req->data)
128 SvREFCNT_dec (req->data);
129
130 if (req->type == REQ_STAT || req->type == REQ_LSTAT || req->type == REQ_FSTAT)
131 { 410 {
132 PL_laststype = req->type == REQ_LSTAT ? OP_LSTAT : OP_STAT; 411 SV *rv = &PL_sv_undef;
133 PL_laststatval = req->result;
134 PL_statcache = *(req->statdata);
135 412
136 Safefree (req->statdata); 413 if (req->result >= 0)
414 {
415 int i;
416 char *buf = req->data2ptr;
417 AV *av = newAV ();
418
419 av_extend (av, req->result - 1);
420
421 for (i = 0; i < req->result; ++i)
422 {
423 SV *sv = newSVpv (buf, 0);
424
425 av_store (av, i, sv);
426 buf += SvCUR (sv) + 1;
427 }
428
429 rv = sv_2mortal (newRV_noinc ((SV *)av));
430 }
431
432 PUSHs (rv);
137 } 433 }
434 break;
138 435
139 PUSHMARK (SP); 436 case REQ_OPEN:
140 XPUSHs (sv_2mortal (newSViv (req->result)));
141
142 if (req->type == REQ_OPEN)
143 { 437 {
144 /* convert fd to fh */ 438 /* convert fd to fh */
145 SV *fh; 439 SV *fh;
146 440
441 PUSHs (sv_2mortal (newSViv (req->result)));
147 PUTBACK; 442 PUTBACK;
148 call_pv ("IO::AIO::_fd2fh", G_SCALAR | G_EVAL); 443 call_pv ("IO::AIO::_fd2fh", G_SCALAR | G_EVAL);
149 SPAGAIN; 444 SPAGAIN;
150 445
151 fh = POPs; 446 fh = SvREFCNT_inc (POPs);
152 447
153 PUSHMARK (SP); 448 PUSHMARK (SP);
154 XPUSHs (fh); 449 XPUSHs (sv_2mortal (fh));
155 } 450 }
451 break;
156 452
157 if (SvOK (req->callback)) 453 case REQ_GROUP:
454 req->fd = 2; /* mark group as finished */
455
456 if (req->data)
158 { 457 {
159 PUTBACK; 458 int i;
160 call_sv (req->callback, G_VOID | G_EVAL); 459 AV *av = (AV *)req->data;
161 SPAGAIN; 460
461 EXTEND (SP, AvFILL (av) + 1);
462 for (i = 0; i <= AvFILL (av); ++i)
463 PUSHs (*av_fetch (av, i, 0));
162 } 464 }
163 465 break;
164 if (req->callback)
165 SvREFCNT_dec (req->callback);
166 466
167 errno = errorno; 467 case REQ_NOP:
168 count++; 468 case REQ_BUSY:
469 break;
470
471 default:
472 PUSHs (sv_2mortal (newSViv (req->result)));
473 break;
169 } 474 }
170 475
476 errno = req->errorno;
477
478 PUTBACK;
479 call_sv (req->callback, G_VOID | G_EVAL);
480 SPAGAIN;
481
482 FREETMPS;
483 LEAVE;
484 }
485
486 if (req->grp)
487 {
488 aio_req grp = req->grp;
489
490 /* unlink request */
491 if (req->grp_next) req->grp_next->grp_prev = req->grp_prev;
492 if (req->grp_prev) req->grp_prev->grp_next = req->grp_next;
493
494 if (grp->grp_first == req)
495 grp->grp_first = req->grp_next;
496
497 aio_grp_dec (grp);
498 }
499
500 if (SvTRUE (ERRSV))
501 {
502 req_free (req);
503 croak (0);
504 }
505}
506
507static void req_free (aio_req req)
508{
509 if (req->self)
510 {
511 sv_unmagic (req->self, PERL_MAGIC_ext);
512 SvREFCNT_dec (req->self);
513 }
514
515 SvREFCNT_dec (req->data);
516 SvREFCNT_dec (req->fh);
517 SvREFCNT_dec (req->fh2);
518 SvREFCNT_dec (req->callback);
519 Safefree (req->statdata);
520
521 if (req->type == REQ_READDIR)
522 free (req->data2ptr);
523
171 Safefree (req); 524 Safefree (req);
172 } 525}
173 526
174 return count; 527static void req_cancel_subs (aio_req grp)
528{
529 aio_req sub;
530
531 if (grp->type != REQ_GROUP)
532 return;
533
534 SvREFCNT_dec (grp->fh2);
535 grp->fh2 = 0;
536
537 for (sub = grp->grp_first; sub; sub = sub->grp_next)
538 req_cancel (sub);
539}
540
541static void req_cancel (aio_req req)
542{
543 req->flags |= FLAG_CANCELLED;
544
545 req_cancel_subs (req);
175} 546}
176 547
177static void *aio_proc(void *arg); 548static void *aio_proc(void *arg);
178 549
179static void
180start_thread (void) 550static void start_thread (void)
181{ 551{
182 sigset_t fullsigset, oldsigset; 552 sigset_t fullsigset, oldsigset;
183 pthread_t tid;
184 pthread_attr_t attr; 553 pthread_attr_t attr;
554
555 worker *wrk = calloc (1, sizeof (worker));
556
557 if (!wrk)
558 croak ("unable to allocate worker thread data");
185 559
186 pthread_attr_init (&attr); 560 pthread_attr_init (&attr);
187 pthread_attr_setstacksize (&attr, STACKSIZE); 561 pthread_attr_setstacksize (&attr, STACKSIZE);
188 pthread_attr_setdetachstate (&attr, PTHREAD_CREATE_DETACHED); 562 pthread_attr_setdetachstate (&attr, PTHREAD_CREATE_DETACHED);
563#ifdef PTHREAD_SCOPE_PROCESS
564 pthread_attr_setscope (&attr, PTHREAD_SCOPE_PROCESS);
565#endif
189 566
190 sigfillset (&fullsigset); 567 sigfillset (&fullsigset);
568
569 LOCK (wrklock);
191 sigprocmask (SIG_SETMASK, &fullsigset, &oldsigset); 570 sigprocmask (SIG_SETMASK, &fullsigset, &oldsigset);
192 571
193 if (pthread_create (&tid, &attr, aio_proc, 0) == 0) 572 if (pthread_create (&wrk->tid, &attr, aio_proc, (void *)wrk) == 0)
573 {
574 wrk->prev = &wrk_first;
575 wrk->next = wrk_first.next;
576 wrk_first.next->prev = wrk;
577 wrk_first.next = wrk;
194 started++; 578 ++started;
579 }
580 else
581 free (wrk);
195 582
196 sigprocmask (SIG_SETMASK, &oldsigset, 0); 583 sigprocmask (SIG_SETMASK, &oldsigset, 0);
584 UNLOCK (wrklock);
197} 585}
198 586
199static void 587static void maybe_start_thread ()
200send_req (aio_req req)
201{ 588{
589 if (get_nthreads () >= wanted)
590 return;
591
592 /* todo: maybe use idle here, but might be less exact */
593 if (0 <= (int)get_nthreads () + (int)get_npending () - (int)nreqs)
594 return;
595
596 start_thread ();
597}
598
599static void req_send (aio_req req)
600{
202 nreqs++; 601 ++nreqs;
203 602
204 pthread_mutex_lock (&reqlock); 603 LOCK (reqlock);
205 604 ++nready;
206 req->next = 0; 605 reqq_push (&req_queue, req);
207
208 if (reqe)
209 {
210 reqe->next = req;
211 reqe = req;
212 }
213 else
214 reqe = reqs = req;
215
216 pthread_cond_signal (&reqwait); 606 pthread_cond_signal (&reqwait);
217 pthread_mutex_unlock (&reqlock); 607 UNLOCK (reqlock);
218 608
219 while (nreqs > max_outstanding) 609 maybe_start_thread ();
220 {
221 poll_wait ();
222 poll_cb ();
223 }
224} 610}
225 611
226static void 612static void end_thread (void)
227end_thread (void)
228{ 613{
229 aio_req req; 614 aio_req req;
615
230 New (0, req, 1, aio_cb); 616 Newz (0, req, 1, aio_cb);
617
231 req->type = REQ_QUIT; 618 req->type = REQ_QUIT;
619 req->pri = PRI_MAX + PRI_BIAS;
232 620
233 send_req (req); 621 LOCK (reqlock);
234} 622 reqq_push (&req_queue, req);
623 pthread_cond_signal (&reqwait);
624 UNLOCK (reqlock);
235 625
236static void 626 LOCK (wrklock);
237read_write (int dowrite, int fd, off_t offset, size_t length, 627 --started;
238 SV *data, STRLEN dataoffset, SV *callback) 628 UNLOCK (wrklock);
629}
630
631static void set_max_idle (int nthreads)
239{ 632{
633 if (WORDACCESS_UNSAFE) LOCK (reqlock);
634 max_idle = nthreads <= 0 ? 1 : nthreads;
635 if (WORDACCESS_UNSAFE) UNLOCK (reqlock);
636}
637
638static void min_parallel (int nthreads)
639{
640 if (wanted < nthreads)
641 wanted = nthreads;
642}
643
644static void max_parallel (int nthreads)
645{
646 if (wanted > nthreads)
647 wanted = nthreads;
648
649 while (started > wanted)
650 end_thread ();
651}
652
653static void poll_wait ()
654{
655 fd_set rfd;
656
657 while (nreqs)
658 {
659 int size;
660 if (WORDACCESS_UNSAFE) LOCK (reslock);
661 size = res_queue.size;
662 if (WORDACCESS_UNSAFE) UNLOCK (reslock);
663
664 if (size)
665 return;
666
667 maybe_start_thread ();
668
669 FD_ZERO(&rfd);
670 FD_SET(respipe [0], &rfd);
671
672 select (respipe [0] + 1, &rfd, 0, 0, 0);
673 }
674}
675
676static int poll_cb ()
677{
678 dSP;
679 int count = 0;
680 int maxreqs = max_poll_reqs;
681 int do_croak = 0;
682 struct timeval tv_start, tv_now;
240 aio_req req; 683 aio_req req;
241 STRLEN svlen;
242 char *svptr = SvPV (data, svlen);
243 684
244 SvUPGRADE (data, SVt_PV); 685 if (max_poll_time)
245 SvPOK_on (data); 686 gettimeofday (&tv_start, 0);
246 687
247 if (dataoffset < 0) 688 for (;;)
248 dataoffset += svlen;
249
250 if (dataoffset < 0 || dataoffset > svlen)
251 croak ("data offset outside of string");
252
253 if (dowrite)
254 {
255 /* write: check length and adjust. */
256 if (length < 0 || length + dataoffset > svlen)
257 length = svlen - dataoffset;
258 } 689 {
259 else
260 {
261 /* read: grow scalar as necessary */
262 svptr = SvGROW (data, length + dataoffset);
263 }
264
265 if (length < 0)
266 croak ("length must not be negative");
267
268 Newz (0, req, 1, aio_cb);
269
270 if (!req)
271 croak ("out of memory during aio_req allocation");
272
273 req->type = dowrite ? REQ_WRITE : REQ_READ;
274 req->fd = fd;
275 req->offset = offset;
276 req->length = length;
277 req->data = SvREFCNT_inc (data);
278 req->dataptr = (char *)svptr + dataoffset;
279 req->callback = SvREFCNT_inc (callback);
280
281 send_req (req);
282}
283
284static void *
285aio_proc (void *thr_arg)
286{
287 aio_req req;
288 int type;
289
290 do
291 {
292 pthread_mutex_lock (&reqlock);
293
294 for (;;) 690 for (;;)
295 { 691 {
296 req = reqs; 692 maybe_start_thread ();
297 693
694 LOCK (reslock);
695 req = reqq_shift (&res_queue);
696
298 if (reqs) 697 if (req)
299 { 698 {
300 reqs = reqs->next; 699 --npending;
301 if (!reqs) reqe = 0; 700
701 if (!res_queue.size)
702 {
703 /* read any signals sent by the worker threads */
704 char buf [32];
705 while (read (respipe [0], buf, 32) == 32)
706 ;
707 }
302 } 708 }
709
710 UNLOCK (reslock);
711
712 if (!req)
713 break;
714
715 --nreqs;
716
717 if (req->type == REQ_GROUP && req->length)
718 {
719 req->fd = 1; /* mark request as delayed */
720 continue;
721 }
722 else
723 {
724 if (req->type == REQ_READ)
725 SvCUR_set (req->data, req->dataoffset + (req->result > 0 ? req->result : 0));
726
727 if (req->data2ptr && (req->type == REQ_READ || req->type == REQ_WRITE))
728 SvREADONLY_off (req->data);
729
730 if (req->statdata)
731 {
732 PL_laststype = req->type == REQ_LSTAT ? OP_LSTAT : OP_STAT;
733 PL_laststatval = req->result;
734 PL_statcache = *(req->statdata);
735 }
736
737 req_invoke (req);
738
739 count++;
740 }
741
742 req_free (req);
743
744 if (maxreqs && !--maxreqs)
745 break;
746
747 if (max_poll_time)
748 {
749 gettimeofday (&tv_now, 0);
750
751 if (tvdiff (&tv_start, &tv_now) >= max_poll_time)
752 break;
753 }
754 }
755
756 if (nreqs <= max_outstanding)
757 break;
758
759 poll_wait ();
760
761 ++maxreqs;
762 }
763
764 return count;
765}
766
767static void create_pipe ()
768{
769 if (pipe (respipe))
770 croak ("unable to initialize result pipe");
771
772 if (fcntl (respipe [0], F_SETFL, O_NONBLOCK))
773 croak ("cannot set result pipe to nonblocking mode");
774
775 if (fcntl (respipe [1], F_SETFL, O_NONBLOCK))
776 croak ("cannot set result pipe to nonblocking mode");
777}
778
779/*****************************************************************************/
780/* work around various missing functions */
781
782#if !HAVE_PREADWRITE
783# define pread aio_pread
784# define pwrite aio_pwrite
785
786/*
787 * make our pread/pwrite safe against themselves, but not against
788 * normal read/write by using a mutex. slows down execution a lot,
789 * but that's your problem, not mine.
790 */
791static pthread_mutex_t preadwritelock = PTHREAD_MUTEX_INITIALIZER;
792
793static ssize_t pread (int fd, void *buf, size_t count, off_t offset)
794{
795 ssize_t res;
796 off_t ooffset;
797
798 LOCK (preadwritelock);
799 ooffset = lseek (fd, 0, SEEK_CUR);
800 lseek (fd, offset, SEEK_SET);
801 res = read (fd, buf, count);
802 lseek (fd, ooffset, SEEK_SET);
803 UNLOCK (preadwritelock);
804
805 return res;
806}
807
808static ssize_t pwrite (int fd, void *buf, size_t count, off_t offset)
809{
810 ssize_t res;
811 off_t ooffset;
812
813 LOCK (preadwritelock);
814 ooffset = lseek (fd, 0, SEEK_CUR);
815 lseek (fd, offset, SEEK_SET);
816 res = write (fd, buf, count);
817 lseek (fd, ooffset, SEEK_SET);
818 UNLOCK (preadwritelock);
819
820 return res;
821}
822#endif
823
824#if !HAVE_FDATASYNC
825# define fdatasync fsync
826#endif
827
828#if !HAVE_READAHEAD
829# define readahead(fd,offset,count) aio_readahead (fd, offset, count, self)
830
831static ssize_t aio_readahead (int fd, off_t offset, size_t count, worker *self)
832{
833 dBUF;
834
835 while (count > 0)
836 {
837 size_t len = count < AIO_BUFSIZE ? count : AIO_BUFSIZE;
838
839 pread (fd, aio_buf, len, offset);
840 offset += len;
841 count -= len;
842 }
843
844 errno = 0;
845}
846
847#endif
848
849#if !HAVE_READDIR_R
850# define readdir_r aio_readdir_r
851
852static pthread_mutex_t readdirlock = PTHREAD_MUTEX_INITIALIZER;
853
854static int readdir_r (DIR *dirp, struct dirent *ent, struct dirent **res)
855{
856 struct dirent *e;
857 int errorno;
858
859 LOCK (readdirlock);
860
861 e = readdir (dirp);
862 errorno = errno;
863
864 if (e)
865 {
866 *res = ent;
867 strcpy (ent->d_name, e->d_name);
868 }
869 else
870 *res = 0;
871
872 UNLOCK (readdirlock);
873
874 errno = errorno;
875 return e ? 0 : -1;
876}
877#endif
878
879/* sendfile always needs emulation */
880static ssize_t sendfile_ (int ofd, int ifd, off_t offset, size_t count, worker *self)
881{
882 ssize_t res;
883
884 if (!count)
885 return 0;
886
887#if HAVE_SENDFILE
888# if __linux
889 res = sendfile (ofd, ifd, &offset, count);
890
891# elif __freebsd
892 /*
893 * Of course, the freebsd sendfile is a dire hack with no thoughts
894 * wasted on making it similar to other I/O functions.
895 */
896 {
897 off_t sbytes;
898 res = sendfile (ifd, ofd, offset, count, 0, &sbytes, 0);
899
900 if (res < 0 && sbytes)
901 /* maybe only on EAGAIN: as usual, the manpage leaves you guessing */
902 res = sbytes;
903 }
904
905# elif __hpux
906 res = sendfile (ofd, ifd, offset, count, 0, 0);
907
908# elif __solaris
909 {
910 struct sendfilevec vec;
911 size_t sbytes;
912
913 vec.sfv_fd = ifd;
914 vec.sfv_flag = 0;
915 vec.sfv_off = offset;
916 vec.sfv_len = count;
917
918 res = sendfilev (ofd, &vec, 1, &sbytes);
919
920 if (res < 0 && sbytes)
921 res = sbytes;
922 }
923
924# endif
925#else
926 res = -1;
927 errno = ENOSYS;
928#endif
929
930 if (res < 0
931 && (errno == ENOSYS || errno == EINVAL || errno == ENOTSOCK
932#if __solaris
933 || errno == EAFNOSUPPORT || errno == EPROTOTYPE
934#endif
935 )
936 )
937 {
938 /* emulate sendfile. this is a major pain in the ass */
939 dBUF;
940
941 res = 0;
942
943 while (count)
944 {
945 ssize_t cnt;
946
947 cnt = pread (ifd, aio_buf, count > AIO_BUFSIZE ? AIO_BUFSIZE : count, offset);
948
949 if (cnt <= 0)
950 {
951 if (cnt && !res) res = -1;
952 break;
953 }
954
955 cnt = write (ofd, aio_buf, cnt);
956
957 if (cnt <= 0)
958 {
959 if (cnt && !res) res = -1;
960 break;
961 }
962
963 offset += cnt;
964 res += cnt;
965 count -= cnt;
966 }
967 }
968
969 return res;
970}
971
972/* read a full directory */
973static void scandir_ (aio_req req, worker *self)
974{
975 DIR *dirp;
976 union
977 {
978 struct dirent d;
979 char b [offsetof (struct dirent, d_name) + NAME_MAX + 1];
980 } *u;
981 struct dirent *entp;
982 char *name, *names;
983 int memlen = 4096;
984 int memofs = 0;
985 int res = 0;
986 int errorno;
987
988 LOCK (wrklock);
989 self->dirp = dirp = opendir (req->dataptr);
990 self->dbuf = u = malloc (sizeof (*u));
991 req->data2ptr = names = malloc (memlen);
992 UNLOCK (wrklock);
993
994 if (dirp && u && names)
995 for (;;)
996 {
997 errno = 0;
998 readdir_r (dirp, &u->d, &entp);
999
1000 if (!entp)
1001 break;
1002
1003 name = entp->d_name;
1004
1005 if (name [0] != '.' || (name [1] && (name [1] != '.' || name [2])))
1006 {
1007 int len = strlen (name) + 1;
1008
1009 res++;
1010
1011 while (memofs + len > memlen)
1012 {
1013 memlen *= 2;
1014 LOCK (wrklock);
1015 req->data2ptr = names = realloc (names, memlen);
1016 UNLOCK (wrklock);
1017
1018 if (!names)
1019 break;
1020 }
1021
1022 memcpy (names + memofs, name, len);
1023 memofs += len;
1024 }
1025 }
1026
1027 if (errno)
1028 res = -1;
1029
1030 req->result = res;
1031}
1032
1033/*****************************************************************************/
1034
1035static void *aio_proc (void *thr_arg)
1036{
1037 aio_req req;
1038 struct timespec ts;
1039 worker *self = (worker *)thr_arg;
1040
1041 /* try to distribute timeouts somewhat evenly */
1042 ts.tv_nsec = (((unsigned long)self + (unsigned long)ts.tv_sec) & 1023UL)
1043 * (1000000000UL / 1024UL);
1044
1045 for (;;)
1046 {
1047 ts.tv_sec = time (0) + IDLE_TIMEOUT;
1048
1049 LOCK (reqlock);
1050
1051 for (;;)
1052 {
1053 self->req = req = reqq_shift (&req_queue);
303 1054
304 if (req) 1055 if (req)
305 break; 1056 break;
306 1057
1058 ++idle;
1059
1060 if (pthread_cond_timedwait (&reqwait, &reqlock, &ts)
1061 == ETIMEDOUT)
1062 {
1063 if (idle > max_idle)
1064 {
1065 --idle;
1066 UNLOCK (reqlock);
1067 LOCK (wrklock);
1068 --started;
1069 UNLOCK (wrklock);
1070 goto quit;
1071 }
1072
1073 /* we are allowed to idle, so do so without any timeout */
307 pthread_cond_wait (&reqwait, &reqlock); 1074 pthread_cond_wait (&reqwait, &reqlock);
1075 ts.tv_sec = time (0) + IDLE_TIMEOUT;
1076 }
1077
1078 --idle;
308 } 1079 }
309 1080
310 pthread_mutex_unlock (&reqlock); 1081 --nready;
1082
1083 UNLOCK (reqlock);
311 1084
312 errno = 0; /* strictly unnecessary */ 1085 errno = 0; /* strictly unnecessary */
313 1086
314 type = req->type; 1087 if (!(req->flags & FLAG_CANCELLED))
315
316 switch (type) 1088 switch (req->type)
317 { 1089 {
318 case REQ_READ: req->result = pread (req->fd, req->dataptr, req->length, req->offset); break; 1090 case REQ_READ: req->result = pread (req->fd, req->dataptr, req->length, req->offset); break;
319 case REQ_WRITE: req->result = pwrite (req->fd, req->dataptr, req->length, req->offset); break; 1091 case REQ_WRITE: req->result = pwrite (req->fd, req->dataptr, req->length, req->offset); break;
320#if SYS_readahead 1092
321 case REQ_READAHEAD: req->result = readahead (req->fd, req->offset, req->length); break; 1093 case REQ_READAHEAD: req->result = readahead (req->fd, req->offset, req->length); break;
322#else 1094 case REQ_SENDFILE: req->result = sendfile_ (req->fd, req->fd2, req->offset, req->length, self); break;
323 case REQ_READAHEAD: req->result = -1; errno = ENOSYS; break;
324#endif
325 1095
326 case REQ_STAT: req->result = stat (req->dataptr, req->statdata); break; 1096 case REQ_STAT: req->result = stat (req->dataptr, req->statdata); break;
327 case REQ_LSTAT: req->result = lstat (req->dataptr, req->statdata); break; 1097 case REQ_LSTAT: req->result = lstat (req->dataptr, req->statdata); break;
328 case REQ_FSTAT: req->result = fstat (req->fd , req->statdata); break; 1098 case REQ_FSTAT: req->result = fstat (req->fd , req->statdata); break;
329 1099
330 case REQ_OPEN: req->result = open (req->dataptr, req->fd, req->mode); break; 1100 case REQ_OPEN: req->result = open (req->dataptr, req->fd, req->mode); break;
331 case REQ_CLOSE: req->result = close (req->fd); break; 1101 case REQ_CLOSE: req->result = close (req->fd); break;
332 case REQ_UNLINK: req->result = unlink (req->dataptr); break; 1102 case REQ_UNLINK: req->result = unlink (req->dataptr); break;
333
334 case REQ_FSYNC: req->result = fsync (req->fd); break; 1103 case REQ_RMDIR: req->result = rmdir (req->dataptr); break;
1104 case REQ_RENAME: req->result = rename (req->data2ptr, req->dataptr); break;
1105 case REQ_LINK: req->result = link (req->data2ptr, req->dataptr); break;
1106 case REQ_SYMLINK: req->result = symlink (req->data2ptr, req->dataptr); break;
1107 case REQ_MKNOD: req->result = mknod (req->data2ptr, req->mode, (dev_t)req->offset); break;
1108
335 case REQ_FDATASYNC: req->result = fdatasync (req->fd); break; 1109 case REQ_FDATASYNC: req->result = fdatasync (req->fd); break;
1110 case REQ_FSYNC: req->result = fsync (req->fd); break;
1111 case REQ_READDIR: scandir_ (req, self); break;
336 1112
1113 case REQ_BUSY:
1114 {
1115 struct timeval tv;
1116
1117 tv.tv_sec = req->fd;
1118 tv.tv_usec = req->fd2;
1119
1120 req->result = select (0, 0, 0, 0, &tv);
1121 }
1122
1123 case REQ_GROUP:
1124 case REQ_NOP:
1125 break;
1126
337 case REQ_QUIT: 1127 case REQ_QUIT:
338 break; 1128 goto quit;
339 1129
340 default: 1130 default:
341 req->result = ENOSYS; 1131 req->result = ENOSYS;
342 break; 1132 break;
343 } 1133 }
344 1134
345 req->errorno = errno; 1135 req->errorno = errno;
346 1136
347 pthread_mutex_lock (&reslock); 1137 LOCK (reslock);
348 1138
349 req->next = 0; 1139 ++npending;
350 1140
351 if (rese) 1141 if (!reqq_push (&res_queue, req))
352 {
353 rese->next = req;
354 rese = req;
355 }
356 else
357 {
358 rese = ress = req;
359
360 /* write a dummy byte to the pipe so fh becomes ready */ 1142 /* write a dummy byte to the pipe so fh becomes ready */
361 write (respipe [1], &respipe, 1); 1143 write (respipe [1], &respipe, 1);
362 }
363 1144
364 pthread_mutex_unlock (&reslock); 1145 self->req = 0;
1146 worker_clear (self);
1147
1148 UNLOCK (reslock);
365 } 1149 }
366 while (type != REQ_QUIT); 1150
1151quit:
1152 LOCK (wrklock);
1153 worker_free (self);
1154 UNLOCK (wrklock);
367 1155
368 return 0; 1156 return 0;
369} 1157}
370 1158
1159/*****************************************************************************/
1160
1161static void atfork_prepare (void)
1162{
1163 LOCK (wrklock);
1164 LOCK (reqlock);
1165 LOCK (reslock);
1166#if !HAVE_PREADWRITE
1167 LOCK (preadwritelock);
1168#endif
1169#if !HAVE_READDIR_R
1170 LOCK (readdirlock);
1171#endif
1172}
1173
1174static void atfork_parent (void)
1175{
1176#if !HAVE_READDIR_R
1177 UNLOCK (readdirlock);
1178#endif
1179#if !HAVE_PREADWRITE
1180 UNLOCK (preadwritelock);
1181#endif
1182 UNLOCK (reslock);
1183 UNLOCK (reqlock);
1184 UNLOCK (wrklock);
1185}
1186
1187static void atfork_child (void)
1188{
1189 aio_req prv;
1190
1191 while (prv = reqq_shift (&req_queue))
1192 req_free (prv);
1193
1194 while (prv = reqq_shift (&res_queue))
1195 req_free (prv);
1196
1197 while (wrk_first.next != &wrk_first)
1198 {
1199 worker *wrk = wrk_first.next;
1200
1201 if (wrk->req)
1202 req_free (wrk->req);
1203
1204 worker_clear (wrk);
1205 worker_free (wrk);
1206 }
1207
1208 started = 0;
1209 idle = 0;
1210 nreqs = 0;
1211 nready = 0;
1212 npending = 0;
1213
1214 close (respipe [0]);
1215 close (respipe [1]);
1216 create_pipe ();
1217
1218 atfork_parent ();
1219}
1220
1221#define dREQ \
1222 aio_req req; \
1223 int req_pri = next_pri; \
1224 next_pri = DEFAULT_PRI + PRI_BIAS; \
1225 \
1226 if (SvOK (callback) && !SvROK (callback)) \
1227 croak ("callback must be undef or of reference type"); \
1228 \
1229 Newz (0, req, 1, aio_cb); \
1230 if (!req) \
1231 croak ("out of memory during aio_req allocation"); \
1232 \
1233 req->callback = newSVsv (callback); \
1234 req->pri = req_pri
1235
1236#define REQ_SEND \
1237 req_send (req); \
1238 \
1239 if (GIMME_V != G_VOID) \
1240 XPUSHs (req_sv (req, AIO_REQ_KLASS));
1241
371MODULE = IO::AIO PACKAGE = IO::AIO 1242MODULE = IO::AIO PACKAGE = IO::AIO
372 1243
373PROTOTYPES: ENABLE 1244PROTOTYPES: ENABLE
374 1245
375BOOT: 1246BOOT:
376{ 1247{
377 if (pipe (respipe)) 1248 HV *stash = gv_stashpv ("IO::AIO", 1);
378 croak ("unable to initialize result pipe");
379 1249
380 if (fcntl (respipe [0], F_SETFL, O_NONBLOCK)) 1250 newCONSTSUB (stash, "EXDEV", newSViv (EXDEV));
381 croak ("cannot set result pipe to nonblocking mode"); 1251 newCONSTSUB (stash, "O_RDONLY", newSViv (O_RDONLY));
1252 newCONSTSUB (stash, "O_WRONLY", newSViv (O_WRONLY));
1253 newCONSTSUB (stash, "O_CREAT", newSViv (O_CREAT));
1254 newCONSTSUB (stash, "O_TRUNC", newSViv (O_TRUNC));
1255 newCONSTSUB (stash, "S_IFIFO", newSViv (S_IFIFO));
382 1256
383 if (fcntl (respipe [1], F_SETFL, O_NONBLOCK)) 1257 create_pipe ();
384 croak ("cannot set result pipe to nonblocking mode"); 1258 pthread_atfork (atfork_prepare, atfork_parent, atfork_child);
385} 1259}
386 1260
387void 1261void
388min_parallel(nthreads) 1262max_poll_reqs (int nreqs)
389 int nthreads
390 PROTOTYPE: $ 1263 PROTOTYPE: $
391 CODE: 1264 CODE:
392 while (nthreads > started) 1265 max_poll_reqs = nreqs;
393 start_thread ();
394 1266
395void 1267void
396max_parallel(nthreads) 1268max_poll_time (double nseconds)
397 int nthreads
398 PROTOTYPE: $ 1269 PROTOTYPE: $
399 CODE: 1270 CODE:
400{ 1271 max_poll_time = nseconds * AIO_TICKS;
401 int cur = started;
402 while (cur > nthreads)
403 {
404 end_thread ();
405 cur--;
406 }
407 1272
408 while (started > nthreads) 1273void
409 { 1274min_parallel (int nthreads)
410 poll_wait (); 1275 PROTOTYPE: $
411 poll_cb (); 1276
412 } 1277void
413} 1278max_parallel (int nthreads)
1279 PROTOTYPE: $
1280
1281void
1282max_idle (int nthreads)
1283 PROTOTYPE: $
1284 CODE:
1285 set_max_idle (nthreads);
414 1286
415int 1287int
416max_outstanding(nreqs) 1288max_outstanding (int maxreqs)
417 int nreqs 1289 PROTOTYPE: $
418 PROTOTYPE: $
419 CODE: 1290 CODE:
420 RETVAL = max_outstanding; 1291 RETVAL = max_outstanding;
421 max_outstanding = nreqs; 1292 max_outstanding = maxreqs;
1293 OUTPUT:
1294 RETVAL
422 1295
423void 1296void
424aio_open(pathname,flags,mode,callback=&PL_sv_undef) 1297aio_open (pathname,flags,mode,callback=&PL_sv_undef)
425 SV * pathname 1298 SV * pathname
426 int flags 1299 int flags
427 int mode 1300 int mode
428 SV * callback 1301 SV * callback
429 PROTOTYPE: $$$;$ 1302 PROTOTYPE: $$$;$
430 CODE: 1303 PPCODE:
431{ 1304{
432 aio_req req; 1305 dREQ;
433
434 Newz (0, req, 1, aio_cb);
435
436 if (!req)
437 croak ("out of memory during aio_req allocation");
438 1306
439 req->type = REQ_OPEN; 1307 req->type = REQ_OPEN;
440 req->data = newSVsv (pathname); 1308 req->data = newSVsv (pathname);
441 req->dataptr = SvPV_nolen (req->data); 1309 req->dataptr = SvPVbyte_nolen (req->data);
442 req->fd = flags; 1310 req->fd = flags;
443 req->mode = mode; 1311 req->mode = mode;
444 req->callback = SvREFCNT_inc (callback);
445 1312
446 send_req (req); 1313 REQ_SEND;
447} 1314}
448 1315
449void 1316void
450aio_close(fh,callback=&PL_sv_undef) 1317aio_close (fh,callback=&PL_sv_undef)
451 InputStream fh 1318 SV * fh
452 SV * callback 1319 SV * callback
453 PROTOTYPE: $;$ 1320 PROTOTYPE: $;$
454 ALIAS: 1321 ALIAS:
455 aio_close = REQ_CLOSE 1322 aio_close = REQ_CLOSE
456 aio_fsync = REQ_FSYNC 1323 aio_fsync = REQ_FSYNC
457 aio_fdatasync = REQ_FDATASYNC 1324 aio_fdatasync = REQ_FDATASYNC
458 CODE: 1325 PPCODE:
1326{
1327 dREQ;
1328
1329 req->type = ix;
1330 req->fh = newSVsv (fh);
1331 req->fd = PerlIO_fileno (IoIFP (sv_2io (fh)));
1332
1333 REQ_SEND (req);
1334}
1335
1336void
1337aio_read (fh,offset,length,data,dataoffset,callback=&PL_sv_undef)
1338 SV * fh
1339 UV offset
1340 UV length
1341 SV * data
1342 UV dataoffset
1343 SV * callback
1344 ALIAS:
1345 aio_read = REQ_READ
1346 aio_write = REQ_WRITE
1347 PROTOTYPE: $$$$$;$
1348 PPCODE:
459{ 1349{
460 aio_req req; 1350 aio_req req;
1351 STRLEN svlen;
1352 char *svptr = SvPVbyte (data, svlen);
461 1353
462 Newz (0, req, 1, aio_cb); 1354 SvUPGRADE (data, SVt_PV);
1355 SvPOK_on (data);
463 1356
464 if (!req) 1357 if (dataoffset < 0)
465 croak ("out of memory during aio_req allocation"); 1358 dataoffset += svlen;
466 1359
467 req->type = ix; 1360 if (dataoffset < 0 || dataoffset > svlen)
468 req->fd = PerlIO_fileno (fh); 1361 croak ("data offset outside of string");
469 req->callback = SvREFCNT_inc (callback);
470 1362
471 send_req (req); 1363 if (ix == REQ_WRITE)
472} 1364 {
473 1365 /* write: check length and adjust. */
474void 1366 if (length < 0 || length + dataoffset > svlen)
475aio_read(fh,offset,length,data,dataoffset,callback=&PL_sv_undef) 1367 length = svlen - dataoffset;
476 InputStream fh 1368 }
477 UV offset 1369 else
478 IV length 1370 {
479 SV * data 1371 /* read: grow scalar as necessary */
480 IV dataoffset 1372 svptr = SvGROW (data, length + dataoffset);
481 SV * callback 1373 }
482 PROTOTYPE: $$$$$;$
483 CODE:
484 read_write (0, PerlIO_fileno (fh), offset, length, data, dataoffset, callback);
485
486void
487aio_write(fh,offset,length,data,dataoffset,callback=&PL_sv_undef)
488 OutputStream fh
489 UV offset
490 IV length
491 SV * data
492 IV dataoffset
493 SV * callback
494 PROTOTYPE: $$$$$;$
495 CODE:
496 read_write (1, PerlIO_fileno (fh), offset, length, data, dataoffset, callback);
497
498void
499aio_readahead(fh,offset,length,callback=&PL_sv_undef)
500 InputStream fh
501 UV offset
502 IV length
503 SV * callback
504 PROTOTYPE: $$$;$
505 CODE:
506{
507 aio_req req;
508 1374
509 if (length < 0) 1375 if (length < 0)
510 croak ("length must not be negative"); 1376 croak ("length must not be negative");
511 1377
512 Newz (0, req, 1, aio_cb); 1378 {
1379 dREQ;
513 1380
514 if (!req) 1381 req->type = ix;
515 croak ("out of memory during aio_req allocation"); 1382 req->fh = newSVsv (fh);
1383 req->fd = PerlIO_fileno (ix == REQ_READ ? IoIFP (sv_2io (fh))
1384 : IoOFP (sv_2io (fh)));
1385 req->offset = offset;
1386 req->length = length;
1387 req->data = SvREFCNT_inc (data);
1388 req->dataptr = (char *)svptr + dataoffset;
1389
1390 if (!SvREADONLY (data))
1391 {
1392 SvREADONLY_on (data);
1393 req->data2ptr = (void *)data;
1394 }
1395
1396 REQ_SEND;
1397 }
1398}
1399
1400void
1401aio_sendfile (out_fh,in_fh,in_offset,length,callback=&PL_sv_undef)
1402 SV * out_fh
1403 SV * in_fh
1404 UV in_offset
1405 UV length
1406 SV * callback
1407 PROTOTYPE: $$$$;$
1408 PPCODE:
1409{
1410 dREQ;
1411
1412 req->type = REQ_SENDFILE;
1413 req->fh = newSVsv (out_fh);
1414 req->fd = PerlIO_fileno (IoIFP (sv_2io (out_fh)));
1415 req->fh2 = newSVsv (in_fh);
1416 req->fd2 = PerlIO_fileno (IoIFP (sv_2io (in_fh)));
1417 req->offset = in_offset;
1418 req->length = length;
1419
1420 REQ_SEND;
1421}
1422
1423void
1424aio_readahead (fh,offset,length,callback=&PL_sv_undef)
1425 SV * fh
1426 UV offset
1427 IV length
1428 SV * callback
1429 PROTOTYPE: $$$;$
1430 PPCODE:
1431{
1432 dREQ;
516 1433
517 req->type = REQ_READAHEAD; 1434 req->type = REQ_READAHEAD;
1435 req->fh = newSVsv (fh);
518 req->fd = PerlIO_fileno (fh); 1436 req->fd = PerlIO_fileno (IoIFP (sv_2io (fh)));
519 req->offset = offset; 1437 req->offset = offset;
520 req->length = length; 1438 req->length = length;
521 req->callback = SvREFCNT_inc (callback);
522 1439
523 send_req (req); 1440 REQ_SEND;
524} 1441}
525 1442
526void 1443void
527aio_stat(fh_or_path,callback=&PL_sv_undef) 1444aio_stat (fh_or_path,callback=&PL_sv_undef)
528 SV * fh_or_path 1445 SV * fh_or_path
529 SV * callback 1446 SV * callback
530 ALIAS: 1447 ALIAS:
531 aio_stat = REQ_STAT 1448 aio_stat = REQ_STAT
532 aio_lstat = REQ_LSTAT 1449 aio_lstat = REQ_LSTAT
533 CODE: 1450 PPCODE:
534{ 1451{
535 aio_req req; 1452 dREQ;
536
537 Newz (0, req, 1, aio_cb);
538
539 if (!req)
540 croak ("out of memory during aio_req allocation");
541 1453
542 New (0, req->statdata, 1, Stat_t); 1454 New (0, req->statdata, 1, Stat_t);
543
544 if (!req->statdata) 1455 if (!req->statdata)
1456 {
1457 req_free (req);
545 croak ("out of memory during aio_req->statdata allocation"); 1458 croak ("out of memory during aio_req->statdata allocation");
1459 }
546 1460
547 if (SvPOK (fh_or_path)) 1461 if (SvPOK (fh_or_path))
548 { 1462 {
549 req->type = ix; 1463 req->type = ix;
550 req->data = newSVsv (fh_or_path); 1464 req->data = newSVsv (fh_or_path);
551 req->dataptr = SvPV_nolen (req->data); 1465 req->dataptr = SvPVbyte_nolen (req->data);
552 } 1466 }
553 else 1467 else
554 { 1468 {
555 req->type = REQ_FSTAT; 1469 req->type = REQ_FSTAT;
1470 req->fh = newSVsv (fh_or_path);
556 req->fd = PerlIO_fileno (IoIFP (sv_2io (fh_or_path))); 1471 req->fd = PerlIO_fileno (IoIFP (sv_2io (fh_or_path)));
557 } 1472 }
558 1473
559 req->callback = SvREFCNT_inc (callback); 1474 REQ_SEND;
560
561 send_req (req);
562} 1475}
563 1476
564void 1477void
565aio_unlink(pathname,callback=&PL_sv_undef) 1478aio_unlink (pathname,callback=&PL_sv_undef)
566 SV * pathname 1479 SV * pathname
567 SV * callback 1480 SV * callback
1481 ALIAS:
1482 aio_unlink = REQ_UNLINK
1483 aio_rmdir = REQ_RMDIR
1484 aio_readdir = REQ_READDIR
1485 PPCODE:
1486{
1487 dREQ;
1488
1489 req->type = ix;
1490 req->data = newSVsv (pathname);
1491 req->dataptr = SvPVbyte_nolen (req->data);
1492
1493 REQ_SEND;
1494}
1495
1496void
1497aio_link (oldpath,newpath,callback=&PL_sv_undef)
1498 SV * oldpath
1499 SV * newpath
1500 SV * callback
1501 ALIAS:
1502 aio_link = REQ_LINK
1503 aio_symlink = REQ_SYMLINK
1504 aio_rename = REQ_RENAME
1505 PPCODE:
1506{
1507 dREQ;
1508
1509 req->type = ix;
1510 req->fh = newSVsv (oldpath);
1511 req->data2ptr = SvPVbyte_nolen (req->fh);
1512 req->data = newSVsv (newpath);
1513 req->dataptr = SvPVbyte_nolen (req->data);
1514
1515 REQ_SEND;
1516}
1517
1518void
1519aio_mknod (pathname,mode,dev,callback=&PL_sv_undef)
1520 SV * pathname
1521 SV * callback
1522 UV mode
1523 UV dev
1524 PPCODE:
1525{
1526 dREQ;
1527
1528 req->type = REQ_MKNOD;
1529 req->data = newSVsv (pathname);
1530 req->dataptr = SvPVbyte_nolen (req->data);
1531 req->mode = (mode_t)mode;
1532 req->offset = dev;
1533
1534 REQ_SEND;
1535}
1536
1537void
1538aio_busy (delay,callback=&PL_sv_undef)
1539 double delay
1540 SV * callback
1541 PPCODE:
1542{
1543 dREQ;
1544
1545 req->type = REQ_BUSY;
1546 req->fd = delay < 0. ? 0 : delay;
1547 req->fd2 = delay < 0. ? 0 : 1000. * (delay - req->fd);
1548
1549 REQ_SEND;
1550}
1551
1552void
1553aio_group (callback=&PL_sv_undef)
1554 SV * callback
1555 PROTOTYPE: ;$
1556 PPCODE:
1557{
1558 dREQ;
1559
1560 req->type = REQ_GROUP;
1561 req_send (req);
1562
1563 XPUSHs (req_sv (req, AIO_GRP_KLASS));
1564}
1565
1566void
1567aio_nop (callback=&PL_sv_undef)
1568 SV * callback
1569 PPCODE:
1570{
1571 dREQ;
1572
1573 req->type = REQ_NOP;
1574
1575 REQ_SEND;
1576}
1577
1578int
1579aioreq_pri (int pri = 0)
1580 PROTOTYPE: ;$
568 CODE: 1581 CODE:
569{ 1582 RETVAL = next_pri - PRI_BIAS;
570 aio_req req; 1583 if (items > 0)
571 1584 {
572 Newz (0, req, 1, aio_cb); 1585 if (pri < PRI_MIN) pri = PRI_MIN;
573 1586 if (pri > PRI_MAX) pri = PRI_MAX;
574 if (!req) 1587 next_pri = pri + PRI_BIAS;
575 croak ("out of memory during aio_req allocation"); 1588 }
576 1589 OUTPUT:
577 req->type = REQ_UNLINK; 1590 RETVAL
578 req->data = newSVsv (pathname);
579 req->dataptr = SvPV_nolen (req->data);
580 req->callback = SvREFCNT_inc (callback);
581
582 send_req (req);
583}
584 1591
585void 1592void
1593aioreq_nice (int nice = 0)
1594 CODE:
1595 nice = next_pri - nice;
1596 if (nice < PRI_MIN) nice = PRI_MIN;
1597 if (nice > PRI_MAX) nice = PRI_MAX;
1598 next_pri = nice + PRI_BIAS;
1599
1600void
586flush() 1601flush ()
587 PROTOTYPE: 1602 PROTOTYPE:
588 CODE: 1603 CODE:
589 while (nreqs) 1604 while (nreqs)
590 { 1605 {
591 poll_wait (); 1606 poll_wait ();
592 poll_cb (); 1607 poll_cb (0);
593 } 1608 }
594 1609
595void 1610void
596poll() 1611poll()
597 PROTOTYPE: 1612 PROTOTYPE:
598 CODE: 1613 CODE:
599 if (nreqs) 1614 if (nreqs)
600 { 1615 {
601 poll_wait (); 1616 poll_wait ();
602 poll_cb (); 1617 poll_cb (0);
603 } 1618 }
604 1619
605int 1620int
606poll_fileno() 1621poll_fileno()
607 PROTOTYPE: 1622 PROTOTYPE:
631 CODE: 1646 CODE:
632 RETVAL = nreqs; 1647 RETVAL = nreqs;
633 OUTPUT: 1648 OUTPUT:
634 RETVAL 1649 RETVAL
635 1650
1651int
1652nready()
1653 PROTOTYPE:
1654 CODE:
1655 RETVAL = get_nready ();
1656 OUTPUT:
1657 RETVAL
1658
1659int
1660npending()
1661 PROTOTYPE:
1662 CODE:
1663 RETVAL = get_npending ();
1664 OUTPUT:
1665 RETVAL
1666
1667int
1668nthreads()
1669 PROTOTYPE:
1670 CODE:
1671 if (WORDACCESS_UNSAFE) LOCK (wrklock);
1672 RETVAL = started;
1673 if (WORDACCESS_UNSAFE) UNLOCK (wrklock);
1674 OUTPUT:
1675 RETVAL
1676
1677PROTOTYPES: DISABLE
1678
1679MODULE = IO::AIO PACKAGE = IO::AIO::REQ
1680
1681void
1682cancel (aio_req_ornot req)
1683 CODE:
1684 req_cancel (req);
1685
1686void
1687cb (aio_req_ornot req, SV *callback=&PL_sv_undef)
1688 CODE:
1689 SvREFCNT_dec (req->callback);
1690 req->callback = newSVsv (callback);
1691
1692MODULE = IO::AIO PACKAGE = IO::AIO::GRP
1693
1694void
1695add (aio_req grp, ...)
1696 PPCODE:
1697{
1698 int i;
1699 aio_req req;
1700
1701 if (grp->fd == 2)
1702 croak ("cannot add requests to IO::AIO::GRP after the group finished");
1703
1704 for (i = 1; i < items; ++i )
1705 {
1706 if (GIMME_V != G_VOID)
1707 XPUSHs (sv_2mortal (newSVsv (ST (i))));
1708
1709 req = SvAIO_REQ (ST (i));
1710
1711 if (req)
1712 {
1713 ++grp->length;
1714 req->grp = grp;
1715
1716 req->grp_prev = 0;
1717 req->grp_next = grp->grp_first;
1718
1719 if (grp->grp_first)
1720 grp->grp_first->grp_prev = req;
1721
1722 grp->grp_first = req;
1723 }
1724 }
1725}
1726
1727void
1728cancel_subs (aio_req_ornot req)
1729 CODE:
1730 req_cancel_subs (req);
1731
1732void
1733result (aio_req grp, ...)
1734 CODE:
1735{
1736 int i;
1737 AV *av;
1738
1739 grp->errorno = errno;
1740
1741 av = newAV ();
1742
1743 for (i = 1; i < items; ++i )
1744 av_push (av, newSVsv (ST (i)));
1745
1746 SvREFCNT_dec (grp->data);
1747 grp->data = (SV *)av;
1748}
1749
1750void
1751errno (aio_req grp, int errorno = errno)
1752 CODE:
1753 grp->errorno = errorno;
1754
1755void
1756limit (aio_req grp, int limit)
1757 CODE:
1758 grp->fd2 = limit;
1759 aio_grp_feed (grp);
1760
1761void
1762feed (aio_req grp, SV *callback=&PL_sv_undef)
1763 CODE:
1764{
1765 SvREFCNT_dec (grp->fh2);
1766 grp->fh2 = newSVsv (callback);
1767
1768 if (grp->fd2 <= 0)
1769 grp->fd2 = 2;
1770
1771 aio_grp_feed (grp);
1772}
1773
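
The comparison above shows the module growing from a handful of fixed request types into the request-object API: priorities (aioreq_pri), request groups (IO::AIO::GRP), readdir/sendfile/mknod and friends, idle-thread management, and the poll tuning knobs (max_poll_time, max_poll_reqs, nready, npending). For orientation only, a minimal usage sketch of the Perl-level API these XSUBs declare; it is not taken from the distribution, and the file name and variable names are purely illustrative.

use strict;
use warnings;
use IO::AIO;

# queue an asynchronous open; the callback gets an open file handle,
# or undef on error (the error code is left in $!)
aio_open "/etc/passwd", IO::AIO::O_RDONLY, 0, sub {
   my $fh = shift
      or die "open failed: $!";

   # read up to 4 kiB into $buf, starting at file offset 0 and
   # storing at offset 0 inside $buf
   my $buf = "";
   aio_read $fh, 0, 4096, $buf, 0, sub {
      my ($bytes) = @_;
      $bytes >= 0
         or die "read failed: $!";
      print "read $bytes bytes\n";
   };
};

# IO::AIO::poll_fileno/poll_cb integrate with any event loop; a plain
# script can instead block until all queued requests have called back
IO::AIO::flush;

The next request queued after an aioreq_pri call inherits that priority (next_pri is consumed and reset in the dREQ macro above), and aio_group returns an IO::AIO::GRP object whose add/limit/feed methods map onto the grp_* bookkeeping in the C code.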
