Comparing IO-AIO/AIO.xs (file contents):
Revision 1.21 by root, Sun Aug 7 03:26:10 2005 UTC vs.
Revision 1.86 by root, Mon Oct 30 23:30:00 2006 UTC

1/* solaris */
2#define _POSIX_PTHREAD_SEMANTICS 1
3
4#if __linux && !defined(_GNU_SOURCE)
5# define _GNU_SOURCE
6#endif
7
8/* just in case */
1#define _REENTRANT 1 9#define _REENTRANT 1
10
2#include <errno.h> 11#include <errno.h>
3 12
4#include "EXTERN.h" 13#include "EXTERN.h"
5#include "perl.h" 14#include "perl.h"
6#include "XSUB.h" 15#include "XSUB.h"
7 16
8#include "autoconf/config.h" 17#include "autoconf/config.h"
9 18
19#include <pthread.h>
20
21#include <stddef.h>
22#include <errno.h>
23#include <sys/time.h>
24#include <sys/select.h>
10#include <sys/types.h> 25#include <sys/types.h>
11#include <sys/stat.h> 26#include <sys/stat.h>
12 27#include <limits.h>
13#include <unistd.h> 28#include <unistd.h>
14#include <fcntl.h> 29#include <fcntl.h>
15#include <signal.h> 30#include <signal.h>
16#include <sched.h> 31#include <sched.h>
17 32
18#include <pthread.h> 33#if HAVE_SENDFILE
34# if __linux
35# include <sys/sendfile.h>
36# elif __freebsd
37# include <sys/socket.h>
38# include <sys/uio.h>
39# elif __hpux
40# include <sys/socket.h>
41# elif __solaris /* not yet */
42# include <sys/sendfile.h>
43# else
44# error sendfile support requested but not available
45# endif
46#endif
19 47
20typedef void *InputStream; /* hack, but 5.6.1 is simply toooo old ;) */ 48/* number of seconds after which idle threads exit */
21typedef void *OutputStream; /* hack, but 5.6.1 is simply toooo old ;) */ 49#define IDLE_TIMEOUT 10
22typedef void *InOutStream; /* hack, but 5.6.1 is simply toooo old ;) */ 50
51/* used for struct dirent, AIX doesn't provide it */
52#ifndef NAME_MAX
53# define NAME_MAX 4096
54#endif
55
56#ifndef PTHREAD_STACK_MIN
57/* care for broken platforms, e.g. windows */
58# define PTHREAD_STACK_MIN 16384
59#endif
23 60
24#if __ia64 61#if __ia64
25# define STACKSIZE 65536 62# define STACKSIZE 65536
63#elif __i386 || __x86_64 /* 16k is unreasonably high :( */
64# define STACKSIZE PTHREAD_STACK_MIN
26#else 65#else
27# define STACKSIZE 4096 66# define STACKSIZE 16384
28#endif 67#endif
68
 69/* whether word reads are potentially non-atomic.
 70 * this is conservative; most arches this runs on
 71 * likely have atomic word reads/writes.
72 */
73#ifndef WORDACCESS_UNSAFE
74# if __i386 || __x86_64
75# define WORDACCESS_UNSAFE 0
76# else
77# define WORDACCESS_UNSAFE 1
78# endif
79#endif
80
81/* buffer size for various temporary buffers */
82#define AIO_BUFSIZE 65536
83
84#define dBUF \
85 char *aio_buf; \
86 LOCK (wrklock); \
87 self->dbuf = aio_buf = malloc (AIO_BUFSIZE); \
88 UNLOCK (wrklock); \
89 if (!aio_buf) \
90 return -1;
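
A minimal standalone sketch, with hypothetical demo_* names, of the contract dBUF encodes: allocate a per-request scratch buffer, publish the pointer under the worker lock so the cleanup path below (worker_clear) can free it even if the request is abandoned, and bail out of the handler with -1 if allocation fails.

#include <stdlib.h>
#include <sys/types.h>
#include <pthread.h>

/* hypothetical stand-ins for the worker bookkeeping used in this file */
typedef struct { void *dbuf; } demo_worker;
static pthread_mutex_t demo_wrklock = PTHREAD_MUTEX_INITIALIZER;

#define DEMO_BUFSIZE 65536

static ssize_t demo_request (demo_worker *self)
{
  char *aio_buf;

  /* what dBUF expands to: allocate and publish the scratch buffer */
  pthread_mutex_lock (&demo_wrklock);
  self->dbuf = aio_buf = malloc (DEMO_BUFSIZE);
  pthread_mutex_unlock (&demo_wrklock);

  if (!aio_buf)
    return -1;

  /* ... use aio_buf as temporary I/O space ... */
  return 0;
}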
29 91
30enum { 92enum {
31 REQ_QUIT, 93 REQ_QUIT,
32 REQ_OPEN, REQ_CLOSE, 94 REQ_OPEN, REQ_CLOSE,
33 REQ_READ, REQ_WRITE, REQ_READAHEAD, 95 REQ_READ, REQ_WRITE, REQ_READAHEAD,
96 REQ_SENDFILE,
34 REQ_STAT, REQ_LSTAT, REQ_FSTAT, REQ_UNLINK, 97 REQ_STAT, REQ_LSTAT, REQ_FSTAT,
35 REQ_FSYNC, REQ_FDATASYNC, 98 REQ_FSYNC, REQ_FDATASYNC,
99 REQ_UNLINK, REQ_RMDIR, REQ_RENAME,
100 REQ_MKNOD, REQ_READDIR,
101 REQ_LINK, REQ_SYMLINK, REQ_READLINK,
102 REQ_GROUP, REQ_NOP,
103 REQ_BUSY,
36}; 104};
37 105
106#define AIO_REQ_KLASS "IO::AIO::REQ"
107#define AIO_GRP_KLASS "IO::AIO::GRP"
108
38typedef struct aio_cb { 109typedef struct aio_cb
110{
39 struct aio_cb *volatile next; 111 struct aio_cb *volatile next;
40 112
113 SV *callback, *fh;
114 SV *sv1, *sv2;
115 void *ptr1, *ptr2;
116 Stat_t *statdata;
117 off_t offs;
118 size_t size;
119 ssize_t result;
120
121 STRLEN stroffset;
41 int type; 122 int type;
42 123 int int1, int2;
43 int fd; 124 int errorno;
44 off_t offset;
45 size_t length;
46 ssize_t result;
47 mode_t mode; /* open */ 125 mode_t mode; /* open */
48 int errorno;
49 SV *data, *callback, *fh;
50 void *dataptr;
51 STRLEN dataoffset;
52 126
53 Stat_t *statdata; 127 unsigned char flags;
128 unsigned char pri;
129
130 SV *self; /* the perl counterpart of this request, if any */
131 struct aio_cb *grp, *grp_prev, *grp_next, *grp_first;
54} aio_cb; 132} aio_cb;
55 133
134enum {
135 FLAG_CANCELLED = 0x01,
136 FLAG_DATA_RO_OFF = 0x80, /* data was set readonly */
137};
138
56typedef aio_cb *aio_req; 139typedef aio_cb *aio_req;
140typedef aio_cb *aio_req_ornot;
57 141
58static int started; 142enum {
59static volatile int nreqs; 143 PRI_MIN = -4,
60static int max_outstanding = 1<<30; 144 PRI_MAX = 4,
145
146 DEFAULT_PRI = 0,
147 PRI_BIAS = -PRI_MIN,
148 NUM_PRI = PRI_MAX + PRI_BIAS + 1,
149};
150
151#define AIO_TICKS ((1000000 + 1023) >> 10)
152
153static unsigned int max_poll_time = 0;
154static unsigned int max_poll_reqs = 0;
155
 156/* calculate time difference in ~1/AIO_TICKS of a second */
157static int tvdiff (struct timeval *tv1, struct timeval *tv2)
158{
159 return (tv2->tv_sec - tv1->tv_sec ) * AIO_TICKS
160 + ((tv2->tv_usec - tv1->tv_usec) >> 10);
161}
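
To make the units concrete: AIO_TICKS evaluates to 977, so one tick is roughly a millisecond, and the max_poll_time () XSUB further down converts seconds to ticks by multiplying with AIO_TICKS. A small standalone check of the arithmetic:

#include <stdio.h>

int main (void)
{
  int aio_ticks = (1000000 + 1023) >> 10;         /* 977 ticks per second */
  int diff      = 0 * aio_ticks + (500000 >> 10); /* tvdiff () for two timestamps 0.5s apart */

  printf ("%d ticks/s, 0.5s ~ %d ticks\n", aio_ticks, diff); /* 977 ticks/s, 0.5s ~ 488 ticks */
  return 0;
}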
162
163static int next_pri = DEFAULT_PRI + PRI_BIAS;
164
165static unsigned int started, idle, wanted;
166
167#if __linux && defined (PTHREAD_ADAPTIVE_MUTEX_INITIALIZER_NP)
168# define AIO_MUTEX_INIT PTHREAD_ADAPTIVE_MUTEX_INITIALIZER_NP
169#else
170# define AIO_MUTEX_INIT PTHREAD_MUTEX_INITIALIZER
171#endif
172
173#define LOCK(mutex) pthread_mutex_lock (&(mutex))
174#define UNLOCK(mutex) pthread_mutex_unlock (&(mutex))
175
176/* worker threads management */
177static pthread_mutex_t wrklock = AIO_MUTEX_INIT;
178
179typedef struct worker {
180 /* locked by wrklock */
181 struct worker *prev, *next;
182
183 pthread_t tid;
184
185 /* locked by reslock, reqlock or wrklock */
186 aio_req req; /* currently processed request */
187 void *dbuf;
188 DIR *dirp;
189} worker;
190
191static worker wrk_first = { &wrk_first, &wrk_first, 0 };
192
193static void worker_clear (worker *wrk)
194{
195 if (wrk->dirp)
196 {
197 closedir (wrk->dirp);
198 wrk->dirp = 0;
199 }
200
201 if (wrk->dbuf)
202 {
203 free (wrk->dbuf);
204 wrk->dbuf = 0;
205 }
206}
207
208static void worker_free (worker *wrk)
209{
210 wrk->next->prev = wrk->prev;
211 wrk->prev->next = wrk->next;
212
213 free (wrk);
214}
215
216static volatile unsigned int nreqs, nready, npending;
217static volatile unsigned int max_idle = 4;
218static volatile unsigned int max_outstanding = 0xffffffff;
61static int respipe [2]; 219static int respipe [2];
62 220
63static pthread_mutex_t reslock = PTHREAD_MUTEX_INITIALIZER; 221static pthread_mutex_t reslock = AIO_MUTEX_INIT;
64static pthread_mutex_t reqlock = PTHREAD_MUTEX_INITIALIZER; 222static pthread_mutex_t reqlock = AIO_MUTEX_INIT;
65static pthread_cond_t reqwait = PTHREAD_COND_INITIALIZER; 223static pthread_cond_t reqwait = PTHREAD_COND_INITIALIZER;
66 224
67static volatile aio_req reqs, reqe; /* queue start, queue end */ 225#if WORDACCESS_UNSAFE
68static volatile aio_req ress, rese; /* queue start, queue end */
69 226
70static void 227static unsigned int get_nready ()
71poll_wait ()
72{ 228{
73 if (nreqs && !ress) 229 unsigned int retval;
74 {
75 fd_set rfd;
76 FD_ZERO(&rfd);
77 FD_SET(respipe [0], &rfd);
78 230
79 select (respipe [0] + 1, &rfd, 0, 0, 0); 231 LOCK (reqlock);
232 retval = nready;
233 UNLOCK (reqlock);
234
235 return retval;
236}
237
238static unsigned int get_npending ()
239{
240 unsigned int retval;
241
242 LOCK (reslock);
243 retval = npending;
244 UNLOCK (reslock);
245
246 return retval;
247}
248
249static unsigned int get_nthreads ()
250{
251 unsigned int retval;
252
253 LOCK (wrklock);
254 retval = started;
255 UNLOCK (wrklock);
256
257 return retval;
258}
259
260#else
261
262# define get_nready() nready
263# define get_npending() npending
264# define get_nthreads() started
265
266#endif
267
268/*
269 * a somewhat faster data structure might be nice, but
270 * with 8 priorities this actually needs <20 insns
271 * per shift, the most expensive operation.
272 */
273typedef struct {
274 aio_req qs[NUM_PRI], qe[NUM_PRI]; /* qstart, qend */
275 int size;
276} reqq;
277
278static reqq req_queue;
279static reqq res_queue;
280
281int reqq_push (reqq *q, aio_req req)
282{
283 int pri = req->pri;
284 req->next = 0;
285
286 if (q->qe[pri])
80 } 287 {
81} 288 q->qe[pri]->next = req;
289 q->qe[pri] = req;
290 }
291 else
292 q->qe[pri] = q->qs[pri] = req;
82 293
83static int 294 return q->size++;
84poll_cb () 295}
296
297aio_req reqq_shift (reqq *q)
298{
299 int pri;
300
301 if (!q->size)
302 return 0;
303
304 --q->size;
305
306 for (pri = NUM_PRI; pri--; )
307 {
308 aio_req req = q->qs[pri];
309
310 if (req)
311 {
312 if (!(q->qs[pri] = req->next))
313 q->qe[pri] = 0;
314
315 return req;
316 }
317 }
318
319 abort ();
320}
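
A self-contained sketch (illustrative demo_* names, not the real structs) of the queue discipline reqq_push/reqq_shift implement: one FIFO per priority slot, with the shifter scanning from the highest slot downwards, so higher-priority requests are served first and equal-priority requests keep submission order.

#include <stdio.h>

#define DEMO_NUM_PRI 9

typedef struct demo_req {
  struct demo_req *next;
  int pri;            /* 0 .. DEMO_NUM_PRI - 1, higher is more urgent */
  const char *name;
} demo_req;

typedef struct {
  demo_req *qs[DEMO_NUM_PRI], *qe[DEMO_NUM_PRI]; /* per-priority queue start/end */
  int size;
} demo_reqq;

static void demo_push (demo_reqq *q, demo_req *req)
{
  int pri = req->pri;
  req->next = 0;

  if (q->qe[pri])
    {
      q->qe[pri]->next = req;
      q->qe[pri] = req;
    }
  else
    q->qe[pri] = q->qs[pri] = req;

  ++q->size;
}

static demo_req *demo_shift (demo_reqq *q)
{
  int pri;

  if (!q->size)
    return 0;

  --q->size;

  for (pri = DEMO_NUM_PRI; pri--; )
    if (q->qs[pri])
      {
        demo_req *req = q->qs[pri];

        if (!(q->qs[pri] = req->next))
          q->qe[pri] = 0;

        return req;
      }

  return 0; /* not reached while size stays consistent */
}

int main (void)
{
  static demo_reqq q;
  demo_req a = { 0, 4, "a" }, b = { 0, 8, "b" }, c = { 0, 4, "c" };
  demo_req *r;

  demo_push (&q, &a);
  demo_push (&q, &b);
  demo_push (&q, &c);

  while ((r = demo_shift (&q)))
    printf ("%s ", r->name); /* prints: b a c */

  printf ("\n");
  return 0;
}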
321
322static int poll_cb ();
323static void req_invoke (aio_req req);
324static void req_free (aio_req req);
325static void req_cancel (aio_req req);
326
327/* must be called at most once */
328static SV *req_sv (aio_req req, const char *klass)
329{
330 if (!req->self)
331 {
332 req->self = (SV *)newHV ();
333 sv_magic (req->self, 0, PERL_MAGIC_ext, (char *)req, 0);
334 }
335
336 return sv_2mortal (sv_bless (newRV_inc (req->self), gv_stashpv (klass, 1)));
337}
338
339static aio_req SvAIO_REQ (SV *sv)
340{
341 MAGIC *mg;
342
343 if (!sv_derived_from (sv, AIO_REQ_KLASS) || !SvROK (sv))
344 croak ("object of class " AIO_REQ_KLASS " expected");
345
346 mg = mg_find (SvRV (sv), PERL_MAGIC_ext);
347
348 return mg ? (aio_req)mg->mg_ptr : 0;
349}
350
351static void aio_grp_feed (aio_req grp)
352{
353 while (grp->size < grp->int2 && !(grp->flags & FLAG_CANCELLED))
354 {
355 int old_len = grp->size;
356
357 if (grp->sv2 && SvOK (grp->sv2))
358 {
359 dSP;
360
361 ENTER;
362 SAVETMPS;
363 PUSHMARK (SP);
364 XPUSHs (req_sv (grp, AIO_GRP_KLASS));
365 PUTBACK;
366 call_sv (grp->sv2, G_VOID | G_EVAL | G_KEEPERR);
367 SPAGAIN;
368 FREETMPS;
369 LEAVE;
370 }
371
372 /* stop if no progress has been made */
373 if (old_len == grp->size)
374 {
375 SvREFCNT_dec (grp->sv2);
376 grp->sv2 = 0;
377 break;
378 }
379 }
380}
381
382static void aio_grp_dec (aio_req grp)
383{
384 --grp->size;
385
386 /* call feeder, if applicable */
387 aio_grp_feed (grp);
388
389 /* finish, if done */
390 if (!grp->size && grp->int1)
391 {
392 req_invoke (grp);
393 req_free (grp);
394 }
395}
396
397static void req_invoke (aio_req req)
85{ 398{
86 dSP; 399 dSP;
87 int count = 0;
88 aio_req req, prv;
89 400
90 pthread_mutex_lock (&reslock); 401 if (req->flags & FLAG_DATA_RO_OFF)
402 SvREADONLY_off (req->sv1);
91 403
404 if (req->statdata)
92 { 405 {
93 /* read any signals sent by the worker threads */ 406 PL_laststype = req->type == REQ_LSTAT ? OP_LSTAT : OP_STAT;
94 char buf [32]; 407 PL_laststatval = req->result;
95 while (read (respipe [0], buf, 32) > 0) 408 PL_statcache = *(req->statdata);
96 ;
97 }
98
99 req = ress;
100 ress = rese = 0;
101
102 pthread_mutex_unlock (&reslock);
103
104 while (req)
105 { 409 }
106 nreqs--;
107 410
108 if (req->type == REQ_QUIT) 411 if (!(req->flags & FLAG_CANCELLED) && SvOK (req->callback))
109 started--; 412 {
110 else 413 ENTER;
414 SAVETMPS;
415 PUSHMARK (SP);
416 EXTEND (SP, 1);
417
418 switch (req->type)
111 { 419 {
112 int errorno = errno; 420 case REQ_READDIR:
113 errno = req->errorno;
114
115 if (req->type == REQ_READ)
116 SvCUR_set (req->data, req->dataoffset
117 + req->result > 0 ? req->result : 0);
118
119 if (req->data)
120 SvREFCNT_dec (req->data);
121
122 if (req->fh)
123 SvREFCNT_dec (req->fh);
124
125 if (req->type == REQ_STAT || req->type == REQ_LSTAT || req->type == REQ_FSTAT)
126 { 421 {
127 PL_laststype = req->type == REQ_LSTAT ? OP_LSTAT : OP_STAT; 422 SV *rv = &PL_sv_undef;
128 PL_laststatval = req->result;
129 PL_statcache = *(req->statdata);
130 423
131 Safefree (req->statdata); 424 if (req->result >= 0)
425 {
426 int i;
427 char *buf = req->ptr2;
428 AV *av = newAV ();
429
430 av_extend (av, req->result - 1);
431
432 for (i = 0; i < req->result; ++i)
433 {
434 SV *sv = newSVpv (buf, 0);
435
436 av_store (av, i, sv);
437 buf += SvCUR (sv) + 1;
438 }
439
440 rv = sv_2mortal (newRV_noinc ((SV *)av));
441 }
442
443 PUSHs (rv);
132 } 444 }
445 break;
133 446
134 ENTER; 447 case REQ_OPEN:
135 PUSHMARK (SP);
136 XPUSHs (sv_2mortal (newSViv (req->result)));
137
138 if (req->type == REQ_OPEN)
139 { 448 {
140 /* convert fd to fh */ 449 /* convert fd to fh */
141 SV *fh; 450 SV *fh;
142 451
452 PUSHs (sv_2mortal (newSViv (req->result)));
143 PUTBACK; 453 PUTBACK;
144 call_pv ("IO::AIO::_fd2fh", G_SCALAR | G_EVAL); 454 call_pv ("IO::AIO::_fd2fh", G_SCALAR | G_EVAL);
145 SPAGAIN; 455 SPAGAIN;
146 456
147 fh = SvREFCNT_inc (POPs); 457 fh = SvREFCNT_inc (POPs);
148 458
149 PUSHMARK (SP); 459 PUSHMARK (SP);
150 XPUSHs (sv_2mortal (fh)); 460 XPUSHs (sv_2mortal (fh));
151 } 461 }
462 break;
152 463
153 if (SvOK (req->callback)) 464 case REQ_GROUP:
465 req->int1 = 2; /* mark group as finished */
466
467 if (req->sv1)
154 { 468 {
155 PUTBACK; 469 int i;
156 call_sv (req->callback, G_VOID | G_EVAL); 470 AV *av = (AV *)req->sv1;
157 SPAGAIN; 471
472 EXTEND (SP, AvFILL (av) + 1);
473 for (i = 0; i <= AvFILL (av); ++i)
474 PUSHs (*av_fetch (av, i, 0));
158 } 475 }
476 break;
159 477
160 LEAVE; 478 case REQ_NOP:
479 case REQ_BUSY:
480 break;
481
482 case REQ_READLINK:
483 if (req->result > 0)
161 484 {
162 if (req->callback) 485 SvCUR_set (req->sv1, req->result);
163 SvREFCNT_dec (req->callback); 486 *SvEND (req->sv1) = 0;
487 PUSHs (req->sv1);
488 }
489 break;
164 490
165 errno = errorno; 491 case REQ_READ:
166 count++; 492 SvCUR_set (req->sv1, req->stroffset + (req->result > 0 ? req->result : 0));
493 *SvEND (req->sv1) = 0;
494 /* fall through */
495 default:
496 PUSHs (sv_2mortal (newSViv (req->result)));
497 break;
167 } 498 }
168 499
169 prv = req; 500 errno = req->errorno;
170 req = req->next;
171 Safefree (prv);
172 501
173 /* TODO: croak on errors? */ 502 PUTBACK;
503 call_sv (req->callback, G_VOID | G_EVAL);
504 SPAGAIN;
505
506 FREETMPS;
507 LEAVE;
508 }
509
510 if (req->grp)
174 } 511 {
512 aio_req grp = req->grp;
175 513
176 return count; 514 /* unlink request */
515 if (req->grp_next) req->grp_next->grp_prev = req->grp_prev;
516 if (req->grp_prev) req->grp_prev->grp_next = req->grp_next;
517
518 if (grp->grp_first == req)
519 grp->grp_first = req->grp_next;
520
521 aio_grp_dec (grp);
522 }
523
524 if (SvTRUE (ERRSV))
525 {
526 req_free (req);
527 croak (0);
528 }
529}
530
531static void req_free (aio_req req)
532{
533 if (req->self)
534 {
535 sv_unmagic (req->self, PERL_MAGIC_ext);
536 SvREFCNT_dec (req->self);
537 }
538
539 SvREFCNT_dec (req->fh);
540 SvREFCNT_dec (req->sv1);
541 SvREFCNT_dec (req->sv2);
542 SvREFCNT_dec (req->callback);
543 Safefree (req->statdata);
544
545 if (req->type == REQ_READDIR)
546 free (req->ptr2);
547
548 Safefree (req);
549}
550
551static void req_cancel_subs (aio_req grp)
552{
553 aio_req sub;
554
555 if (grp->type != REQ_GROUP)
556 return;
557
558 SvREFCNT_dec (grp->sv2);
559 grp->sv2 = 0;
560
561 for (sub = grp->grp_first; sub; sub = sub->grp_next)
562 req_cancel (sub);
563}
564
565static void req_cancel (aio_req req)
566{
567 req->flags |= FLAG_CANCELLED;
568
569 req_cancel_subs (req);
177} 570}
178 571
179static void *aio_proc(void *arg); 572static void *aio_proc(void *arg);
180 573
181static void
182start_thread (void) 574static void start_thread (void)
183{ 575{
184 sigset_t fullsigset, oldsigset; 576 sigset_t fullsigset, oldsigset;
185 pthread_t tid;
186 pthread_attr_t attr; 577 pthread_attr_t attr;
578
579 worker *wrk = calloc (1, sizeof (worker));
580
581 if (!wrk)
582 croak ("unable to allocate worker thread data");
187 583
188 pthread_attr_init (&attr); 584 pthread_attr_init (&attr);
189 pthread_attr_setstacksize (&attr, STACKSIZE); 585 pthread_attr_setstacksize (&attr, STACKSIZE);
190 pthread_attr_setdetachstate (&attr, PTHREAD_CREATE_DETACHED); 586 pthread_attr_setdetachstate (&attr, PTHREAD_CREATE_DETACHED);
587#ifdef PTHREAD_SCOPE_PROCESS
588 pthread_attr_setscope (&attr, PTHREAD_SCOPE_PROCESS);
589#endif
191 590
192 sigfillset (&fullsigset); 591 sigfillset (&fullsigset);
592
593 LOCK (wrklock);
193 sigprocmask (SIG_SETMASK, &fullsigset, &oldsigset); 594 sigprocmask (SIG_SETMASK, &fullsigset, &oldsigset);
194 595
195 if (pthread_create (&tid, &attr, aio_proc, 0) == 0) 596 if (pthread_create (&wrk->tid, &attr, aio_proc, (void *)wrk) == 0)
597 {
598 wrk->prev = &wrk_first;
599 wrk->next = wrk_first.next;
600 wrk_first.next->prev = wrk;
601 wrk_first.next = wrk;
196 started++; 602 ++started;
603 }
604 else
605 free (wrk);
197 606
198 sigprocmask (SIG_SETMASK, &oldsigset, 0); 607 sigprocmask (SIG_SETMASK, &oldsigset, 0);
608 UNLOCK (wrklock);
199} 609}
200 610
201static void 611static void maybe_start_thread ()
202send_req (aio_req req)
203{ 612{
613 if (get_nthreads () >= wanted)
614 return;
615
616 /* todo: maybe use idle here, but might be less exact */
617 if (0 <= (int)get_nthreads () + (int)get_npending () - (int)nreqs)
618 return;
619
620 start_thread ();
621}
622
623static void req_send (aio_req req)
624{
204 nreqs++; 625 ++nreqs;
205 626
206 pthread_mutex_lock (&reqlock); 627 LOCK (reqlock);
207 628 ++nready;
208 req->next = 0; 629 reqq_push (&req_queue, req);
209
210 if (reqe)
211 {
212 reqe->next = req;
213 reqe = req;
214 }
215 else
216 reqe = reqs = req;
217
218 pthread_cond_signal (&reqwait); 630 pthread_cond_signal (&reqwait);
219 pthread_mutex_unlock (&reqlock); 631 UNLOCK (reqlock);
220 632
633 maybe_start_thread ();
634}
635
636static void end_thread (void)
637{
638 aio_req req;
639
640 Newz (0, req, 1, aio_cb);
641
642 req->type = REQ_QUIT;
643 req->pri = PRI_MAX + PRI_BIAS;
644
645 LOCK (reqlock);
646 reqq_push (&req_queue, req);
647 pthread_cond_signal (&reqwait);
648 UNLOCK (reqlock);
649
650 LOCK (wrklock);
651 --started;
652 UNLOCK (wrklock);
653}
654
655static void set_max_idle (int nthreads)
656{
657 if (WORDACCESS_UNSAFE) LOCK (reqlock);
658 max_idle = nthreads <= 0 ? 1 : nthreads;
659 if (WORDACCESS_UNSAFE) UNLOCK (reqlock);
660}
661
662static void min_parallel (int nthreads)
663{
664 if (wanted < nthreads)
665 wanted = nthreads;
666}
667
668static void max_parallel (int nthreads)
669{
670 if (wanted > nthreads)
671 wanted = nthreads;
672
673 while (started > wanted)
674 end_thread ();
675}
676
677static void poll_wait ()
678{
679 fd_set rfd;
680
681 while (nreqs)
682 {
683 int size;
684 if (WORDACCESS_UNSAFE) LOCK (reslock);
685 size = res_queue.size;
686 if (WORDACCESS_UNSAFE) UNLOCK (reslock);
687
688 if (size)
689 return;
690
691 maybe_start_thread ();
692
693 FD_ZERO(&rfd);
694 FD_SET(respipe [0], &rfd);
695
696 select (respipe [0] + 1, &rfd, 0, 0, 0);
697 }
698}
699
700static int poll_cb ()
701{
702 dSP;
703 int count = 0;
704 int maxreqs = max_poll_reqs;
705 int do_croak = 0;
706 struct timeval tv_start, tv_now;
707 aio_req req;
708
709 if (max_poll_time)
710 gettimeofday (&tv_start, 0);
711
712 for (;;)
713 {
714 for (;;)
715 {
716 maybe_start_thread ();
717
718 LOCK (reslock);
719 req = reqq_shift (&res_queue);
720
721 if (req)
722 {
723 --npending;
724
725 if (!res_queue.size)
726 {
727 /* read any signals sent by the worker threads */
728 char buf [32];
729 while (read (respipe [0], buf, 32) == 32)
730 ;
731 }
732 }
733
734 UNLOCK (reslock);
735
736 if (!req)
737 break;
738
739 --nreqs;
740
741 if (req->type == REQ_GROUP && req->size)
742 {
743 req->int1 = 1; /* mark request as delayed */
744 continue;
745 }
746 else
747 {
748 req_invoke (req);
749
750 count++;
751 }
752
753 req_free (req);
754
755 if (maxreqs && !--maxreqs)
756 break;
757
758 if (max_poll_time)
759 {
760 gettimeofday (&tv_now, 0);
761
762 if (tvdiff (&tv_start, &tv_now) >= max_poll_time)
763 break;
764 }
765 }
766
221 while (nreqs > max_outstanding) 767 if (nreqs <= max_outstanding)
222 { 768 break;
769
223 poll_wait (); 770 poll_wait ();
224 poll_cb ();
225 }
226}
227 771
228static void 772 ++maxreqs;
229end_thread (void) 773 }
230{
231 aio_req req;
232 New (0, req, 1, aio_cb);
233 req->type = REQ_QUIT;
234 774
235 send_req (req); 775 return count;
236} 776}
237 777
778static void create_pipe ()
779{
780 if (pipe (respipe))
781 croak ("unable to initialize result pipe");
782
783 if (fcntl (respipe [0], F_SETFL, O_NONBLOCK))
784 croak ("cannot set result pipe to nonblocking mode");
785
786 if (fcntl (respipe [1], F_SETFL, O_NONBLOCK))
787 croak ("cannot set result pipe to nonblocking mode");
788}
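
The pipe created here is the wake-up channel behind poll_fileno (): a worker writes one dummy byte when it pushes into an empty result queue, and poll_cb () drains the pipe once the result queue is empty again, so event loops can simply watch the read end for readability. A standalone sketch of that self-pipe pattern, without the worker threads:

#include <stdio.h>
#include <unistd.h>
#include <fcntl.h>
#include <sys/select.h>

int main (void)
{
  int respipe [2];
  fd_set rfd;
  char buf [32];

  if (pipe (respipe)
      || fcntl (respipe [0], F_SETFL, O_NONBLOCK))
    return 1;

  /* producer side: signal "results pending" with a dummy byte */
  write (respipe [1], "x", 1);

  /* consumer side: wait until the read end becomes readable ... */
  FD_ZERO (&rfd);
  FD_SET (respipe [0], &rfd);
  select (respipe [0] + 1, &rfd, 0, 0, 0);

  /* ... then drain it once the results have been processed */
  while (read (respipe [0], buf, sizeof (buf)) > 0)
    ;

  puts ("woken up and drained");
  return 0;
}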
789
790/*****************************************************************************/
238/* work around various missing functions */ 791/* work around various missing functions */
239 792
240#if !HAVE_PREADWRITE 793#if !HAVE_PREADWRITE
241# define pread aio_pread 794# define pread aio_pread
242# define pwrite aio_pwrite 795# define pwrite aio_pwrite
244/* 797/*
245 * make our pread/pwrite safe against themselves, but not against 798 * make our pread/pwrite safe against themselves, but not against
246 * normal read/write by using a mutex. slows down execution a lot, 799 * normal read/write by using a mutex. slows down execution a lot,
247 * but that's your problem, not mine. 800 * but that's your problem, not mine.
248 */ 801 */
249static pthread_mutex_t iolock = PTHREAD_MUTEX_INITIALIZER; 802static pthread_mutex_t preadwritelock = PTHREAD_MUTEX_INITIALIZER;
250 803
251static ssize_t 804static ssize_t pread (int fd, void *buf, size_t count, off_t offset)
252pread (int fd, void *buf, size_t count, off_t offset)
253{ 805{
254 ssize_t res; 806 ssize_t res;
255 off_t ooffset; 807 off_t ooffset;
256 808
257 pthread_mutex_lock (&iolock); 809 LOCK (preadwritelock);
258 ooffset = lseek (fd, 0, SEEK_CUR); 810 ooffset = lseek (fd, 0, SEEK_CUR);
259 lseek (fd, offset, SEEK_SET); 811 lseek (fd, offset, SEEK_SET);
260 res = read (fd, buf, count); 812 res = read (fd, buf, count);
261 lseek (fd, ooffset, SEEK_SET); 813 lseek (fd, ooffset, SEEK_SET);
262 pthread_mutex_unlock (&iolock); 814 UNLOCK (preadwritelock);
263 815
264 return res; 816 return res;
265} 817}
266 818
267static ssize_t
268pwrite (int fd, void *buf, size_t count, off_t offset) 819static ssize_t pwrite (int fd, void *buf, size_t count, off_t offset)
269{ 820{
270 ssize_t res; 821 ssize_t res;
271 off_t ooffset; 822 off_t ooffset;
272 823
273 pthread_mutex_lock (&iolock); 824 LOCK (preadwritelock);
274 ooffset = lseek (fd, 0, SEEK_CUR); 825 ooffset = lseek (fd, 0, SEEK_CUR);
275 lseek (fd, offset, SEEK_SET); 826 lseek (fd, offset, SEEK_SET);
276 res = write (fd, buf, count); 827 res = write (fd, buf, count);
277 lseek (fd, offset, SEEK_SET); 828 lseek (fd, offset, SEEK_SET);
278 pthread_mutex_unlock (&iolock); 829 UNLOCK (preadwritelock);
279 830
280 return res; 831 return res;
281} 832}
282#endif 833#endif
283 834
284#if !HAVE_FDATASYNC 835#if !HAVE_FDATASYNC
285# define fdatasync fsync 836# define fdatasync fsync
286#endif 837#endif
287 838
288#if !HAVE_READAHEAD 839#if !HAVE_READAHEAD
289# define readahead aio_readahead 840# define readahead(fd,offset,count) aio_readahead (fd, offset, count, self)
290 841
291static char readahead_buf[4096]; 842static ssize_t aio_readahead (int fd, off_t offset, size_t count, worker *self)
292
293static ssize_t
294readahead (int fd, off_t offset, size_t count)
295{ 843{
844 dBUF;
845
296 while (count > 0) 846 while (count > 0)
297 { 847 {
298 size_t len = count < sizeof (readahead_buf) ? count : sizeof (readahead_buf); 848 size_t len = count < AIO_BUFSIZE ? count : AIO_BUFSIZE;
299 849
300 pread (fd, readahead_buf, len, offset); 850 pread (fd, aio_buf, len, offset);
301 offset += len; 851 offset += len;
302 count -= len; 852 count -= len;
303 } 853 }
304 854
305 errno = 0; 855 errno = 0;
306} 856}
857
307#endif 858#endif
308 859
309static void * 860#if !HAVE_READDIR_R
861# define readdir_r aio_readdir_r
862
863static pthread_mutex_t readdirlock = PTHREAD_MUTEX_INITIALIZER;
864
865static int readdir_r (DIR *dirp, struct dirent *ent, struct dirent **res)
866{
867 struct dirent *e;
868 int errorno;
869
870 LOCK (readdirlock);
871
872 e = readdir (dirp);
873 errorno = errno;
874
875 if (e)
876 {
877 *res = ent;
878 strcpy (ent->d_name, e->d_name);
879 }
880 else
881 *res = 0;
882
883 UNLOCK (readdirlock);
884
885 errno = errorno;
886 return e ? 0 : -1;
887}
888#endif
889
890/* sendfile always needs emulation */
891static ssize_t sendfile_ (int ofd, int ifd, off_t offset, size_t count, worker *self)
892{
893 ssize_t res;
894
895 if (!count)
896 return 0;
897
898#if HAVE_SENDFILE
899# if __linux
900 res = sendfile (ofd, ifd, &offset, count);
901
902# elif __freebsd
903 /*
904 * Of course, the freebsd sendfile is a dire hack with no thoughts
905 * wasted on making it similar to other I/O functions.
906 */
907 {
908 off_t sbytes;
909 res = sendfile (ifd, ofd, offset, count, 0, &sbytes, 0);
910
911 if (res < 0 && sbytes)
912 /* maybe only on EAGAIN: as usual, the manpage leaves you guessing */
913 res = sbytes;
914 }
915
916# elif __hpux
917 res = sendfile (ofd, ifd, offset, count, 0, 0);
918
919# elif __solaris
920 {
921 struct sendfilevec vec;
922 size_t sbytes;
923
924 vec.sfv_fd = ifd;
925 vec.sfv_flag = 0;
926 vec.sfv_off = offset;
927 vec.sfv_len = count;
928
929 res = sendfilev (ofd, &vec, 1, &sbytes);
930
931 if (res < 0 && sbytes)
932 res = sbytes;
933 }
934
935# endif
936#else
937 res = -1;
938 errno = ENOSYS;
939#endif
940
941 if (res < 0
942 && (errno == ENOSYS || errno == EINVAL || errno == ENOTSOCK
943#if __solaris
944 || errno == EAFNOSUPPORT || errno == EPROTOTYPE
945#endif
946 )
947 )
948 {
949 /* emulate sendfile. this is a major pain in the ass */
950 dBUF;
951
952 res = 0;
953
954 while (count)
955 {
956 ssize_t cnt;
957
958 cnt = pread (ifd, aio_buf, count > AIO_BUFSIZE ? AIO_BUFSIZE : count, offset);
959
960 if (cnt <= 0)
961 {
962 if (cnt && !res) res = -1;
963 break;
964 }
965
966 cnt = write (ofd, aio_buf, cnt);
967
968 if (cnt <= 0)
969 {
970 if (cnt && !res) res = -1;
971 break;
972 }
973
974 offset += cnt;
975 res += cnt;
976 count -= cnt;
977 }
978 }
979
980 return res;
981}
982
983/* read a full directory */
984static void scandir_ (aio_req req, worker *self)
985{
986 DIR *dirp;
987 union
988 {
989 struct dirent d;
990 char b [offsetof (struct dirent, d_name) + NAME_MAX + 1];
991 } *u;
992 struct dirent *entp;
993 char *name, *names;
994 int memlen = 4096;
995 int memofs = 0;
996 int res = 0;
997 int errorno;
998
999 LOCK (wrklock);
1000 self->dirp = dirp = opendir (req->ptr1);
1001 self->dbuf = u = malloc (sizeof (*u));
1002 req->ptr2 = names = malloc (memlen);
1003 UNLOCK (wrklock);
1004
1005 if (dirp && u && names)
1006 for (;;)
1007 {
1008 errno = 0;
1009 readdir_r (dirp, &u->d, &entp);
1010
1011 if (!entp)
1012 break;
1013
1014 name = entp->d_name;
1015
1016 if (name [0] != '.' || (name [1] && (name [1] != '.' || name [2])))
1017 {
1018 int len = strlen (name) + 1;
1019
1020 res++;
1021
1022 while (memofs + len > memlen)
1023 {
1024 memlen *= 2;
1025 LOCK (wrklock);
1026 req->ptr2 = names = realloc (names, memlen);
1027 UNLOCK (wrklock);
1028
1029 if (!names)
1030 break;
1031 }
1032
1033 memcpy (names + memofs, name, len);
1034 memofs += len;
1035 }
1036 }
1037
1038 if (errno)
1039 res = -1;
1040
1041 req->result = res;
1042}
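
scandir_ hands the listing back as one block in req->ptr2: the surviving names ('.' and '..' are skipped), each with its trailing NUL, packed back to back, and req->result holds their count. The REQ_READDIR branch of req_invoke above walks that block the same way as this standalone sketch:

#include <stdio.h>
#include <string.h>

int main (void)
{
  /* hypothetical scandir_ output for a directory containing foo and bar */
  const char names [] = "foo\0bar\0";
  int result = 2;          /* number of packed names, i.e. req->result */
  const char *p = names;
  int i;

  for (i = 0; i < result; ++i)
    {
      printf ("entry %d: %s\n", i, p);
      p += strlen (p) + 1;  /* step past the trailing NUL, like buf += SvCUR (sv) + 1 */
    }

  return 0;
}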
1043
1044/*****************************************************************************/
1045
310aio_proc (void *thr_arg) 1046static void *aio_proc (void *thr_arg)
311{ 1047{
312 aio_req req; 1048 aio_req req;
313 int type; 1049 struct timespec ts;
1050 worker *self = (worker *)thr_arg;
314 1051
315 do 1052 /* try to distribute timeouts somewhat evenly */
1053 ts.tv_nsec = (((unsigned long)self + (unsigned long)ts.tv_sec) & 1023UL)
1054 * (1000000000UL / 1024UL);
1055
1056 for (;;)
316 { 1057 {
317 pthread_mutex_lock (&reqlock); 1058 ts.tv_sec = time (0) + IDLE_TIMEOUT;
1059
1060 LOCK (reqlock);
318 1061
319 for (;;) 1062 for (;;)
320 { 1063 {
321 req = reqs; 1064 self->req = req = reqq_shift (&req_queue);
322
323 if (reqs)
324 {
325 reqs = reqs->next;
326 if (!reqs) reqe = 0;
327 }
328 1065
329 if (req) 1066 if (req)
330 break; 1067 break;
331 1068
1069 ++idle;
1070
1071 if (pthread_cond_timedwait (&reqwait, &reqlock, &ts)
1072 == ETIMEDOUT)
1073 {
1074 if (idle > max_idle)
1075 {
1076 --idle;
1077 UNLOCK (reqlock);
1078 LOCK (wrklock);
1079 --started;
1080 UNLOCK (wrklock);
1081 goto quit;
1082 }
1083
1084 /* we are allowed to idle, so do so without any timeout */
332 pthread_cond_wait (&reqwait, &reqlock); 1085 pthread_cond_wait (&reqwait, &reqlock);
1086 ts.tv_sec = time (0) + IDLE_TIMEOUT;
1087 }
1088
1089 --idle;
333 } 1090 }
334 1091
335 pthread_mutex_unlock (&reqlock); 1092 --nready;
1093
1094 UNLOCK (reqlock);
336 1095
337 errno = 0; /* strictly unnecessary */ 1096 errno = 0; /* strictly unnecessary */
338 1097
339 type = req->type; 1098 if (!(req->flags & FLAG_CANCELLED))
340
341 switch (type) 1099 switch (req->type)
342 { 1100 {
343 case REQ_READ: req->result = pread (req->fd, req->dataptr, req->length, req->offset); break; 1101 case REQ_READ: req->result = pread (req->int1, req->ptr1, req->size, req->offs); break;
344 case REQ_WRITE: req->result = pwrite (req->fd, req->dataptr, req->length, req->offset); break; 1102 case REQ_WRITE: req->result = pwrite (req->int1, req->ptr1, req->size, req->offs); break;
345 1103
346 case REQ_READAHEAD: req->result = readahead (req->fd, req->offset, req->length); break; 1104 case REQ_READAHEAD: req->result = readahead (req->int1, req->offs, req->size); break;
1105 case REQ_SENDFILE: req->result = sendfile_ (req->int1, req->int2, req->offs, req->size, self); break;
347 1106
348 case REQ_STAT: req->result = stat (req->dataptr, req->statdata); break; 1107 case REQ_STAT: req->result = stat (req->ptr1, req->statdata); break;
349 case REQ_LSTAT: req->result = lstat (req->dataptr, req->statdata); break; 1108 case REQ_LSTAT: req->result = lstat (req->ptr1, req->statdata); break;
350 case REQ_FSTAT: req->result = fstat (req->fd , req->statdata); break; 1109 case REQ_FSTAT: req->result = fstat (req->int1, req->statdata); break;
351 1110
352 case REQ_OPEN: req->result = open (req->dataptr, req->fd, req->mode); break; 1111 case REQ_OPEN: req->result = open (req->ptr1, req->int1, req->mode); break;
353 case REQ_CLOSE: req->result = close (req->fd); break; 1112 case REQ_CLOSE: req->result = close (req->int1); break;
354 case REQ_UNLINK: req->result = unlink (req->dataptr); break; 1113 case REQ_UNLINK: req->result = unlink (req->ptr1); break;
1114 case REQ_RMDIR: req->result = rmdir (req->ptr1); break;
1115 case REQ_RENAME: req->result = rename (req->ptr2, req->ptr1); break;
1116 case REQ_LINK: req->result = link (req->ptr2, req->ptr1); break;
1117 case REQ_SYMLINK: req->result = symlink (req->ptr2, req->ptr1); break;
1118 case REQ_MKNOD: req->result = mknod (req->ptr2, req->mode, (dev_t)req->offs); break;
1119 case REQ_READLINK: req->result = readlink (req->ptr2, req->ptr1, NAME_MAX); break;
355 1120
356 case REQ_FDATASYNC: req->result = fdatasync (req->fd); break; 1121 case REQ_FDATASYNC: req->result = fdatasync (req->int1); break;
357 case REQ_FSYNC: req->result = fsync (req->fd); break; 1122 case REQ_FSYNC: req->result = fsync (req->int1); break;
1123 case REQ_READDIR: scandir_ (req, self); break;
358 1124
1125 case REQ_BUSY:
1126 {
1127 struct timeval tv;
1128
1129 tv.tv_sec = req->int1;
1130 tv.tv_usec = req->int2;
1131
1132 req->result = select (0, 0, 0, 0, &tv);
1133 }
1134
1135 case REQ_GROUP:
1136 case REQ_NOP:
1137 break;
1138
359 case REQ_QUIT: 1139 case REQ_QUIT:
360 break; 1140 goto quit;
361 1141
362 default: 1142 default:
363 req->result = ENOSYS; 1143 req->result = ENOSYS;
364 break; 1144 break;
365 } 1145 }
366 1146
367 req->errorno = errno; 1147 req->errorno = errno;
368 1148
369 pthread_mutex_lock (&reslock); 1149 LOCK (reslock);
370 1150
371 req->next = 0; 1151 ++npending;
372 1152
373 if (rese) 1153 if (!reqq_push (&res_queue, req))
374 {
375 rese->next = req;
376 rese = req;
377 }
378 else
379 {
380 rese = ress = req;
381
382 /* write a dummy byte to the pipe so fh becomes ready */ 1154 /* write a dummy byte to the pipe so fh becomes ready */
383 write (respipe [1], &respipe, 1); 1155 write (respipe [1], &respipe, 1);
384 }
385 1156
386 pthread_mutex_unlock (&reslock); 1157 self->req = 0;
1158 worker_clear (self);
1159
1160 UNLOCK (reslock);
387 } 1161 }
388 while (type != REQ_QUIT); 1162
1163quit:
1164 LOCK (wrklock);
1165 worker_free (self);
1166 UNLOCK (wrklock);
389 1167
390 return 0; 1168 return 0;
391} 1169}
392 1170
1171/*****************************************************************************/
1172
1173static void atfork_prepare (void)
1174{
1175 LOCK (wrklock);
1176 LOCK (reqlock);
1177 LOCK (reslock);
1178#if !HAVE_PREADWRITE
1179 LOCK (preadwritelock);
1180#endif
1181#if !HAVE_READDIR_R
1182 LOCK (readdirlock);
1183#endif
1184}
1185
1186static void atfork_parent (void)
1187{
1188#if !HAVE_READDIR_R
1189 UNLOCK (readdirlock);
1190#endif
1191#if !HAVE_PREADWRITE
1192 UNLOCK (preadwritelock);
1193#endif
1194 UNLOCK (reslock);
1195 UNLOCK (reqlock);
1196 UNLOCK (wrklock);
1197}
1198
1199static void atfork_child (void)
1200{
1201 aio_req prv;
1202
1203 while (prv = reqq_shift (&req_queue))
1204 req_free (prv);
1205
1206 while (prv = reqq_shift (&res_queue))
1207 req_free (prv);
1208
1209 while (wrk_first.next != &wrk_first)
1210 {
1211 worker *wrk = wrk_first.next;
1212
1213 if (wrk->req)
1214 req_free (wrk->req);
1215
1216 worker_clear (wrk);
1217 worker_free (wrk);
1218 }
1219
1220 started = 0;
1221 idle = 0;
1222 nreqs = 0;
1223 nready = 0;
1224 npending = 0;
1225
1226 close (respipe [0]);
1227 close (respipe [1]);
1228 create_pipe ();
1229
1230 atfork_parent ();
1231}
1232
1233#define dREQ \
1234 aio_req req; \
1235 int req_pri = next_pri; \
1236 next_pri = DEFAULT_PRI + PRI_BIAS; \
1237 \
1238 if (SvOK (callback) && !SvROK (callback)) \
1239 croak ("callback must be undef or of reference type"); \
1240 \
1241 Newz (0, req, 1, aio_cb); \
1242 if (!req) \
1243 croak ("out of memory during aio_req allocation"); \
1244 \
1245 req->callback = newSVsv (callback); \
1246 req->pri = req_pri
1247
1248#define REQ_SEND \
1249 req_send (req); \
1250 \
1251 if (GIMME_V != G_VOID) \
1252 XPUSHs (req_sv (req, AIO_REQ_KLASS));
1253
393MODULE = IO::AIO PACKAGE = IO::AIO 1254MODULE = IO::AIO PACKAGE = IO::AIO
394 1255
395PROTOTYPES: ENABLE 1256PROTOTYPES: ENABLE
396 1257
397BOOT: 1258BOOT:
398{ 1259{
399 if (pipe (respipe)) 1260 HV *stash = gv_stashpv ("IO::AIO", 1);
400 croak ("unable to initialize result pipe");
401 1261
402 if (fcntl (respipe [0], F_SETFL, O_NONBLOCK)) 1262 newCONSTSUB (stash, "EXDEV", newSViv (EXDEV));
403 croak ("cannot set result pipe to nonblocking mode"); 1263 newCONSTSUB (stash, "O_RDONLY", newSViv (O_RDONLY));
1264 newCONSTSUB (stash, "O_WRONLY", newSViv (O_WRONLY));
1265 newCONSTSUB (stash, "O_CREAT", newSViv (O_CREAT));
1266 newCONSTSUB (stash, "O_TRUNC", newSViv (O_TRUNC));
1267 newCONSTSUB (stash, "S_IFIFO", newSViv (S_IFIFO));
404 1268
405 if (fcntl (respipe [1], F_SETFL, O_NONBLOCK)) 1269 create_pipe ();
406 croak ("cannot set result pipe to nonblocking mode"); 1270 pthread_atfork (atfork_prepare, atfork_parent, atfork_child);
407} 1271}
408 1272
409void 1273void
410min_parallel(nthreads) 1274max_poll_reqs (int nreqs)
411 int nthreads
412 PROTOTYPE: $ 1275 PROTOTYPE: $
413 CODE: 1276 CODE:
414 while (nthreads > started) 1277 max_poll_reqs = nreqs;
415 start_thread ();
416 1278
417void 1279void
418max_parallel(nthreads) 1280max_poll_time (double nseconds)
419 int nthreads
420 PROTOTYPE: $ 1281 PROTOTYPE: $
421 CODE: 1282 CODE:
422{ 1283 max_poll_time = nseconds * AIO_TICKS;
423 int cur = started;
424 while (cur > nthreads)
425 {
426 end_thread ();
427 cur--;
428 }
429 1284
430 while (started > nthreads) 1285void
431 { 1286min_parallel (int nthreads)
432 poll_wait (); 1287 PROTOTYPE: $
433 poll_cb (); 1288
434 } 1289void
435} 1290max_parallel (int nthreads)
1291 PROTOTYPE: $
1292
1293void
1294max_idle (int nthreads)
1295 PROTOTYPE: $
1296 CODE:
1297 set_max_idle (nthreads);
436 1298
437int 1299int
438max_outstanding(nreqs) 1300max_outstanding (int maxreqs)
439 int nreqs 1301 PROTOTYPE: $
440 PROTOTYPE: $
441 CODE: 1302 CODE:
442 RETVAL = max_outstanding; 1303 RETVAL = max_outstanding;
443 max_outstanding = nreqs; 1304 max_outstanding = maxreqs;
1305 OUTPUT:
1306 RETVAL
444 1307
445void 1308void
446aio_open(pathname,flags,mode,callback=&PL_sv_undef) 1309aio_open (pathname,flags,mode,callback=&PL_sv_undef)
447 SV * pathname 1310 SV * pathname
448 int flags 1311 int flags
449 int mode 1312 int mode
450 SV * callback 1313 SV * callback
451 PROTOTYPE: $$$;$ 1314 PROTOTYPE: $$$;$
452 CODE: 1315 PPCODE:
453{ 1316{
454 aio_req req; 1317 dREQ;
455
456 Newz (0, req, 1, aio_cb);
457
458 if (!req)
459 croak ("out of memory during aio_req allocation");
460 1318
461 req->type = REQ_OPEN; 1319 req->type = REQ_OPEN;
462 req->data = newSVsv (pathname); 1320 req->sv1 = newSVsv (pathname);
463 req->dataptr = SvPV_nolen (req->data); 1321 req->ptr1 = SvPVbyte_nolen (pathname);
464 req->fd = flags; 1322 req->int1 = flags;
465 req->mode = mode; 1323 req->mode = mode;
466 req->callback = SvREFCNT_inc (callback);
467 1324
468 send_req (req); 1325 REQ_SEND;
469} 1326}
470 1327
471void 1328void
472aio_close(fh,callback=&PL_sv_undef) 1329aio_close (fh,callback=&PL_sv_undef)
473 SV * fh 1330 SV * fh
474 SV * callback 1331 SV * callback
475 PROTOTYPE: $;$ 1332 PROTOTYPE: $;$
476 ALIAS: 1333 ALIAS:
477 aio_close = REQ_CLOSE 1334 aio_close = REQ_CLOSE
478 aio_fsync = REQ_FSYNC 1335 aio_fsync = REQ_FSYNC
479 aio_fdatasync = REQ_FDATASYNC 1336 aio_fdatasync = REQ_FDATASYNC
480 CODE: 1337 PPCODE:
481{ 1338{
482 aio_req req; 1339 dREQ;
483
484 Newz (0, req, 1, aio_cb);
485
486 if (!req)
487 croak ("out of memory during aio_req allocation");
488 1340
489 req->type = ix; 1341 req->type = ix;
490 req->fh = newSVsv (fh); 1342 req->fh = newSVsv (fh);
491 req->fd = PerlIO_fileno (IoIFP (sv_2io (fh))); 1343 req->int1 = PerlIO_fileno (IoIFP (sv_2io (fh)));
492 req->callback = SvREFCNT_inc (callback);
493 1344
494 send_req (req); 1345 REQ_SEND (req);
495} 1346}
496 1347
497void 1348void
498aio_read(fh,offset,length,data,dataoffset,callback=&PL_sv_undef) 1349aio_read (fh,offset,length,data,dataoffset,callback=&PL_sv_undef)
499 SV * fh 1350 SV * fh
500 UV offset 1351 UV offset
501 IV length 1352 UV length
502 SV * data 1353 SV * data
503 IV dataoffset 1354 UV dataoffset
504 SV * callback 1355 SV * callback
505 ALIAS: 1356 ALIAS:
506 aio_read = REQ_READ 1357 aio_read = REQ_READ
507 aio_write = REQ_WRITE 1358 aio_write = REQ_WRITE
508 PROTOTYPE: $$$$$;$ 1359 PROTOTYPE: $$$$$;$
509 CODE: 1360 PPCODE:
510{ 1361{
511 aio_req req;
512 STRLEN svlen; 1362 STRLEN svlen;
513 char *svptr = SvPVbyte (data, svlen); 1363 char *svptr = SvPVbyte (data, svlen);
514 1364
515 SvUPGRADE (data, SVt_PV); 1365 SvUPGRADE (data, SVt_PV);
516 SvPOK_on (data); 1366 SvPOK_on (data);
534 } 1384 }
535 1385
536 if (length < 0) 1386 if (length < 0)
537 croak ("length must not be negative"); 1387 croak ("length must not be negative");
538 1388
539 Newz (0, req, 1, aio_cb); 1389 {
1390 dREQ;
540 1391
541 if (!req)
542 croak ("out of memory during aio_req allocation");
543
544 req->type = ix; 1392 req->type = ix;
545 req->fh = newSVsv (fh); 1393 req->fh = newSVsv (fh);
546 req->fd = PerlIO_fileno (ix == REQ_READ ? IoIFP (sv_2io (fh)) 1394 req->int1 = PerlIO_fileno (ix == REQ_READ ? IoIFP (sv_2io (fh))
547 : IoOFP (sv_2io (fh))); 1395 : IoOFP (sv_2io (fh)));
548 req->offset = offset; 1396 req->offs = offset;
549 req->length = length; 1397 req->size = length;
550 req->data = SvREFCNT_inc (data); 1398 req->sv1 = SvREFCNT_inc (data);
551 req->dataptr = (char *)svptr + dataoffset; 1399 req->ptr1 = (char *)svptr + dataoffset;
552 req->callback = SvREFCNT_inc (callback); 1400 req->stroffset = dataoffset;
553 1401
554 send_req (req); 1402 if (!SvREADONLY (data))
555} 1403 {
1404 SvREADONLY_on (data);
1405 req->flags |= FLAG_DATA_RO_OFF;
1406 }
556 1407
1408 REQ_SEND;
1409 }
1410}
1411
557void 1412void
1413aio_readlink (path,callback=&PL_sv_undef)
1414 SV * path
1415 SV * callback
1416 PROTOTYPE: $$;$
1417 PPCODE:
1418{
1419 SV *data;
1420 dREQ;
1421
1422 data = newSV (NAME_MAX);
1423 SvPOK_on (data);
1424
1425 req->type = REQ_READLINK;
1426 req->fh = newSVsv (path);
1427 req->ptr2 = SvPVbyte_nolen (path);
1428 req->sv1 = data;
1429 req->ptr1 = SvPVbyte_nolen (data);
1430
1431 REQ_SEND;
1432}
1433
1434void
1435aio_sendfile (out_fh,in_fh,in_offset,length,callback=&PL_sv_undef)
1436 SV * out_fh
1437 SV * in_fh
1438 UV in_offset
1439 UV length
1440 SV * callback
1441 PROTOTYPE: $$$$;$
1442 PPCODE:
1443{
1444 dREQ;
1445
1446 req->type = REQ_SENDFILE;
1447 req->fh = newSVsv (out_fh);
1448 req->int1 = PerlIO_fileno (IoIFP (sv_2io (out_fh)));
1449 req->sv2 = newSVsv (in_fh);
1450 req->int2 = PerlIO_fileno (IoIFP (sv_2io (in_fh)));
1451 req->offs = in_offset;
1452 req->size = length;
1453
1454 REQ_SEND;
1455}
1456
1457void
558aio_readahead(fh,offset,length,callback=&PL_sv_undef) 1458aio_readahead (fh,offset,length,callback=&PL_sv_undef)
559 SV * fh 1459 SV * fh
560 UV offset 1460 UV offset
561 IV length 1461 IV length
562 SV * callback 1462 SV * callback
563 PROTOTYPE: $$$;$ 1463 PROTOTYPE: $$$;$
564 CODE: 1464 PPCODE:
565{ 1465{
566 aio_req req; 1466 dREQ;
567
568 if (length < 0)
569 croak ("length must not be negative");
570
571 Newz (0, req, 1, aio_cb);
572
573 if (!req)
574 croak ("out of memory during aio_req allocation");
575 1467
576 req->type = REQ_READAHEAD; 1468 req->type = REQ_READAHEAD;
577 req->fh = newSVsv (fh); 1469 req->fh = newSVsv (fh);
578 req->fd = PerlIO_fileno (IoIFP (sv_2io (fh))); 1470 req->int1 = PerlIO_fileno (IoIFP (sv_2io (fh)));
579 req->offset = offset; 1471 req->offs = offset;
580 req->length = length; 1472 req->size = length;
581 req->callback = SvREFCNT_inc (callback);
582 1473
583 send_req (req); 1474 REQ_SEND;
584} 1475}
585 1476
586void 1477void
587aio_stat(fh_or_path,callback=&PL_sv_undef) 1478aio_stat (fh_or_path,callback=&PL_sv_undef)
588 SV * fh_or_path 1479 SV * fh_or_path
589 SV * callback 1480 SV * callback
590 ALIAS: 1481 ALIAS:
591 aio_stat = REQ_STAT 1482 aio_stat = REQ_STAT
592 aio_lstat = REQ_LSTAT 1483 aio_lstat = REQ_LSTAT
593 CODE: 1484 PPCODE:
594{ 1485{
595 aio_req req; 1486 dREQ;
596
597 Newz (0, req, 1, aio_cb);
598
599 if (!req)
600 croak ("out of memory during aio_req allocation");
601 1487
602 New (0, req->statdata, 1, Stat_t); 1488 New (0, req->statdata, 1, Stat_t);
603
604 if (!req->statdata) 1489 if (!req->statdata)
1490 {
1491 req_free (req);
605 croak ("out of memory during aio_req->statdata allocation"); 1492 croak ("out of memory during aio_req->statdata allocation");
1493 }
606 1494
607 if (SvPOK (fh_or_path)) 1495 if (SvPOK (fh_or_path))
608 { 1496 {
609 req->type = ix; 1497 req->type = ix;
610 req->data = newSVsv (fh_or_path); 1498 req->sv1 = newSVsv (fh_or_path);
611 req->dataptr = SvPV_nolen (req->data); 1499 req->ptr1 = SvPVbyte_nolen (fh_or_path);
612 } 1500 }
613 else 1501 else
614 { 1502 {
615 req->type = REQ_FSTAT; 1503 req->type = REQ_FSTAT;
616 req->fh = newSVsv (fh_or_path); 1504 req->fh = newSVsv (fh_or_path);
617 req->fd = PerlIO_fileno (IoIFP (sv_2io (fh_or_path))); 1505 req->int1 = PerlIO_fileno (IoIFP (sv_2io (fh_or_path)));
618 } 1506 }
619 1507
620 req->callback = SvREFCNT_inc (callback); 1508 REQ_SEND;
621
622 send_req (req);
623} 1509}
624 1510
625void 1511void
626aio_unlink(pathname,callback=&PL_sv_undef) 1512aio_unlink (pathname,callback=&PL_sv_undef)
627 SV * pathname 1513 SV * pathname
628 SV * callback 1514 SV * callback
1515 ALIAS:
1516 aio_unlink = REQ_UNLINK
1517 aio_rmdir = REQ_RMDIR
1518 aio_readdir = REQ_READDIR
1519 PPCODE:
1520{
1521 dREQ;
1522
1523 req->type = ix;
1524 req->sv1 = newSVsv (pathname);
1525 req->ptr1 = SvPVbyte_nolen (pathname);
1526
1527 REQ_SEND;
1528}
1529
1530void
1531aio_link (oldpath,newpath,callback=&PL_sv_undef)
1532 SV * oldpath
1533 SV * newpath
1534 SV * callback
1535 ALIAS:
1536 aio_link = REQ_LINK
1537 aio_symlink = REQ_SYMLINK
1538 aio_rename = REQ_RENAME
1539 PPCODE:
1540{
1541 dREQ;
1542
1543 req->type = ix;
1544 req->fh = newSVsv (oldpath);
1545 req->ptr2 = SvPVbyte_nolen (req->fh);
1546 req->sv1 = newSVsv (newpath);
1547 req->ptr1 = SvPVbyte_nolen (newpath);
1548
1549 REQ_SEND;
1550}
1551
1552void
1553aio_mknod (pathname,mode,dev,callback=&PL_sv_undef)
1554 SV * pathname
1555 SV * callback
1556 UV mode
1557 UV dev
1558 PPCODE:
1559{
1560 dREQ;
1561
1562 req->type = REQ_MKNOD;
1563 req->sv1 = newSVsv (pathname);
1564 req->ptr1 = SvPVbyte_nolen (pathname);
1565 req->mode = (mode_t)mode;
1566 req->offs = dev;
1567
1568 REQ_SEND;
1569}
1570
1571void
1572aio_busy (delay,callback=&PL_sv_undef)
1573 double delay
1574 SV * callback
1575 PPCODE:
1576{
1577 dREQ;
1578
1579 req->type = REQ_BUSY;
1580 req->int1 = delay < 0. ? 0 : delay;
1581 req->int2 = delay < 0. ? 0 : 1000. * (delay - req->int1);
1582
1583 REQ_SEND;
1584}
1585
1586void
1587aio_group (callback=&PL_sv_undef)
1588 SV * callback
1589 PROTOTYPE: ;$
1590 PPCODE:
1591{
1592 dREQ;
1593
1594 req->type = REQ_GROUP;
1595
1596 req_send (req);
1597 XPUSHs (req_sv (req, AIO_GRP_KLASS));
1598}
1599
1600void
1601aio_nop (callback=&PL_sv_undef)
1602 SV * callback
1603 PPCODE:
1604{
1605 dREQ;
1606
1607 req->type = REQ_NOP;
1608
1609 REQ_SEND;
1610}
1611
1612int
1613aioreq_pri (int pri = 0)
1614 PROTOTYPE: ;$
629 CODE: 1615 CODE:
630{ 1616 RETVAL = next_pri - PRI_BIAS;
631 aio_req req; 1617 if (items > 0)
632 1618 {
633 Newz (0, req, 1, aio_cb); 1619 if (pri < PRI_MIN) pri = PRI_MIN;
634 1620 if (pri > PRI_MAX) pri = PRI_MAX;
635 if (!req) 1621 next_pri = pri + PRI_BIAS;
636 croak ("out of memory during aio_req allocation"); 1622 }
637 1623 OUTPUT:
638 req->type = REQ_UNLINK; 1624 RETVAL
639 req->data = newSVsv (pathname);
640 req->dataptr = SvPV_nolen (req->data);
641 req->callback = SvREFCNT_inc (callback);
642
643 send_req (req);
644}
645 1625
646void 1626void
1627aioreq_nice (int nice = 0)
1628 CODE:
1629 nice = next_pri - nice;
1630 if (nice < PRI_MIN) nice = PRI_MIN;
1631 if (nice > PRI_MAX) nice = PRI_MAX;
1632 next_pri = nice + PRI_BIAS;
1633
1634void
647flush() 1635flush ()
648 PROTOTYPE: 1636 PROTOTYPE:
649 CODE: 1637 CODE:
650 while (nreqs) 1638 while (nreqs)
651 { 1639 {
652 poll_wait (); 1640 poll_wait ();
653 poll_cb (); 1641 poll_cb (0);
654 } 1642 }
655 1643
656void 1644void
657poll() 1645poll()
658 PROTOTYPE: 1646 PROTOTYPE:
659 CODE: 1647 CODE:
660 if (nreqs) 1648 if (nreqs)
661 { 1649 {
662 poll_wait (); 1650 poll_wait ();
663 poll_cb (); 1651 poll_cb (0);
664 } 1652 }
665 1653
666int 1654int
667poll_fileno() 1655poll_fileno()
668 PROTOTYPE: 1656 PROTOTYPE:
692 CODE: 1680 CODE:
693 RETVAL = nreqs; 1681 RETVAL = nreqs;
694 OUTPUT: 1682 OUTPUT:
695 RETVAL 1683 RETVAL
696 1684
1685int
1686nready()
1687 PROTOTYPE:
1688 CODE:
1689 RETVAL = get_nready ();
1690 OUTPUT:
1691 RETVAL
1692
1693int
1694npending()
1695 PROTOTYPE:
1696 CODE:
1697 RETVAL = get_npending ();
1698 OUTPUT:
1699 RETVAL
1700
1701int
1702nthreads()
1703 PROTOTYPE:
1704 CODE:
1705 if (WORDACCESS_UNSAFE) LOCK (wrklock);
1706 RETVAL = started;
1707 if (WORDACCESS_UNSAFE) UNLOCK (wrklock);
1708 OUTPUT:
1709 RETVAL
1710
1711PROTOTYPES: DISABLE
1712
1713MODULE = IO::AIO PACKAGE = IO::AIO::REQ
1714
1715void
1716cancel (aio_req_ornot req)
1717 CODE:
1718 req_cancel (req);
1719
1720void
1721cb (aio_req_ornot req, SV *callback=&PL_sv_undef)
1722 CODE:
1723 SvREFCNT_dec (req->callback);
1724 req->callback = newSVsv (callback);
1725
1726MODULE = IO::AIO PACKAGE = IO::AIO::GRP
1727
1728void
1729add (aio_req grp, ...)
1730 PPCODE:
1731{
1732 int i;
1733 aio_req req;
1734
1735 if (grp->int1 == 2)
1736 croak ("cannot add requests to IO::AIO::GRP after the group finished");
1737
1738 for (i = 1; i < items; ++i )
1739 {
1740 if (GIMME_V != G_VOID)
1741 XPUSHs (sv_2mortal (newSVsv (ST (i))));
1742
1743 req = SvAIO_REQ (ST (i));
1744
1745 if (req)
1746 {
1747 ++grp->size;
1748 req->grp = grp;
1749
1750 req->grp_prev = 0;
1751 req->grp_next = grp->grp_first;
1752
1753 if (grp->grp_first)
1754 grp->grp_first->grp_prev = req;
1755
1756 grp->grp_first = req;
1757 }
1758 }
1759}
1760
1761void
1762cancel_subs (aio_req_ornot req)
1763 CODE:
1764 req_cancel_subs (req);
1765
1766void
1767result (aio_req grp, ...)
1768 CODE:
1769{
1770 int i;
1771 AV *av;
1772
1773 grp->errorno = errno;
1774
1775 av = newAV ();
1776
1777 for (i = 1; i < items; ++i )
1778 av_push (av, newSVsv (ST (i)));
1779
1780 SvREFCNT_dec (grp->sv1);
1781 grp->sv1 = (SV *)av;
1782}
1783
1784void
1785errno (aio_req grp, int errorno = errno)
1786 CODE:
1787 grp->errorno = errorno;
1788
1789void
1790limit (aio_req grp, int limit)
1791 CODE:
1792 grp->int2 = limit;
1793 aio_grp_feed (grp);
1794
1795void
1796feed (aio_req grp, SV *callback=&PL_sv_undef)
1797 CODE:
1798{
1799 SvREFCNT_dec (grp->sv2);
1800 grp->sv2 = newSVsv (callback);
1801
1802 if (grp->int2 <= 0)
1803 grp->int2 = 2;
1804
1805 aio_grp_feed (grp);
1806}
1807
