/cvs/IO-AIO/AIO.xs

Comparing IO-AIO/AIO.xs (file contents):
Revision 1.52 by root, Sun Oct 22 22:14:54 2006 UTC vs.
Revision 1.82 by root, Fri Oct 27 20:11:58 2006 UTC

1/* solaris */
2#define _POSIX_PTHREAD_SEMANTICS 1
3
4#if __linux && !defined(_GNU_SOURCE)
5# define _GNU_SOURCE
6#endif
7
8/* just in case */
1#define _REENTRANT 1 9#define _REENTRANT 1
10
2#include <errno.h> 11#include <errno.h>
3 12
4#include "EXTERN.h" 13#include "EXTERN.h"
5#include "perl.h" 14#include "perl.h"
6#include "XSUB.h" 15#include "XSUB.h"
39/* used for struct dirent, AIX doesn't provide it */ 48/* used for struct dirent, AIX doesn't provide it */
40#ifndef NAME_MAX 49#ifndef NAME_MAX
41# define NAME_MAX 4096 50# define NAME_MAX 4096
42#endif 51#endif
43 52
53#ifndef PTHREAD_STACK_MIN
54/* care for broken platforms, e.g. windows */
55# define PTHREAD_STACK_MIN 16384
56#endif
57
44#if __ia64 58#if __ia64
45# define STACKSIZE 65536 59# define STACKSIZE 65536
60#elif __i386 || __x86_64 /* 16k is unreasonably high :( */
61# define STACKSIZE PTHREAD_STACK_MIN
46#else 62#else
47# define STACKSIZE 8192 63# define STACKSIZE 16384
64#endif
65
66/* whether word reads are potentially non-atomic.
67 * this is conservative; most architectures this runs
68 * on likely have atomic word reads/writes.
69 */
70#ifndef WORDREAD_UNSAFE
71# if __i386 || __x86_64
72# define WORDREAD_UNSAFE 0
73# else
74# define WORDREAD_UNSAFE 1
48#endif 75# endif
76#endif
77
78/* buffer size for various temporary buffers */
79#define AIO_BUFSIZE 65536
80
81#define dBUF \
82 char *aio_buf; \
83 LOCK (wrklock); \
84 self->dbuf = aio_buf = malloc (AIO_BUFSIZE); \
85 UNLOCK (wrklock); \
86 if (!aio_buf) \
87 return -1;
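Editorial note: dBUF hands the current worker a temporary buffer and records it in self->dbuf under wrklock, so worker_clear (further down) can release it even when a request aborts early. A minimal sketch of the intended call pattern, assuming the worker/self convention used by sendfile_ () and aio_readahead () later in this file; copy_loop is a hypothetical helper, not part of AIO.xs:

static ssize_t copy_loop (int ifd, int ofd, size_t count, worker *self)
{
  dBUF;                      /* allocates AIO_BUFSIZE bytes, noted in self->dbuf */

  while (count)
    {
      size_t len = count > AIO_BUFSIZE ? AIO_BUFSIZE : count;
      ssize_t got = read (ifd, aio_buf, len);

      if (got <= 0)
        return got;          /* 0 on eof, -1 on error */

      if (write (ofd, aio_buf, got) != got)
        return -1;

      count -= got;
    }

  /* aio_buf is intentionally not freed here: worker_clear () releases
     self->dbuf once the worker is done with the request */
  return 0;
}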
49 88
50enum { 89enum {
51 REQ_QUIT, 90 REQ_QUIT,
52 REQ_OPEN, REQ_CLOSE, 91 REQ_OPEN, REQ_CLOSE,
53 REQ_READ, REQ_WRITE, REQ_READAHEAD, 92 REQ_READ, REQ_WRITE, REQ_READAHEAD,
54 REQ_SENDFILE, 93 REQ_SENDFILE,
55 REQ_STAT, REQ_LSTAT, REQ_FSTAT, 94 REQ_STAT, REQ_LSTAT, REQ_FSTAT,
56 REQ_FSYNC, REQ_FDATASYNC, 95 REQ_FSYNC, REQ_FDATASYNC,
57 REQ_UNLINK, REQ_RMDIR, REQ_RENAME, 96 REQ_UNLINK, REQ_RMDIR, REQ_RENAME,
58 REQ_READDIR, 97 REQ_MKNOD, REQ_READDIR,
59 REQ_LINK, REQ_SYMLINK, 98 REQ_LINK, REQ_SYMLINK,
60 REQ_SLEEP, 99 REQ_GROUP, REQ_NOP,
61 REQ_GROUP, 100 REQ_BUSY,
62}; 101};
63 102
64#define AIO_REQ_KLASS "IO::AIO::REQ" 103#define AIO_REQ_KLASS "IO::AIO::REQ"
65#define AIO_GRP_KLASS "IO::AIO::GRP" 104#define AIO_GRP_KLASS "IO::AIO::GRP"
66 105
67typedef struct aio_cb 106typedef struct aio_cb
68{ 107{
69 struct aio_cb *volatile next; 108 struct aio_cb *volatile next;
70
71 struct aio_cb *grp, *grp_prev, *grp_next, *grp_first;
72
73 SV *self; /* the perl counterpart of this request, if any */
74 109
75 SV *data, *callback; 110 SV *data, *callback;
76 SV *fh, *fh2; 111 SV *fh, *fh2;
77 void *dataptr, *data2ptr; 112 void *dataptr, *data2ptr;
78 Stat_t *statdata; 113 Stat_t *statdata;
79 off_t offset; 114 off_t offset;
80 size_t length; 115 size_t length;
81 ssize_t result; 116 ssize_t result;
82 117
118 STRLEN dataoffset;
83 int type; 119 int type;
84 int fd, fd2; 120 int fd, fd2;
85 int errorno; 121 int errorno;
86 STRLEN dataoffset;
87 mode_t mode; /* open */ 122 mode_t mode; /* open */
123
88 unsigned char cancelled; 124 unsigned char flags;
125 unsigned char pri;
126
127 SV *self; /* the perl counterpart of this request, if any */
128 struct aio_cb *grp, *grp_prev, *grp_next, *grp_first;
89} aio_cb; 129} aio_cb;
130
131enum {
132 FLAG_CANCELLED = 0x01,
133};
90 134
91typedef aio_cb *aio_req; 135typedef aio_cb *aio_req;
92typedef aio_cb *aio_req_ornot; 136typedef aio_cb *aio_req_ornot;
93 137
138enum {
139 PRI_MIN = -4,
140 PRI_MAX = 4,
141
142 DEFAULT_PRI = 0,
143 PRI_BIAS = -PRI_MIN,
144 NUM_PRI = PRI_MAX + PRI_BIAS + 1,
145};
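Editorial note: since PRI_MIN is -4 and PRI_BIAS = -PRI_MIN = 4, adding PRI_BIAS maps the user-visible priorities onto the 0-based indices used for the per-priority queues below. A quick worked example, as a comment:

/* worked example of the priority mapping defined above:
 *   user pri -4 (PRI_MIN)     -> index -4 + PRI_BIAS = 0
 *   user pri  0 (DEFAULT_PRI) -> index  0 + PRI_BIAS = 4
 *   user pri +4 (PRI_MAX)     -> index  4 + PRI_BIAS = 8
 * NUM_PRI = PRI_MAX + PRI_BIAS + 1 = 9 queue slots in total.
 */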
146
147static int next_pri = DEFAULT_PRI + PRI_BIAS;
148
94static int started, wanted; 149static unsigned int started, wanted;
95static volatile int nreqs; 150
96static int max_outstanding = 1<<30; 151#if __linux && defined (PTHREAD_ADAPTIVE_MUTEX_INITIALIZER_NP)
152# define AIO_MUTEX_INIT PTHREAD_ADAPTIVE_MUTEX_INITIALIZER_NP
153#else
154# define AIO_MUTEX_INIT PTHREAD_MUTEX_INITIALIZER
155#endif
156
157#define LOCK(mutex) pthread_mutex_lock (&(mutex))
158#define UNLOCK(mutex) pthread_mutex_unlock (&(mutex))
159
160/* worker thread management */
161static pthread_mutex_t wrklock = AIO_MUTEX_INIT;
162
163typedef struct worker {
164 /* locked by wrklock */
165 struct worker *prev, *next;
166
167 pthread_t tid;
168
169 /* locked by reslock, reqlock or wrklock */
170 aio_req req; /* currently processed request */
171 void *dbuf;
172 DIR *dirp;
173} worker;
174
175static worker wrk_first = { &wrk_first, &wrk_first, 0 };
176
177static void worker_clear (worker *wrk)
178{
179 if (wrk->dirp)
180 {
181 closedir (wrk->dirp);
182 wrk->dirp = 0;
183 }
184
185 if (wrk->dbuf)
186 {
187 free (wrk->dbuf);
188 wrk->dbuf = 0;
189 }
190}
191
192static void worker_free (worker *wrk)
193{
194 wrk->next->prev = wrk->prev;
195 wrk->prev->next = wrk->next;
196
197 free (wrk);
198}
199
200static volatile unsigned int nreqs, nready, npending;
201static volatile unsigned int max_outstanding = 0xffffffff;
97static int respipe [2]; 202static int respipe [2];
98 203
99static pthread_mutex_t reslock = PTHREAD_MUTEX_INITIALIZER; 204static pthread_mutex_t reslock = AIO_MUTEX_INIT;
100static pthread_mutex_t reqlock = PTHREAD_MUTEX_INITIALIZER; 205static pthread_mutex_t reqlock = AIO_MUTEX_INIT;
101static pthread_cond_t reqwait = PTHREAD_COND_INITIALIZER; 206static pthread_cond_t reqwait = PTHREAD_COND_INITIALIZER;
102 207
103static volatile aio_req reqs, reqe; /* queue start, queue end */ 208#if WORDREAD_UNSAFE
104static volatile aio_req ress, rese; /* queue start, queue end */
105 209
210static unsigned int get_nready ()
211{
212 unsigned int retval;
213
214 LOCK (reqlock);
215 retval = nready;
216 UNLOCK (reqlock);
217
218 return retval;
219}
220
221static unsigned int get_npending ()
222{
223 unsigned int retval;
224
225 LOCK (reslock);
226 retval = npending;
227 UNLOCK (reslock);
228
229 return retval;
230}
231
232#else
233
234# define get_nready() nready
235# define get_npending() npending
236
237#endif
238
239/*
240 * a somewhat faster data structure might be nice, but
241 * with 8 priorities this actually needs <20 insns
242 * per shift, the most expensive operation.
243 */
244typedef struct {
245 aio_req qs[NUM_PRI], qe[NUM_PRI]; /* qstart, qend */
246 int size;
247} reqq;
248
249static reqq req_queue;
250static reqq res_queue;
251
252int reqq_push (reqq *q, aio_req req)
253{
254 int pri = req->pri;
255 req->next = 0;
256
257 if (q->qe[pri])
258 {
259 q->qe[pri]->next = req;
260 q->qe[pri] = req;
261 }
262 else
263 q->qe[pri] = q->qs[pri] = req;
264
265 return q->size++;
266}
267
268aio_req reqq_shift (reqq *q)
269{
270 int pri;
271
272 if (!q->size)
273 return 0;
274
275 --q->size;
276
277 for (pri = NUM_PRI; pri--; )
278 {
279 aio_req req = q->qs[pri];
280
281 if (req)
282 {
283 if (!(q->qs[pri] = req->next))
284 q->qe[pri] = 0;
285
286 return req;
287 }
288 }
289
290 abort ();
291}
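Editorial note: reqq_push returns the queue size before the insert (a return of 0 therefore means the queue was empty, which aio_proc later uses to decide when to write to the result pipe), and reqq_shift always hands back the highest-priority request first. A small sketch, using hypothetical requests a and b that are not part of the real code path:

/* editorial sketch, not part of AIO.xs */
static void reqq_example (void)
{
  static aio_cb a, b;               /* hypothetical requests, zero-initialised */
  static reqq q;                    /* empty queue */
  aio_req r;

  a.pri = DEFAULT_PRI + PRI_BIAS;   /* normal priority */
  b.pri = PRI_MAX + PRI_BIAS;       /* highest priority */

  reqq_push (&q, &a);               /* returns 0: queue was empty before */
  reqq_push (&q, &b);               /* returns 1 */

  r = reqq_shift (&q);              /* r == &b, highest priority wins */
  r = reqq_shift (&q);              /* r == &a */
  r = reqq_shift (&q);              /* r == 0, queue is empty again */
}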
292
293static int poll_cb (int max);
106static void req_invoke (aio_req req); 294static void req_invoke (aio_req req);
107static void req_free (aio_req req); 295static void req_free (aio_req req);
296static void req_cancel (aio_req req);
108 297
109/* must be called at most once */ 298/* must be called at most once */
110static SV *req_sv (aio_req req, const char *klass) 299static SV *req_sv (aio_req req, const char *klass)
111{ 300{
112 if (!req->self) 301 if (!req->self)
118 return sv_2mortal (sv_bless (newRV_inc (req->self), gv_stashpv (klass, 1))); 307 return sv_2mortal (sv_bless (newRV_inc (req->self), gv_stashpv (klass, 1)));
119} 308}
120 309
121static aio_req SvAIO_REQ (SV *sv) 310static aio_req SvAIO_REQ (SV *sv)
122{ 311{
312 MAGIC *mg;
313
123 if (!sv_derived_from (sv, AIO_REQ_KLASS) || !SvROK (sv)) 314 if (!sv_derived_from (sv, AIO_REQ_KLASS) || !SvROK (sv))
124 croak ("object of class " AIO_REQ_KLASS " expected"); 315 croak ("object of class " AIO_REQ_KLASS " expected");
125 316
126 MAGIC *mg = mg_find (SvRV (sv), PERL_MAGIC_ext); 317 mg = mg_find (SvRV (sv), PERL_MAGIC_ext);
127 318
128 return mg ? (aio_req)mg->mg_ptr : 0; 319 return mg ? (aio_req)mg->mg_ptr : 0;
129} 320}
130 321
131static void aio_grp_feed (aio_req grp) 322static void aio_grp_feed (aio_req grp)
132{ 323{
133 while (grp->length < grp->fd2) 324 while (grp->length < grp->fd2 && !(grp->flags & FLAG_CANCELLED))
134 { 325 {
135 int old_len = grp->length; 326 int old_len = grp->length;
136 327
137 if (grp->fh2 && SvOK (grp->fh2)) 328 if (grp->fh2 && SvOK (grp->fh2))
138 { 329 {
141 ENTER; 332 ENTER;
142 SAVETMPS; 333 SAVETMPS;
143 PUSHMARK (SP); 334 PUSHMARK (SP);
144 XPUSHs (req_sv (grp, AIO_GRP_KLASS)); 335 XPUSHs (req_sv (grp, AIO_GRP_KLASS));
145 PUTBACK; 336 PUTBACK;
146 call_sv (grp->fh2, G_VOID | G_EVAL); 337 call_sv (grp->fh2, G_VOID | G_EVAL | G_KEEPERR);
147 SPAGAIN; 338 SPAGAIN;
148 FREETMPS; 339 FREETMPS;
149 LEAVE; 340 LEAVE;
150 } 341 }
151 342
172 req_invoke (grp); 363 req_invoke (grp);
173 req_free (grp); 364 req_free (grp);
174 } 365 }
175} 366}
176 367
177static void poll_wait ()
178{
179 if (nreqs && !ress)
180 {
181 fd_set rfd;
182 FD_ZERO(&rfd);
183 FD_SET(respipe [0], &rfd);
184
185 select (respipe [0] + 1, &rfd, 0, 0, 0);
186 }
187}
188
189static void req_invoke (aio_req req) 368static void req_invoke (aio_req req)
190{ 369{
191 dSP; 370 dSP;
192 int errorno = errno;
193 371
194 if (req->cancelled || !SvOK (req->callback)) 372 if (!(req->flags & FLAG_CANCELLED) && SvOK (req->callback))
195 return; 373 {
196
197 errno = req->errorno;
198
199 ENTER; 374 ENTER;
200 SAVETMPS; 375 SAVETMPS;
201 PUSHMARK (SP); 376 PUSHMARK (SP);
202 EXTEND (SP, 1); 377 EXTEND (SP, 1);
203 378
204 switch (req->type) 379 switch (req->type)
205 {
206 case REQ_READDIR:
207 { 380 {
208 SV *rv = &PL_sv_undef; 381 case REQ_READDIR:
209
210 if (req->result >= 0)
211 { 382 {
212 char *buf = req->data2ptr; 383 SV *rv = &PL_sv_undef;
213 AV *av = newAV ();
214 384
215 while (req->result) 385 if (req->result >= 0)
216 { 386 {
387 int i;
388 char *buf = req->data2ptr;
389 AV *av = newAV ();
390
391 av_extend (av, req->result - 1);
392
393 for (i = 0; i < req->result; ++i)
394 {
217 SV *sv = newSVpv (buf, 0); 395 SV *sv = newSVpv (buf, 0);
218 396
219 av_push (av, sv); 397 av_store (av, i, sv);
220 buf += SvCUR (sv) + 1; 398 buf += SvCUR (sv) + 1;
221 req->result--; 399 }
400
401 rv = sv_2mortal (newRV_noinc ((SV *)av));
222 } 402 }
223 403
224 rv = sv_2mortal (newRV_noinc ((SV *)av)); 404 PUSHs (rv);
225 } 405 }
406 break;
226 407
227 PUSHs (rv); 408 case REQ_OPEN:
409 {
410 /* convert fd to fh */
411 SV *fh;
412
413 PUSHs (sv_2mortal (newSViv (req->result)));
414 PUTBACK;
415 call_pv ("IO::AIO::_fd2fh", G_SCALAR | G_EVAL);
416 SPAGAIN;
417
418 fh = SvREFCNT_inc (POPs);
419
420 PUSHMARK (SP);
421 XPUSHs (sv_2mortal (fh));
422 }
423 break;
424
425 case REQ_GROUP:
426 req->fd = 2; /* mark group as finished */
427
428 if (req->data)
429 {
430 int i;
431 AV *av = (AV *)req->data;
432
433 EXTEND (SP, AvFILL (av) + 1);
434 for (i = 0; i <= AvFILL (av); ++i)
435 PUSHs (*av_fetch (av, i, 0));
436 }
437 break;
438
439 case REQ_NOP:
440 case REQ_BUSY:
441 break;
442
443 default:
444 PUSHs (sv_2mortal (newSViv (req->result)));
445 break;
228 } 446 }
229 break;
230 447
231 case REQ_OPEN: 448 errno = req->errorno;
232 {
233 /* convert fd to fh */
234 SV *fh;
235 449
236 PUSHs (sv_2mortal (newSViv (req->result)));
237 PUTBACK; 450 PUTBACK;
238 call_pv ("IO::AIO::_fd2fh", G_SCALAR | G_EVAL);
239 SPAGAIN;
240
241 fh = SvREFCNT_inc (POPs);
242
243 PUSHMARK (SP);
244 XPUSHs (sv_2mortal (fh));
245 }
246 break;
247
248 case REQ_GROUP:
249 req->fd = 2; /* mark group as finished */
250
251 if (req->data)
252 {
253 int i;
254 AV *av = (AV *)req->data;
255
256 EXTEND (SP, AvFILL (av) + 1);
257 for (i = 0; i <= AvFILL (av); ++i)
258 PUSHs (*av_fetch (av, i, 0));
259 }
260 break;
261
262 case REQ_SLEEP:
263 break;
264
265 default:
266 PUSHs (sv_2mortal (newSViv (req->result)));
267 break;
268 }
269
270
271 PUTBACK;
272 call_sv (req->callback, G_VOID | G_EVAL); 451 call_sv (req->callback, G_VOID | G_EVAL);
273 SPAGAIN; 452 SPAGAIN;
274 453
275 FREETMPS; 454 FREETMPS;
276 LEAVE; 455 LEAVE;
277
278 errno = errorno;
279
280 if (SvTRUE (ERRSV))
281 { 456 }
282 req_free (req);
283 croak (0);
284 }
285}
286 457
287static void req_free (aio_req req)
288{
289 if (req->grp) 458 if (req->grp)
290 { 459 {
291 aio_req grp = req->grp; 460 aio_req grp = req->grp;
292 461
293 /* unlink request */ 462 /* unlink request */
298 grp->grp_first = req->grp_next; 467 grp->grp_first = req->grp_next;
299 468
300 aio_grp_dec (grp); 469 aio_grp_dec (grp);
301 } 470 }
302 471
472 if (SvTRUE (ERRSV))
473 {
474 req_free (req);
475 croak (0);
476 }
477}
478
479static void req_free (aio_req req)
480{
303 if (req->self) 481 if (req->self)
304 { 482 {
305 sv_unmagic (req->self, PERL_MAGIC_ext); 483 sv_unmagic (req->self, PERL_MAGIC_ext);
306 SvREFCNT_dec (req->self); 484 SvREFCNT_dec (req->self);
307 } 485 }
310 SvREFCNT_dec (req->fh); 488 SvREFCNT_dec (req->fh);
311 SvREFCNT_dec (req->fh2); 489 SvREFCNT_dec (req->fh2);
312 SvREFCNT_dec (req->callback); 490 SvREFCNT_dec (req->callback);
313 Safefree (req->statdata); 491 Safefree (req->statdata);
314 492
315 if (req->type == REQ_READDIR && req->result >= 0) 493 if (req->type == REQ_READDIR)
316 free (req->data2ptr); 494 free (req->data2ptr);
317 495
318 Safefree (req); 496 Safefree (req);
319} 497}
320 498
499static void req_cancel_subs (aio_req grp)
500{
501 aio_req sub;
502
503 if (grp->type != REQ_GROUP)
504 return;
505
506 SvREFCNT_dec (grp->fh2);
507 grp->fh2 = 0;
508
509 for (sub = grp->grp_first; sub; sub = sub->grp_next)
510 req_cancel (sub);
511}
512
321static void req_cancel (aio_req req) 513static void req_cancel (aio_req req)
322{ 514{
323 req->cancelled = 1; 515 req->flags |= FLAG_CANCELLED;
324 516
325 if (req->type == REQ_GROUP) 517 req_cancel_subs (req);
326 { 518}
327 aio_req sub;
328 519
329 for (sub = req->grp_first; sub; sub = sub->grp_next) 520static void *aio_proc(void *arg);
330 req_cancel (sub); 521
522static void start_thread (void)
523{
524 sigset_t fullsigset, oldsigset;
525 pthread_attr_t attr;
526
527 worker *wrk = calloc (1, sizeof (worker));
528
529 if (!wrk)
530 croak ("unable to allocate worker thread data");
531
532 pthread_attr_init (&attr);
533 pthread_attr_setstacksize (&attr, STACKSIZE);
534 pthread_attr_setdetachstate (&attr, PTHREAD_CREATE_DETACHED);
535#ifdef PTHREAD_SCOPE_PROCESS
536 pthread_attr_setscope (&attr, PTHREAD_SCOPE_PROCESS);
537#endif
538
539 sigfillset (&fullsigset);
540
541 LOCK (wrklock);
542 sigprocmask (SIG_SETMASK, &fullsigset, &oldsigset);
543
544 if (pthread_create (&wrk->tid, &attr, aio_proc, (void *)wrk) == 0)
331 } 545 {
332} 546 wrk->prev = &wrk_first;
547 wrk->next = wrk_first.next;
548 wrk_first.next->prev = wrk;
549 wrk_first.next = wrk;
550 ++started;
551 }
552 else
553 free (wrk);
333 554
555 sigprocmask (SIG_SETMASK, &oldsigset, 0);
556 UNLOCK (wrklock);
557}
558
559static void maybe_start_thread ()
560{
561#if 0
562 static struct timeval last;
563 struct timeval diff, now;
564#endif
565
566 if (started >= wanted)
567 return;
568
569 if (nready <= nreqs - get_nready () - get_npending ())
570 return;
571
572#if 0
573 gettimeofday (&now, 0);
574
575 diff.tv_sec = now.tv_sec - last.tv_sec;
576 diff.tv_usec = now.tv_usec - last.tv_usec;
577
578 if (diff.tv_usec < 0)
579 {
580 --diff.tv_sec;
581 diff.tv_usec += 1000000;
582 }
583
584 if (!diff.tv_sec && diff.tv_usec < 10000)
585 return;
586
587 last = now;
588#endif
589
590 start_thread ();
591}
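Editorial note: the condition in maybe_start_thread compares the queue depth against the work the existing threads are already busy with; nreqs counts everything submitted and not yet reaped, so subtracting the queued and the pending requests leaves the in-flight ones. A worked example under that reading, as a comment:

/* editorial worked example (hedged reading of the counters above):
 *   nreqs = 10 submitted, nready = 3 still queued, npending = 2 done but unreaped
 *   => requests currently executing in workers ~ 10 - 3 - 2 = 5
 *   nready (3) <= 5, so the queued work can be absorbed by the existing
 *   workers and no additional thread is started.
 */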
592
593static void req_send (aio_req req)
594{
595 ++nreqs;
596
597 LOCK (reqlock);
598 ++nready;
599 reqq_push (&req_queue, req);
600 pthread_cond_signal (&reqwait);
601 UNLOCK (reqlock);
602
603 maybe_start_thread ();
604}
605
606static void end_thread (void)
607{
608 aio_req req;
609
610 Newz (0, req, 1, aio_cb);
611
612 req->type = REQ_QUIT;
613 req->pri = PRI_MAX + PRI_BIAS;
614
615 req_send (req);
616
617 LOCK (wrklock);
618 --started;
619 UNLOCK (wrklock);
620}
621
622static void min_parallel (int nthreads)
623{
624 if (wanted < nthreads)
625 wanted = nthreads;
626}
627
628static void max_parallel (int nthreads)
629{
630 if (wanted > nthreads)
631 wanted = nthreads;
632
633 while (started > wanted)
634 end_thread ();
635}
636
637static void poll_wait ()
638{
639 fd_set rfd;
640
641 while (nreqs)
642 {
643 int size;
644 if (WORDREAD_UNSAFE) LOCK (reslock);
645 size = res_queue.size;
646 if (WORDREAD_UNSAFE) UNLOCK (reslock);
647
648 if (size)
649 return;
650
651 maybe_start_thread ();
652
653 FD_ZERO(&rfd);
654 FD_SET(respipe [0], &rfd);
655
656 select (respipe [0] + 1, &rfd, 0, 0, 0);
657 }
658}
659
334static int poll_cb () 660static int poll_cb (int max)
335{ 661{
336 dSP; 662 dSP;
337 int count = 0; 663 int count = 0;
338 int do_croak = 0; 664 int do_croak = 0;
339 aio_req req; 665 aio_req req;
340 666
341 for (;;) 667 for (;;)
342 { 668 {
343 pthread_mutex_lock (&reslock); 669 while (max <= 0 || count < max)
344 req = ress;
345
346 if (req)
347 { 670 {
348 ress = req->next; 671 maybe_start_thread ();
349 672
673 LOCK (reslock);
674 req = reqq_shift (&res_queue);
675
350 if (!ress) 676 if (req)
351 { 677 {
678 --npending;
679
680 if (!res_queue.size)
681 {
352 /* read any signals sent by the worker threads */ 682 /* read any signals sent by the worker threads */
353 char buf [32]; 683 char buf [32];
354 while (read (respipe [0], buf, 32) == 32) 684 while (read (respipe [0], buf, 32) == 32)
685 ;
355 ; 686 }
356
357 rese = 0;
358 } 687 }
688
689 UNLOCK (reslock);
690
691 if (!req)
692 break;
693
694 --nreqs;
695
696 if (req->type == REQ_GROUP && req->length)
697 {
698 req->fd = 1; /* mark request as delayed */
699 continue;
700 }
701 else
702 {
703 if (req->type == REQ_READ)
704 SvCUR_set (req->data, req->dataoffset + (req->result > 0 ? req->result : 0));
705
706 if (req->data2ptr && (req->type == REQ_READ || req->type == REQ_WRITE))
707 SvREADONLY_off (req->data);
708
709 if (req->statdata)
710 {
711 PL_laststype = req->type == REQ_LSTAT ? OP_LSTAT : OP_STAT;
712 PL_laststatval = req->result;
713 PL_statcache = *(req->statdata);
714 }
715
716 req_invoke (req);
717
718 count++;
719 }
720
721 req_free (req);
359 } 722 }
360 723
361 pthread_mutex_unlock (&reslock); 724 if (nreqs <= max_outstanding)
362
363 if (!req)
364 break; 725 break;
365 726
366 --nreqs; 727 poll_wait ();
367 728
368 if (req->type == REQ_QUIT) 729 max = 0;
369 started--;
370 else if (req->type == REQ_GROUP && req->length)
371 {
372 req->fd = 1; /* mark request as delayed */
373 continue;
374 }
375 else
376 {
377 if (req->type == REQ_READ)
378 SvCUR_set (req->data, req->dataoffset + (req->result > 0 ? req->result : 0));
379
380 if (req->data2ptr && (req->type == REQ_READ || req->type == REQ_WRITE))
381 SvREADONLY_off (req->data);
382
383 if (req->statdata)
384 {
385 PL_laststype = req->type == REQ_LSTAT ? OP_LSTAT : OP_STAT;
386 PL_laststatval = req->result;
387 PL_statcache = *(req->statdata);
388 }
389
390 req_invoke (req);
391
392 count++;
393 }
394
395 req_free (req);
396 } 730 }
397 731
398 return count; 732 return count;
399}
400
401static void *aio_proc(void *arg);
402
403static void start_thread (void)
404{
405 sigset_t fullsigset, oldsigset;
406 pthread_t tid;
407 pthread_attr_t attr;
408
409 pthread_attr_init (&attr);
410 pthread_attr_setstacksize (&attr, STACKSIZE);
411 pthread_attr_setdetachstate (&attr, PTHREAD_CREATE_DETACHED);
412
413 sigfillset (&fullsigset);
414 sigprocmask (SIG_SETMASK, &fullsigset, &oldsigset);
415
416 if (pthread_create (&tid, &attr, aio_proc, 0) == 0)
417 started++;
418
419 sigprocmask (SIG_SETMASK, &oldsigset, 0);
420}
421
422static void req_send (aio_req req)
423{
424 while (started < wanted && nreqs >= started)
425 start_thread ();
426
427 ++nreqs;
428
429 pthread_mutex_lock (&reqlock);
430
431 req->next = 0;
432
433 if (reqe)
434 {
435 reqe->next = req;
436 reqe = req;
437 }
438 else
439 reqe = reqs = req;
440
441 pthread_cond_signal (&reqwait);
442 pthread_mutex_unlock (&reqlock);
443
444 if (nreqs > max_outstanding)
445 for (;;)
446 {
447 poll_cb ();
448
449 if (nreqs <= max_outstanding)
450 break;
451
452 poll_wait ();
453 }
454}
455
456static void end_thread (void)
457{
458 aio_req req;
459 Newz (0, req, 1, aio_cb);
460 req->type = REQ_QUIT;
461
462 req_send (req);
463}
464
465static void min_parallel (int nthreads)
466{
467 if (wanted < nthreads)
468 wanted = nthreads;
469}
470
471static void max_parallel (int nthreads)
472{
473 int cur = started;
474
475 if (wanted > nthreads)
476 wanted = nthreads;
477
478 while (cur > wanted)
479 {
480 end_thread ();
481 cur--;
482 }
483
484 while (started > wanted)
485 {
486 poll_wait ();
487 poll_cb ();
488 }
489} 733}
490 734
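Editorial note: callbacks only run when something drains res_queue, so an embedder waits on the read end of respipe exactly as poll_wait does above and then calls poll_cb. A minimal sketch (event_loop_once is a hypothetical name, not part of AIO.xs):

static void event_loop_once (void)
{
  fd_set rfd;

  FD_ZERO (&rfd);
  FD_SET (respipe [0], &rfd);

  /* wake up when a worker writes its dummy byte to respipe [1] */
  if (select (respipe [0] + 1, &rfd, 0, 0, 0) > 0)
    poll_cb (0);             /* invoke all pending callbacks */
}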
491static void create_pipe () 735static void create_pipe ()
492{ 736{
493 if (pipe (respipe)) 737 if (pipe (respipe))
517static ssize_t pread (int fd, void *buf, size_t count, off_t offset) 761static ssize_t pread (int fd, void *buf, size_t count, off_t offset)
518{ 762{
519 ssize_t res; 763 ssize_t res;
520 off_t ooffset; 764 off_t ooffset;
521 765
522 pthread_mutex_lock (&preadwritelock); 766 LOCK (preadwritelock);
523 ooffset = lseek (fd, 0, SEEK_CUR); 767 ooffset = lseek (fd, 0, SEEK_CUR);
524 lseek (fd, offset, SEEK_SET); 768 lseek (fd, offset, SEEK_SET);
525 res = read (fd, buf, count); 769 res = read (fd, buf, count);
526 lseek (fd, ooffset, SEEK_SET); 770 lseek (fd, ooffset, SEEK_SET);
527 pthread_mutex_unlock (&preadwritelock); 771 UNLOCK (preadwritelock);
528 772
529 return res; 773 return res;
530} 774}
531 775
532static ssize_t pwrite (int fd, void *buf, size_t count, off_t offset) 776static ssize_t pwrite (int fd, void *buf, size_t count, off_t offset)
533{ 777{
534 ssize_t res; 778 ssize_t res;
535 off_t ooffset; 779 off_t ooffset;
536 780
537 pthread_mutex_lock (&preadwritelock); 781 LOCK (preadwritelock);
538 ooffset = lseek (fd, 0, SEEK_CUR); 782 ooffset = lseek (fd, 0, SEEK_CUR);
539 lseek (fd, offset, SEEK_SET); 783 lseek (fd, offset, SEEK_SET);
540 res = write (fd, buf, count); 784 res = write (fd, buf, count);
541 lseek (fd, offset, SEEK_SET); 785 lseek (fd, offset, SEEK_SET);
542 pthread_mutex_unlock (&preadwritelock); 786 UNLOCK (preadwritelock);
543 787
544 return res; 788 return res;
545} 789}
546#endif 790#endif
547 791
548#if !HAVE_FDATASYNC 792#if !HAVE_FDATASYNC
549# define fdatasync fsync 793# define fdatasync fsync
550#endif 794#endif
551 795
552#if !HAVE_READAHEAD 796#if !HAVE_READAHEAD
553# define readahead aio_readahead 797# define readahead(fd,offset,count) aio_readahead (fd, offset, count, self)
554 798
555static ssize_t readahead (int fd, off_t offset, size_t count) 799static ssize_t aio_readahead (int fd, off_t offset, size_t count, worker *self)
556{ 800{
557 char readahead_buf[4096]; 801 dBUF;
558 802
559 while (count > 0) 803 while (count > 0)
560 { 804 {
561 size_t len = count < sizeof (readahead_buf) ? count : sizeof (readahead_buf); 805 size_t len = count < AIO_BUFSIZE ? count : AIO_BUFSIZE;
562 806
563 pread (fd, readahead_buf, len, offset); 807 pread (fd, aio_buf, len, offset);
564 offset += len; 808 offset += len;
565 count -= len; 809 count -= len;
566 } 810 }
567 811
568 errno = 0; 812 errno = 0;
569} 813}
814
570#endif 815#endif
571 816
572#if !HAVE_READDIR_R 817#if !HAVE_READDIR_R
573# define readdir_r aio_readdir_r 818# define readdir_r aio_readdir_r
574 819
577static int readdir_r (DIR *dirp, struct dirent *ent, struct dirent **res) 822static int readdir_r (DIR *dirp, struct dirent *ent, struct dirent **res)
578{ 823{
579 struct dirent *e; 824 struct dirent *e;
580 int errorno; 825 int errorno;
581 826
582 pthread_mutex_lock (&readdirlock); 827 LOCK (readdirlock);
583 828
584 e = readdir (dirp); 829 e = readdir (dirp);
585 errorno = errno; 830 errorno = errno;
586 831
587 if (e) 832 if (e)
590 strcpy (ent->d_name, e->d_name); 835 strcpy (ent->d_name, e->d_name);
591 } 836 }
592 else 837 else
593 *res = 0; 838 *res = 0;
594 839
595 pthread_mutex_unlock (&readdirlock); 840 UNLOCK (readdirlock);
596 841
597 errno = errorno; 842 errno = errorno;
598 return e ? 0 : -1; 843 return e ? 0 : -1;
599} 844}
600#endif 845#endif
601 846
602/* sendfile always needs emulation */ 847/* sendfile always needs emulation */
603static ssize_t sendfile_ (int ofd, int ifd, off_t offset, size_t count) 848static ssize_t sendfile_ (int ofd, int ifd, off_t offset, size_t count, worker *self)
604{ 849{
605 ssize_t res; 850 ssize_t res;
606 851
607 if (!count) 852 if (!count)
608 return 0; 853 return 0;
619 { 864 {
620 off_t sbytes; 865 off_t sbytes;
621 res = sendfile (ifd, ofd, offset, count, 0, &sbytes, 0); 866 res = sendfile (ifd, ofd, offset, count, 0, &sbytes, 0);
622 867
623 if (res < 0 && sbytes) 868 if (res < 0 && sbytes)
624 /* maybe only on EAGAIN only: as usual, the manpage leaves you guessing */ 869 /* maybe only on EAGAIN: as usual, the manpage leaves you guessing */
625 res = sbytes; 870 res = sbytes;
626 } 871 }
627 872
628# elif __hpux 873# elif __hpux
629 res = sendfile (ofd, ifd, offset, count, 0, 0); 874 res = sendfile (ofd, ifd, offset, count, 0, 0);
657#endif 902#endif
658 ) 903 )
659 ) 904 )
660 { 905 {
661 /* emulate sendfile. this is a major pain in the ass */ 906 /* emulate sendfile. this is a major pain in the ass */
662 char buf[4096]; 907 dBUF;
908
663 res = 0; 909 res = 0;
664 910
665 while (count) 911 while (count)
666 { 912 {
667 ssize_t cnt; 913 ssize_t cnt;
668 914
669 cnt = pread (ifd, buf, count > 4096 ? 4096 : count, offset); 915 cnt = pread (ifd, aio_buf, count > AIO_BUFSIZE ? AIO_BUFSIZE : count, offset);
670 916
671 if (cnt <= 0) 917 if (cnt <= 0)
672 { 918 {
673 if (cnt && !res) res = -1; 919 if (cnt && !res) res = -1;
674 break; 920 break;
675 } 921 }
676 922
677 cnt = write (ofd, buf, cnt); 923 cnt = write (ofd, aio_buf, cnt);
678 924
679 if (cnt <= 0) 925 if (cnt <= 0)
680 { 926 {
681 if (cnt && !res) res = -1; 927 if (cnt && !res) res = -1;
682 break; 928 break;
690 936
691 return res; 937 return res;
692} 938}
693 939
694/* read a full directory */ 940/* read a full directory */
695static int scandir_ (const char *path, void **namesp) 941static void scandir_ (aio_req req, worker *self)
696{ 942{
697 DIR *dirp = opendir (path); 943 DIR *dirp;
698 union 944 union
699 { 945 {
700 struct dirent d; 946 struct dirent d;
701 char b [offsetof (struct dirent, d_name) + NAME_MAX + 1]; 947 char b [offsetof (struct dirent, d_name) + NAME_MAX + 1];
702 } u; 948 } *u;
703 struct dirent *entp; 949 struct dirent *entp;
704 char *name, *names; 950 char *name, *names;
705 int memlen = 4096; 951 int memlen = 4096;
706 int memofs = 0; 952 int memofs = 0;
707 int res = 0; 953 int res = 0;
708 int errorno; 954 int errorno;
709 955
710 if (!dirp) 956 LOCK (wrklock);
711 return -1; 957 self->dirp = dirp = opendir (req->dataptr);
712 958 self->dbuf = u = malloc (sizeof (*u));
713 names = malloc (memlen); 959 req->data2ptr = names = malloc (memlen);
960 UNLOCK (wrklock);
961
962 if (dirp && u && names)
963 for (;;)
964 {
965 errno = 0;
966 readdir_r (dirp, &u->d, &entp);
967
968 if (!entp)
969 break;
970
971 name = entp->d_name;
972
973 if (name [0] != '.' || (name [1] && (name [1] != '.' || name [2])))
974 {
975 int len = strlen (name) + 1;
976
977 res++;
978
979 while (memofs + len > memlen)
980 {
981 memlen *= 2;
982 LOCK (wrklock);
983 req->data2ptr = names = realloc (names, memlen);
984 UNLOCK (wrklock);
985
986 if (!names)
987 break;
988 }
989
990 memcpy (names + memofs, name, len);
991 memofs += len;
992 }
993 }
994
995 if (errno)
996 res = -1;
997
998 req->result = res;
999}
1000
1001/*****************************************************************************/
1002
1003static void *aio_proc (void *thr_arg)
1004{
1005 aio_req req;
1006 worker *self = (worker *)thr_arg;
714 1007
715 for (;;) 1008 for (;;)
716 { 1009 {
717 errno = 0, readdir_r (dirp, &u.d, &entp); 1010 LOCK (reqlock);
718
719 if (!entp)
720 break;
721
722 name = entp->d_name;
723
724 if (name [0] != '.' || (name [1] && (name [1] != '.' || name [2])))
725 {
726 int len = strlen (name) + 1;
727
728 res++;
729
730 while (memofs + len > memlen)
731 {
732 memlen *= 2;
733 names = realloc (names, memlen);
734 if (!names)
735 break;
736 }
737
738 memcpy (names + memofs, name, len);
739 memofs += len;
740 }
741 }
742
743 errorno = errno;
744 closedir (dirp);
745
746 if (errorno)
747 {
748 free (names);
749 errno = errorno;
750 res = -1;
751 }
752
753 *namesp = (void *)names;
754 return res;
755}
756
757/*****************************************************************************/
758
759static void *aio_proc (void *thr_arg)
760{
761 aio_req req;
762 int type;
763
764 do
765 {
766 pthread_mutex_lock (&reqlock);
767 1011
768 for (;;) 1012 for (;;)
769 { 1013 {
770 req = reqs; 1014 self->req = req = reqq_shift (&req_queue);
771
772 if (reqs)
773 {
774 reqs = reqs->next;
775 if (!reqs) reqe = 0;
776 }
777 1015
778 if (req) 1016 if (req)
779 break; 1017 break;
780 1018
781 pthread_cond_wait (&reqwait, &reqlock); 1019 pthread_cond_wait (&reqwait, &reqlock);
782 } 1020 }
783 1021
784 pthread_mutex_unlock (&reqlock); 1022 --nready;
1023
1024 UNLOCK (reqlock);
785 1025
786 errno = 0; /* strictly unnecessary */ 1026 errno = 0; /* strictly unnecessary */
787 1027
788 if (!req->cancelled) 1028 if (!(req->flags & FLAG_CANCELLED))
789 switch (type = req->type) /* remember type for QUIT check */ 1029 switch (req->type)
790 { 1030 {
791 case REQ_READ: req->result = pread (req->fd, req->dataptr, req->length, req->offset); break; 1031 case REQ_READ: req->result = pread (req->fd, req->dataptr, req->length, req->offset); break;
792 case REQ_WRITE: req->result = pwrite (req->fd, req->dataptr, req->length, req->offset); break; 1032 case REQ_WRITE: req->result = pwrite (req->fd, req->dataptr, req->length, req->offset); break;
793 1033
794 case REQ_READAHEAD: req->result = readahead (req->fd, req->offset, req->length); break; 1034 case REQ_READAHEAD: req->result = readahead (req->fd, req->offset, req->length); break;
795 case REQ_SENDFILE: req->result = sendfile_ (req->fd, req->fd2, req->offset, req->length); break; 1035 case REQ_SENDFILE: req->result = sendfile_ (req->fd, req->fd2, req->offset, req->length, self); break;
796 1036
797 case REQ_STAT: req->result = stat (req->dataptr, req->statdata); break; 1037 case REQ_STAT: req->result = stat (req->dataptr, req->statdata); break;
798 case REQ_LSTAT: req->result = lstat (req->dataptr, req->statdata); break; 1038 case REQ_LSTAT: req->result = lstat (req->dataptr, req->statdata); break;
799 case REQ_FSTAT: req->result = fstat (req->fd , req->statdata); break; 1039 case REQ_FSTAT: req->result = fstat (req->fd , req->statdata); break;
800 1040
803 case REQ_UNLINK: req->result = unlink (req->dataptr); break; 1043 case REQ_UNLINK: req->result = unlink (req->dataptr); break;
804 case REQ_RMDIR: req->result = rmdir (req->dataptr); break; 1044 case REQ_RMDIR: req->result = rmdir (req->dataptr); break;
805 case REQ_RENAME: req->result = rename (req->data2ptr, req->dataptr); break; 1045 case REQ_RENAME: req->result = rename (req->data2ptr, req->dataptr); break;
806 case REQ_LINK: req->result = link (req->data2ptr, req->dataptr); break; 1046 case REQ_LINK: req->result = link (req->data2ptr, req->dataptr); break;
807 case REQ_SYMLINK: req->result = symlink (req->data2ptr, req->dataptr); break; 1047 case REQ_SYMLINK: req->result = symlink (req->data2ptr, req->dataptr); break;
1048 case REQ_MKNOD: req->result = mknod (req->data2ptr, req->mode, (dev_t)req->offset); break;
808 1049
809 case REQ_FDATASYNC: req->result = fdatasync (req->fd); break; 1050 case REQ_FDATASYNC: req->result = fdatasync (req->fd); break;
810 case REQ_FSYNC: req->result = fsync (req->fd); break; 1051 case REQ_FSYNC: req->result = fsync (req->fd); break;
811 case REQ_READDIR: req->result = scandir_ (req->dataptr, &req->data2ptr); break; 1052 case REQ_READDIR: scandir_ (req, self); break;
812 1053
813 case REQ_SLEEP: 1054 case REQ_BUSY:
814 { 1055 {
815 struct timeval tv; 1056 struct timeval tv;
816 1057
817 tv.tv_sec = req->fd; 1058 tv.tv_sec = req->fd;
818 tv.tv_usec = req->fd2; 1059 tv.tv_usec = req->fd2;
819 1060
820 req->result = select (0, 0, 0, 0, &tv); 1061 req->result = select (0, 0, 0, 0, &tv);
821 } 1062 }
822 1063
1064 case REQ_GROUP:
1065 case REQ_NOP:
1066 break;
1067
823 case REQ_QUIT: 1068 case REQ_QUIT:
1069 LOCK (wrklock);
1070 worker_free (self);
1071 --started;
1072 UNLOCK (wrklock);
824 break; 1073 return 0;
825 1074
826 default: 1075 default:
827 req->result = ENOSYS; 1076 req->result = ENOSYS;
828 break; 1077 break;
829 } 1078 }
830 1079
831 req->errorno = errno; 1080 req->errorno = errno;
832 1081
833 pthread_mutex_lock (&reslock); 1082 LOCK (reslock);
834 1083
835 req->next = 0; 1084 ++npending;
836 1085
837 if (rese) 1086 if (!reqq_push (&res_queue, req))
838 {
839 rese->next = req;
840 rese = req;
841 }
842 else
843 {
844 rese = ress = req;
845
846 /* write a dummy byte to the pipe so fh becomes ready */ 1087 /* write a dummy byte to the pipe so fh becomes ready */
847 write (respipe [1], &respipe, 1); 1088 write (respipe [1], &respipe, 1);
848 }
849 1089
850 pthread_mutex_unlock (&reslock); 1090 self->req = 0;
851 } 1091 worker_clear (self);
852 while (type != REQ_QUIT);
853 1092
854 return 0; 1093 UNLOCK (reslock);
1094 }
855} 1095}
856 1096
857/*****************************************************************************/ 1097/*****************************************************************************/
858 1098
859static void atfork_prepare (void) 1099static void atfork_prepare (void)
860{ 1100{
861 pthread_mutex_lock (&reqlock); 1101 LOCK (wrklock);
862 pthread_mutex_lock (&reslock); 1102 LOCK (reqlock);
1103 LOCK (reslock);
863#if !HAVE_PREADWRITE 1104#if !HAVE_PREADWRITE
864 pthread_mutex_lock (&preadwritelock); 1105 LOCK (preadwritelock);
865#endif 1106#endif
866#if !HAVE_READDIR_R 1107#if !HAVE_READDIR_R
867 pthread_mutex_lock (&readdirlock); 1108 LOCK (readdirlock);
868#endif 1109#endif
869} 1110}
870 1111
871static void atfork_parent (void) 1112static void atfork_parent (void)
872{ 1113{
873#if !HAVE_READDIR_R 1114#if !HAVE_READDIR_R
874 pthread_mutex_unlock (&readdirlock); 1115 UNLOCK (readdirlock);
875#endif 1116#endif
876#if !HAVE_PREADWRITE 1117#if !HAVE_PREADWRITE
877 pthread_mutex_unlock (&preadwritelock); 1118 UNLOCK (preadwritelock);
878#endif 1119#endif
879 pthread_mutex_unlock (&reslock); 1120 UNLOCK (reslock);
880 pthread_mutex_unlock (&reqlock); 1121 UNLOCK (reqlock);
1122 UNLOCK (wrklock);
881} 1123}
882 1124
883static void atfork_child (void) 1125static void atfork_child (void)
884{ 1126{
885 aio_req prv; 1127 aio_req prv;
886 1128
1129 while (prv = reqq_shift (&req_queue))
1130 req_free (prv);
1131
1132 while (prv = reqq_shift (&res_queue))
1133 req_free (prv);
1134
1135 while (wrk_first.next != &wrk_first)
1136 {
1137 worker *wrk = wrk_first.next;
1138
1139 if (wrk->req)
1140 req_free (wrk->req);
1141
1142 worker_clear (wrk);
1143 worker_free (wrk);
1144 }
1145
887 started = 0; 1146 started = 0;
888 1147 nreqs = 0;
889 while (reqs)
890 {
891 prv = reqs;
892 reqs = prv->next;
893 req_free (prv);
894 }
895
896 reqs = reqe = 0;
897
898 while (ress)
899 {
900 prv = ress;
901 ress = prv->next;
902 req_free (prv);
903 }
904
905 ress = rese = 0;
906 1148
907 close (respipe [0]); 1149 close (respipe [0]);
908 close (respipe [1]); 1150 close (respipe [1]);
909 create_pipe (); 1151 create_pipe ();
910 1152
911 atfork_parent (); 1153 atfork_parent ();
912} 1154}
913 1155
914#define dREQ \ 1156#define dREQ \
915 aio_req req; \ 1157 aio_req req; \
1158 int req_pri = next_pri; \
1159 next_pri = DEFAULT_PRI + PRI_BIAS; \
916 \ 1160 \
917 if (SvOK (callback) && !SvROK (callback)) \ 1161 if (SvOK (callback) && !SvROK (callback)) \
918 croak ("callback must be undef or of reference type"); \ 1162 croak ("callback must be undef or of reference type"); \
919 \ 1163 \
920 Newz (0, req, 1, aio_cb); \ 1164 Newz (0, req, 1, aio_cb); \
921 if (!req) \ 1165 if (!req) \
922 croak ("out of memory during aio_req allocation"); \ 1166 croak ("out of memory during aio_req allocation"); \
923 \ 1167 \
924 req->callback = newSVsv (callback) 1168 req->callback = newSVsv (callback); \
1169 req->pri = req_pri
925 1170
926#define REQ_SEND \ 1171#define REQ_SEND \
927 req_send (req); \ 1172 req_send (req); \
928 \ 1173 \
929 if (GIMME_V != G_VOID) \ 1174 if (GIMME_V != G_VOID) \
934PROTOTYPES: ENABLE 1179PROTOTYPES: ENABLE
935 1180
936BOOT: 1181BOOT:
937{ 1182{
938 HV *stash = gv_stashpv ("IO::AIO", 1); 1183 HV *stash = gv_stashpv ("IO::AIO", 1);
1184
939 newCONSTSUB (stash, "EXDEV", newSViv (EXDEV)); 1185 newCONSTSUB (stash, "EXDEV", newSViv (EXDEV));
940 newCONSTSUB (stash, "O_RDONLY", newSViv (O_RDONLY)); 1186 newCONSTSUB (stash, "O_RDONLY", newSViv (O_RDONLY));
941 newCONSTSUB (stash, "O_WRONLY", newSViv (O_WRONLY)); 1187 newCONSTSUB (stash, "O_WRONLY", newSViv (O_WRONLY));
1188 newCONSTSUB (stash, "O_CREAT", newSViv (O_CREAT));
1189 newCONSTSUB (stash, "O_TRUNC", newSViv (O_TRUNC));
1190 newCONSTSUB (stash, "S_IFIFO", newSViv (S_IFIFO));
942 1191
943 create_pipe (); 1192 create_pipe ();
944 pthread_atfork (atfork_prepare, atfork_parent, atfork_child); 1193 pthread_atfork (atfork_prepare, atfork_parent, atfork_child);
945}
946 1194
1195 start_thread ();
1196}
1197
947void 1198void
948min_parallel (nthreads) 1199min_parallel (int nthreads)
949 int nthreads
950 PROTOTYPE: $ 1200 PROTOTYPE: $
951 1201
952void 1202void
953max_parallel (nthreads) 1203max_parallel (int nthreads)
954 int nthreads
955 PROTOTYPE: $ 1204 PROTOTYPE: $
956 1205
957int 1206int
958max_outstanding (nreqs) 1207max_outstanding (int maxreqs)
959 int nreqs 1208 PROTOTYPE: $
960 PROTOTYPE: $
961 CODE: 1209 CODE:
962 RETVAL = max_outstanding; 1210 RETVAL = max_outstanding;
963 max_outstanding = nreqs; 1211 max_outstanding = maxreqs;
1212 OUTPUT:
1213 RETVAL
964 1214
965void 1215void
966aio_open (pathname,flags,mode,callback=&PL_sv_undef) 1216aio_open (pathname,flags,mode,callback=&PL_sv_undef)
967 SV * pathname 1217 SV * pathname
968 int flags 1218 int flags
1183 1433
1184 REQ_SEND; 1434 REQ_SEND;
1185} 1435}
1186 1436
1187void 1437void
1438aio_mknod (pathname,mode,dev,callback=&PL_sv_undef)
1439 SV * pathname
1440 SV * callback
1441 UV mode
1442 UV dev
1443 PPCODE:
1444{
1445 dREQ;
1446
1447 req->type = REQ_MKNOD;
1448 req->data = newSVsv (pathname);
1449 req->dataptr = SvPVbyte_nolen (req->data);
1450 req->mode = (mode_t)mode;
1451 req->offset = dev;
1452
1453 REQ_SEND;
1454}
1455
1456void
1188aio_sleep (delay,callback=&PL_sv_undef) 1457aio_busy (delay,callback=&PL_sv_undef)
1189 double delay 1458 double delay
1190 SV * callback 1459 SV * callback
1191 PPCODE: 1460 PPCODE:
1192{ 1461{
1193 dREQ; 1462 dREQ;
1194 1463
1195 req->type = REQ_SLEEP; 1464 req->type = REQ_BUSY;
1196 req->fd = delay < 0. ? 0 : delay; 1465 req->fd = delay < 0. ? 0 : delay;
1197 req->fd2 = delay < 0. ? 0 : 1000. * (delay - req->fd); 1466 req->fd2 = delay < 0. ? 0 : 1000. * (delay - req->fd);
1198 1467
1199 REQ_SEND; 1468 REQ_SEND;
1200} 1469}
1204 SV * callback 1473 SV * callback
1205 PROTOTYPE: ;$ 1474 PROTOTYPE: ;$
1206 PPCODE: 1475 PPCODE:
1207{ 1476{
1208 dREQ; 1477 dREQ;
1478
1209 req->type = REQ_GROUP; 1479 req->type = REQ_GROUP;
1210 req_send (req); 1480 req_send (req);
1481
1211 XPUSHs (req_sv (req, AIO_GRP_KLASS)); 1482 XPUSHs (req_sv (req, AIO_GRP_KLASS));
1212} 1483}
1484
1485void
1486aio_nop (callback=&PL_sv_undef)
1487 SV * callback
1488 PPCODE:
1489{
1490 dREQ;
1491
1492 req->type = REQ_NOP;
1493
1494 REQ_SEND;
1495}
1496
1497int
1498aioreq_pri (int pri = 0)
1499 PROTOTYPE: ;$
1500 CODE:
1501 RETVAL = next_pri - PRI_BIAS;
1502 if (items > 0)
1503 {
1504 if (pri < PRI_MIN) pri = PRI_MIN;
1505 if (pri > PRI_MAX) pri = PRI_MAX;
1506 next_pri = pri + PRI_BIAS;
1507 }
1508 OUTPUT:
1509 RETVAL
1510
1511void
1512aioreq_nice (int nice = 0)
1513 CODE:
1514 nice = next_pri - nice;
1515 if (nice < PRI_MIN) nice = PRI_MIN;
1516 if (nice > PRI_MAX) nice = PRI_MAX;
1517 next_pri = nice + PRI_BIAS;
1213 1518
1214void 1519void
1215flush () 1520flush ()
1216 PROTOTYPE: 1521 PROTOTYPE:
1217 CODE: 1522 CODE:
1218 while (nreqs) 1523 while (nreqs)
1219 { 1524 {
1220 poll_wait (); 1525 poll_wait ();
1221 poll_cb (); 1526 poll_cb (0);
1222 } 1527 }
1223 1528
1224void 1529void
1225poll() 1530poll()
1226 PROTOTYPE: 1531 PROTOTYPE:
1227 CODE: 1532 CODE:
1228 if (nreqs) 1533 if (nreqs)
1229 { 1534 {
1230 poll_wait (); 1535 poll_wait ();
1231 poll_cb (); 1536 poll_cb (0);
1232 } 1537 }
1233 1538
1234int 1539int
1235poll_fileno() 1540poll_fileno()
1236 PROTOTYPE: 1541 PROTOTYPE:
1241 1546
1242int 1547int
1243poll_cb(...) 1548poll_cb(...)
1244 PROTOTYPE: 1549 PROTOTYPE:
1245 CODE: 1550 CODE:
1246 RETVAL = poll_cb (); 1551 RETVAL = poll_cb (0);
1552 OUTPUT:
1553 RETVAL
1554
1555int
1556poll_some(int max = 0)
1557 PROTOTYPE: $
1558 CODE:
1559 RETVAL = poll_cb (max);
1247 OUTPUT: 1560 OUTPUT:
1248 RETVAL 1561 RETVAL
1249 1562
1250void 1563void
1251poll_wait() 1564poll_wait()
1260 CODE: 1573 CODE:
1261 RETVAL = nreqs; 1574 RETVAL = nreqs;
1262 OUTPUT: 1575 OUTPUT:
1263 RETVAL 1576 RETVAL
1264 1577
1265PROTOTYPES: DISABLE 1578int
1266 1579nready()
1267MODULE = IO::AIO PACKAGE = IO::AIO::REQ
1268
1269void
1270cancel (aio_req_ornot req)
1271 PROTOTYPE: 1580 PROTOTYPE:
1272 CODE: 1581 CODE:
1582 RETVAL = get_nready ();
1583 OUTPUT:
1584 RETVAL
1585
1586int
1587npending()
1588 PROTOTYPE:
1589 CODE:
1590 RETVAL = get_npending ();
1591 OUTPUT:
1592 RETVAL
1593
1594PROTOTYPES: DISABLE
1595
1596MODULE = IO::AIO PACKAGE = IO::AIO::REQ
1597
1598void
1599cancel (aio_req_ornot req)
1600 CODE:
1273 req_cancel (req); 1601 req_cancel (req);
1602
1603void
1604cb (aio_req_ornot req, SV *callback=&PL_sv_undef)
1605 CODE:
1606 SvREFCNT_dec (req->callback);
1607 req->callback = newSVsv (callback);
1274 1608
1275MODULE = IO::AIO PACKAGE = IO::AIO::GRP 1609MODULE = IO::AIO PACKAGE = IO::AIO::GRP
1276 1610
1277void 1611void
1278add (aio_req grp, ...) 1612add (aio_req grp, ...)
1279 PPCODE: 1613 PPCODE:
1280{ 1614{
1281 int i; 1615 int i;
1616 aio_req req;
1282 1617
1283 if (grp->fd == 2) 1618 if (grp->fd == 2)
1284 croak ("cannot add requests to IO::AIO::GRP after the group finished"); 1619 croak ("cannot add requests to IO::AIO::GRP after the group finished");
1285 1620
1286 for (i = 1; i < items; ++i ) 1621 for (i = 1; i < items; ++i )
1287 { 1622 {
1288 if (GIMME_V != G_VOID) 1623 if (GIMME_V != G_VOID)
1289 XPUSHs (sv_2mortal (newSVsv (ST (i)))); 1624 XPUSHs (sv_2mortal (newSVsv (ST (i))));
1290 1625
1291 aio_req req = SvAIO_REQ (ST (i)); 1626 req = SvAIO_REQ (ST (i));
1292 1627
1293 if (req) 1628 if (req)
1294 { 1629 {
1295 ++grp->length; 1630 ++grp->length;
1296 req->grp = grp; 1631 req->grp = grp;
1305 } 1640 }
1306 } 1641 }
1307} 1642}
1308 1643
1309void 1644void
1645cancel_subs (aio_req_ornot req)
1646 CODE:
1647 req_cancel_subs (req);
1648
1649void
1310result (aio_req grp, ...) 1650result (aio_req grp, ...)
1311 CODE: 1651 CODE:
1312{ 1652{
1313 int i; 1653 int i;
1654 AV *av;
1655
1656 grp->errorno = errno;
1657
1314 AV *av = newAV (); 1658 av = newAV ();
1315 1659
1316 for (i = 1; i < items; ++i ) 1660 for (i = 1; i < items; ++i )
1317 av_push (av, newSVsv (ST (i))); 1661 av_push (av, newSVsv (ST (i)));
1318 1662
1319 SvREFCNT_dec (grp->data); 1663 SvREFCNT_dec (grp->data);
1320 grp->data = (SV *)av; 1664 grp->data = (SV *)av;
1321} 1665}
1322 1666
1323void 1667void
1324lock (aio_req grp) 1668errno (aio_req grp, int errorno = errno)
1325 CODE: 1669 CODE:
1326 ++grp->length; 1670 grp->errorno = errorno;
1327 1671
1328void 1672void
1329unlock (aio_req grp)
1330 CODE:
1331 aio_grp_dec (grp);
1332
1333void
1334feeder_limit (aio_req grp, int limit) 1673limit (aio_req grp, int limit)
1335 CODE: 1674 CODE:
1336 grp->fd2 = limit; 1675 grp->fd2 = limit;
1337 aio_grp_feed (grp); 1676 aio_grp_feed (grp);
1338 1677
1339void 1678void
1340set_feeder (aio_req grp, SV *callback=&PL_sv_undef) 1679feed (aio_req grp, SV *callback=&PL_sv_undef)
1341 CODE: 1680 CODE:
1342{ 1681{
1343 SvREFCNT_dec (grp->fh2); 1682 SvREFCNT_dec (grp->fh2);
1344 grp->fh2 = newSVsv (callback); 1683 grp->fh2 = newSVsv (callback);
1345 1684
