/cvs/IO-AIO/AIO.xs

Comparing IO-AIO/AIO.xs (file contents):
Revision 1.46 by root, Sun Oct 22 00:49:29 2006 UTC vs.
Revision 1.83 by root, Sat Oct 28 00:17:30 2006 UTC

1/* solaris */
2#define _POSIX_PTHREAD_SEMANTICS 1
3
4#if __linux && !defined(_GNU_SOURCE)
5# define _GNU_SOURCE
6#endif
7
8/* just in case */
1#define _REENTRANT 1 9#define _REENTRANT 1
10
2#include <errno.h> 11#include <errno.h>
3 12
4#include "EXTERN.h" 13#include "EXTERN.h"
5#include "perl.h" 14#include "perl.h"
6#include "XSUB.h" 15#include "XSUB.h"
39/* used for struct dirent, AIX doesn't provide it */ 48/* used for struct dirent, AIX doesn't provide it */
40#ifndef NAME_MAX 49#ifndef NAME_MAX
41# define NAME_MAX 4096 50# define NAME_MAX 4096
42#endif 51#endif
43 52
53#ifndef PTHREAD_STACK_MIN
54/* care for broken platforms, e.g. windows */
55# define PTHREAD_STACK_MIN 16384
56#endif
57
44#if __ia64 58#if __ia64
45# define STACKSIZE 65536 59# define STACKSIZE 65536
60#elif __i386 || __x86_64 /* 16k is unreasonably high :( */
61# define STACKSIZE PTHREAD_STACK_MIN
46#else 62#else
47# define STACKSIZE 8192 63# define STACKSIZE 16384
64#endif
65
66/* whether word reads are potentially non-atomic.
67 * this is conservative; most arches this runs
68 * on likely have atomic word reads/writes.
69 */
70#ifndef WORDREAD_UNSAFE
71# if __i386 || __x86_64
72# define WORDREAD_UNSAFE 0
73# else
74# define WORDREAD_UNSAFE 1
48#endif 75# endif
76#endif
77
78/* buffer size for various temporary buffers */
79#define AIO_BUFSIZE 65536
80
81#define dBUF \
82 char *aio_buf; \
83 LOCK (wrklock); \
84 self->dbuf = aio_buf = malloc (AIO_BUFSIZE); \
85 UNLOCK (wrklock); \
86 if (!aio_buf) \
87 return -1;
49 88
50enum { 89enum {
51 REQ_QUIT, 90 REQ_QUIT,
52 REQ_OPEN, REQ_CLOSE, 91 REQ_OPEN, REQ_CLOSE,
53 REQ_READ, REQ_WRITE, REQ_READAHEAD, 92 REQ_READ, REQ_WRITE, REQ_READAHEAD,
54 REQ_SENDFILE, 93 REQ_SENDFILE,
55 REQ_STAT, REQ_LSTAT, REQ_FSTAT, 94 REQ_STAT, REQ_LSTAT, REQ_FSTAT,
56 REQ_FSYNC, REQ_FDATASYNC, 95 REQ_FSYNC, REQ_FDATASYNC,
57 REQ_UNLINK, REQ_RMDIR, REQ_RENAME, 96 REQ_UNLINK, REQ_RMDIR, REQ_RENAME,
58 REQ_READDIR, 97 REQ_MKNOD, REQ_READDIR,
59 REQ_LINK, REQ_SYMLINK, 98 REQ_LINK, REQ_SYMLINK,
60 REQ_SLEEP, 99 REQ_GROUP, REQ_NOP,
61 REQ_GROUP, 100 REQ_BUSY,
62}; 101};
63 102
64#define AIO_REQ_KLASS "IO::AIO::REQ" 103#define AIO_REQ_KLASS "IO::AIO::REQ"
65#define AIO_GRP_KLASS "IO::AIO::GRP" 104#define AIO_GRP_KLASS "IO::AIO::GRP"
66 105
67typedef struct aio_cb 106typedef struct aio_cb
68{ 107{
69 struct aio_cb *grp, *grp_prev, *grp_next;
70
71 struct aio_cb *volatile next; 108 struct aio_cb *volatile next;
72
73 SV *self; /* the perl counterpart of this request, if any */
74 109
75 SV *data, *callback; 110 SV *data, *callback;
76 SV *fh, *fh2; 111 SV *fh, *fh2;
77 void *dataptr, *data2ptr; 112 void *dataptr, *data2ptr;
78 Stat_t *statdata; 113 Stat_t *statdata;
79 off_t offset; 114 off_t offset;
80 size_t length; 115 size_t length;
81 ssize_t result; 116 ssize_t result;
82 117
118 STRLEN dataoffset;
83 int type; 119 int type;
84 int fd, fd2; 120 int fd, fd2;
85 int errorno; 121 int errorno;
86 STRLEN dataoffset;
87 mode_t mode; /* open */ 122 mode_t mode; /* open */
123
88 unsigned char cancelled; 124 unsigned char flags;
125 unsigned char pri;
126
127 SV *self; /* the perl counterpart of this request, if any */
128 struct aio_cb *grp, *grp_prev, *grp_next, *grp_first;
89} aio_cb; 129} aio_cb;
130
131enum {
132 FLAG_CANCELLED = 0x01,
133};
90 134
91typedef aio_cb *aio_req; 135typedef aio_cb *aio_req;
92typedef aio_cb *aio_req_ornot; 136typedef aio_cb *aio_req_ornot;
93 137
138enum {
139 PRI_MIN = -4,
140 PRI_MAX = 4,
141
142 DEFAULT_PRI = 0,
143 PRI_BIAS = -PRI_MIN,
144 NUM_PRI = PRI_MAX + PRI_BIAS + 1,
145};
146
147static int next_pri = DEFAULT_PRI + PRI_BIAS;
148
94static int started, wanted; 149static unsigned int started, wanted;
95static volatile int nreqs; 150
96static int max_outstanding = 1<<30; 151#if __linux && defined (PTHREAD_ADAPTIVE_MUTEX_INITIALIZER_NP)
152# define AIO_MUTEX_INIT PTHREAD_ADAPTIVE_MUTEX_INITIALIZER_NP
153#else
154# define AIO_MUTEX_INIT PTHREAD_MUTEX_INITIALIZER
155#endif
156
157#define LOCK(mutex) pthread_mutex_lock (&(mutex))
158#define UNLOCK(mutex) pthread_mutex_unlock (&(mutex))
159
160/* worker threads management */
161static pthread_mutex_t wrklock = AIO_MUTEX_INIT;
162
163typedef struct worker {
164 /* locked by wrklock */
165 struct worker *prev, *next;
166
167 pthread_t tid;
168
169 /* locked by reslock, reqlock or wrklock */
170 aio_req req; /* currently processed request */
171 void *dbuf;
172 DIR *dirp;
173} worker;
174
175static worker wrk_first = { &wrk_first, &wrk_first, 0 };
176
177static void worker_clear (worker *wrk)
178{
179 if (wrk->dirp)
180 {
181 closedir (wrk->dirp);
182 wrk->dirp = 0;
183 }
184
185 if (wrk->dbuf)
186 {
187 free (wrk->dbuf);
188 wrk->dbuf = 0;
189 }
190}
191
192static void worker_free (worker *wrk)
193{
194 wrk->next->prev = wrk->prev;
195 wrk->prev->next = wrk->next;
196
197 free (wrk);
198}
199
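The worker list above is kept as a circular doubly-linked list anchored on a sentinel: wrk_first is its own prev and next, start_thread (further down in the diff) links new workers in right after the sentinel, and worker_free unlinks and frees them, so neither operation needs NULL checks. A minimal standalone sketch of that sentinel pattern follows (illustrative names only, not the module's code):

    #include <stdio.h>
    #include <stdlib.h>

    typedef struct item {
        struct item *prev, *next;
        int id;
    } item;

    static item head = { &head, &head, 0 };    /* sentinel, like wrk_first */

    static void item_add (item *it)
    {
        /* insert right after the sentinel, as start_thread does */
        it->prev = &head;
        it->next = head.next;
        head.next->prev = it;
        head.next = it;
    }

    static void item_del (item *it)
    {
        /* unlink and free, as worker_free does */
        it->next->prev = it->prev;
        it->prev->next = it->next;
        free (it);
    }

    int main (void)
    {
        int i;

        for (i = 1; i <= 3; ++i)
        {
            item *it = calloc (1, sizeof (item));
            if (!it) return 1;
            it->id = i;
            item_add (it);
        }

        while (head.next != &head)             /* stop when only the sentinel is left */
        {
            printf ("worker %d\n", head.next->id);
            item_del (head.next);
        }

        return 0;
    }
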
200static volatile unsigned int nreqs, nready, npending;
201static volatile unsigned int max_outstanding = 0xffffffff;
97static int respipe [2]; 202static int respipe [2];
98 203
99static pthread_mutex_t reslock = PTHREAD_MUTEX_INITIALIZER; 204static pthread_mutex_t reslock = AIO_MUTEX_INIT;
100static pthread_mutex_t reqlock = PTHREAD_MUTEX_INITIALIZER; 205static pthread_mutex_t reqlock = AIO_MUTEX_INIT;
101static pthread_cond_t reqwait = PTHREAD_COND_INITIALIZER; 206static pthread_cond_t reqwait = PTHREAD_COND_INITIALIZER;
102 207
103static volatile aio_req reqs, reqe; /* queue start, queue end */ 208#if WORDREAD_UNSAFE
104static volatile aio_req ress, rese; /* queue start, queue end */
105 209
210static unsigned int get_nready ()
211{
212 unsigned int retval;
213
214 LOCK (reqlock);
215 retval = nready;
216 UNLOCK (reqlock);
217
218 return retval;
219}
220
221static unsigned int get_npending ()
222{
223 unsigned int retval;
224
225 LOCK (reslock);
226 retval = npending;
227 UNLOCK (reslock);
228
229 return retval;
230}
231
232#else
233
234# define get_nready() nready
235# define get_npending() npending
236
237#endif
238
239/*
240 * a somewhat faster data structure might be nice, but
241 * with 8 priorities this actually needs <20 insns
242 * per shift, the most expensive operation.
243 */
244typedef struct {
245 aio_req qs[NUM_PRI], qe[NUM_PRI]; /* qstart, qend */
246 int size;
247} reqq;
248
249static reqq req_queue;
250static reqq res_queue;
251
252int reqq_push (reqq *q, aio_req req)
253{
254 int pri = req->pri;
255 req->next = 0;
256
257 if (q->qe[pri])
258 {
259 q->qe[pri]->next = req;
260 q->qe[pri] = req;
261 }
262 else
263 q->qe[pri] = q->qs[pri] = req;
264
265 return q->size++;
266}
267
268aio_req reqq_shift (reqq *q)
269{
270 int pri;
271
272 if (!q->size)
273 return 0;
274
275 --q->size;
276
277 for (pri = NUM_PRI; pri--; )
278 {
279 aio_req req = q->qs[pri];
280
281 if (req)
282 {
283 if (!(q->qs[pri] = req->next))
284 q->qe[pri] = 0;
285
286 return req;
287 }
288 }
289
290 abort ();
291}
292
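The reqq structure above replaces the single linked lists (reqs/reqe, ress/rese) of revision 1.46 with one FIFO per priority level; reqq_shift scans from the highest priority downward, so higher-priority requests come out first and requests of equal priority stay in submission order. A minimal standalone sketch of the same idea follows (made-up names, not the module's code):

    #include <stdio.h>
    #include <stdlib.h>

    #define NUM_PRI 9                      /* PRI_MAX + PRI_BIAS + 1 in the diff */

    typedef struct node {
        struct node *next;
        int pri;                           /* already biased: 0 .. NUM_PRI-1 */
        const char *name;
    } node;

    typedef struct {
        node *qs[NUM_PRI], *qe[NUM_PRI];   /* queue start/end per priority */
        int size;
    } pq;

    static void pq_push (pq *q, node *n)
    {
        n->next = 0;

        if (q->qe[n->pri])
        {
            q->qe[n->pri]->next = n;       /* append to that priority's FIFO */
            q->qe[n->pri] = n;
        }
        else
            q->qe[n->pri] = q->qs[n->pri] = n;

        ++q->size;
    }

    static node *pq_shift (pq *q)
    {
        int pri;

        if (!q->size)
            return 0;

        --q->size;

        for (pri = NUM_PRI; pri--; )       /* highest priority first */
            if (q->qs[pri])
            {
                node *n = q->qs[pri];

                if (!(q->qs[pri] = n->next))
                    q->qe[pri] = 0;

                return n;
            }

        abort ();                          /* size and queues disagree */
    }

    int main (void)
    {
        static pq q;
        node a = { 0, 4, "first, default pri" };
        node b = { 0, 8, "second, max pri" };
        node c = { 0, 4, "third, default pri" };

        pq_push (&q, &a); pq_push (&q, &b); pq_push (&q, &c);

        while (q.size)
            printf ("%s\n", pq_shift (&q)->name);   /* b, then a, then c */

        return 0;
    }
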
293static int poll_cb (int max);
294static void req_invoke (aio_req req);
106static void req_free (aio_req req); 295static void req_free (aio_req req);
296static void req_cancel (aio_req req);
107 297
108/* must be called at most once */ 298/* must be called at most once */
109static SV *req_sv (aio_req req, const char *klass) 299static SV *req_sv (aio_req req, const char *klass)
110{ 300{
301 if (!req->self)
302 {
111 req->self = (SV *)newHV (); 303 req->self = (SV *)newHV ();
112 sv_magic (req->self, 0, PERL_MAGIC_ext, (char *)req, 0); 304 sv_magic (req->self, 0, PERL_MAGIC_ext, (char *)req, 0);
305 }
113 306
114 return sv_2mortal (sv_bless (newRV_inc (req->self), gv_stashpv (klass, 1))); 307 return sv_2mortal (sv_bless (newRV_inc (req->self), gv_stashpv (klass, 1)));
115} 308}
116 309
117static aio_req SvAIO_REQ (SV *sv) 310static aio_req SvAIO_REQ (SV *sv)
118{ 311{
312 MAGIC *mg;
313
119 if (!sv_derived_from (sv, AIO_REQ_KLASS) || !SvROK (sv)) 314 if (!sv_derived_from (sv, AIO_REQ_KLASS) || !SvROK (sv))
120 croak ("object of class " AIO_REQ_KLASS " expected"); 315 croak ("object of class " AIO_REQ_KLASS " expected");
121 316
122 MAGIC *mg = mg_find (SvRV (sv), PERL_MAGIC_ext); 317 mg = mg_find (SvRV (sv), PERL_MAGIC_ext);
123 318
124 return mg ? (aio_req)mg->mg_ptr : 0; 319 return mg ? (aio_req)mg->mg_ptr : 0;
125} 320}
126 321
322static void aio_grp_feed (aio_req grp)
323{
324 while (grp->length < grp->fd2 && !(grp->flags & FLAG_CANCELLED))
325 {
326 int old_len = grp->length;
327
328 if (grp->fh2 && SvOK (grp->fh2))
329 {
330 dSP;
331
332 ENTER;
333 SAVETMPS;
334 PUSHMARK (SP);
335 XPUSHs (req_sv (grp, AIO_GRP_KLASS));
336 PUTBACK;
337 call_sv (grp->fh2, G_VOID | G_EVAL | G_KEEPERR);
338 SPAGAIN;
339 FREETMPS;
340 LEAVE;
341 }
342
343 /* stop if no progress has been made */
344 if (old_len == grp->length)
345 {
346 SvREFCNT_dec (grp->fh2);
347 grp->fh2 = 0;
348 break;
349 }
350 }
351}
352
353static void aio_grp_dec (aio_req grp)
354{
355 --grp->length;
356
357 /* call feeder, if applicable */
358 aio_grp_feed (grp);
359
360 /* finish, if done */
361 if (!grp->length && grp->fd)
362 {
363 req_invoke (grp);
364 req_free (grp);
365 }
366}
367
368static void req_invoke (aio_req req)
369{
370 dSP;
371
372 if (!(req->flags & FLAG_CANCELLED) && SvOK (req->callback))
373 {
374 ENTER;
375 SAVETMPS;
376 PUSHMARK (SP);
377 EXTEND (SP, 1);
378
379 switch (req->type)
380 {
381 case REQ_READDIR:
382 {
383 SV *rv = &PL_sv_undef;
384
385 if (req->result >= 0)
386 {
387 int i;
388 char *buf = req->data2ptr;
389 AV *av = newAV ();
390
391 av_extend (av, req->result - 1);
392
393 for (i = 0; i < req->result; ++i)
394 {
395 SV *sv = newSVpv (buf, 0);
396
397 av_store (av, i, sv);
398 buf += SvCUR (sv) + 1;
399 }
400
401 rv = sv_2mortal (newRV_noinc ((SV *)av));
402 }
403
404 PUSHs (rv);
405 }
406 break;
407
408 case REQ_OPEN:
409 {
410 /* convert fd to fh */
411 SV *fh;
412
413 PUSHs (sv_2mortal (newSViv (req->result)));
414 PUTBACK;
415 call_pv ("IO::AIO::_fd2fh", G_SCALAR | G_EVAL);
416 SPAGAIN;
417
418 fh = SvREFCNT_inc (POPs);
419
420 PUSHMARK (SP);
421 XPUSHs (sv_2mortal (fh));
422 }
423 break;
424
425 case REQ_GROUP:
426 req->fd = 2; /* mark group as finished */
427
428 if (req->data)
429 {
430 int i;
431 AV *av = (AV *)req->data;
432
433 EXTEND (SP, AvFILL (av) + 1);
434 for (i = 0; i <= AvFILL (av); ++i)
435 PUSHs (*av_fetch (av, i, 0));
436 }
437 break;
438
439 case REQ_NOP:
440 case REQ_BUSY:
441 break;
442
443 default:
444 PUSHs (sv_2mortal (newSViv (req->result)));
445 break;
446 }
447
448 errno = req->errorno;
449
450 PUTBACK;
451 call_sv (req->callback, G_VOID | G_EVAL);
452 SPAGAIN;
453
454 FREETMPS;
455 LEAVE;
456 }
457
458 if (req->grp)
459 {
460 aio_req grp = req->grp;
461
462 /* unlink request */
463 if (req->grp_next) req->grp_next->grp_prev = req->grp_prev;
464 if (req->grp_prev) req->grp_prev->grp_next = req->grp_next;
465
466 if (grp->grp_first == req)
467 grp->grp_first = req->grp_next;
468
469 aio_grp_dec (grp);
470 }
471
472 if (SvTRUE (ERRSV))
473 {
474 req_free (req);
475 croak (0);
476 }
477}
478
479static void req_free (aio_req req)
480{
481 if (req->self)
482 {
483 sv_unmagic (req->self, PERL_MAGIC_ext);
484 SvREFCNT_dec (req->self);
485 }
486
487 SvREFCNT_dec (req->data);
488 SvREFCNT_dec (req->fh);
489 SvREFCNT_dec (req->fh2);
490 SvREFCNT_dec (req->callback);
491 Safefree (req->statdata);
492
493 if (req->type == REQ_READDIR)
494 free (req->data2ptr);
495
496 Safefree (req);
497}
498
499static void req_cancel_subs (aio_req grp)
500{
501 aio_req sub;
502
503 if (grp->type != REQ_GROUP)
504 return;
505
506 SvREFCNT_dec (grp->fh2);
507 grp->fh2 = 0;
508
509 for (sub = grp->grp_first; sub; sub = sub->grp_next)
510 req_cancel (sub);
511}
512
513static void req_cancel (aio_req req)
514{
515 req->flags |= FLAG_CANCELLED;
516
517 req_cancel_subs (req);
518}
519
520static void *aio_proc(void *arg);
521
522static void start_thread (void)
523{
524 sigset_t fullsigset, oldsigset;
525 pthread_attr_t attr;
526
527 worker *wrk = calloc (1, sizeof (worker));
528
529 if (!wrk)
530 croak ("unable to allocate worker thread data");
531
532 pthread_attr_init (&attr);
533 pthread_attr_setstacksize (&attr, STACKSIZE);
534 pthread_attr_setdetachstate (&attr, PTHREAD_CREATE_DETACHED);
535#ifdef PTHREAD_SCOPE_PROCESS
536 pthread_attr_setscope (&attr, PTHREAD_SCOPE_PROCESS);
537#endif
538
539 sigfillset (&fullsigset);
540
541 LOCK (wrklock);
542 sigprocmask (SIG_SETMASK, &fullsigset, &oldsigset);
543
544 if (pthread_create (&wrk->tid, &attr, aio_proc, (void *)wrk) == 0)
545 {
546 wrk->prev = &wrk_first;
547 wrk->next = wrk_first.next;
548 wrk_first.next->prev = wrk;
549 wrk_first.next = wrk;
550 ++started;
551 }
552 else
553 free (wrk);
554
555 sigprocmask (SIG_SETMASK, &oldsigset, 0);
556 UNLOCK (wrklock);
557}
558
559static void maybe_start_thread ()
560{
561#if 0
562 static struct timeval last;
563 struct timeval diff, now;
564#endif
565
566 if (started >= wanted)
567 return;
568
569 if (nready <= nreqs - get_nready () - get_npending ())
570 return;
571
572#if 0
573 gettimeofday (&now, 0);
574
575 diff.tv_sec = now.tv_sec - last.tv_sec;
576 diff.tv_usec = now.tv_usec - last.tv_usec;
577
578 if (diff.tv_usec < 0)
579 {
580 --diff.tv_sec;
581 diff.tv_usec += 1000000;
582 }
583
584 if (!diff.tv_sec && diff.tv_usec < 10000)
585 return;
586
587 last = now;
588#endif
589
590 start_thread ();
591}
592
593static void req_send (aio_req req)
594{
595 ++nreqs;
596
597 LOCK (reqlock);
598 ++nready;
599 reqq_push (&req_queue, req);
600 pthread_cond_signal (&reqwait);
601 UNLOCK (reqlock);
602
603 maybe_start_thread ();
604}
605
606static void end_thread (void)
607{
608 aio_req req;
609
610 Newz (0, req, 1, aio_cb);
611
612 req->type = REQ_QUIT;
613 req->pri = PRI_MAX + PRI_BIAS;
614
615 LOCK (reqlock);
616 reqq_push (&req_queue, req);
617 pthread_cond_signal (&reqwait);
618 UNLOCK (reqlock);
619
620 LOCK (wrklock);
621 --started;
622 UNLOCK (wrklock);
623}
624
625static void min_parallel (int nthreads)
626{
627 if (wanted < nthreads)
628 wanted = nthreads;
629}
630
631static void max_parallel (int nthreads)
632{
633 if (wanted > nthreads)
634 wanted = nthreads;
635
636 while (started > wanted)
637 end_thread ();
638}
639
127static void poll_wait () 640static void poll_wait ()
128{ 641{
129 if (nreqs && !ress)
130 {
131 fd_set rfd; 642 fd_set rfd;
643
644 while (nreqs)
645 {
646 int size;
647 if (WORDREAD_UNSAFE) LOCK (reslock);
648 size = res_queue.size;
649 if (WORDREAD_UNSAFE) UNLOCK (reslock);
650
651 if (size)
652 return;
653
654 maybe_start_thread ();
655
132 FD_ZERO(&rfd); 656 FD_ZERO(&rfd);
133 FD_SET(respipe [0], &rfd); 657 FD_SET(respipe [0], &rfd);
134 658
135 select (respipe [0] + 1, &rfd, 0, 0, 0); 659 select (respipe [0] + 1, &rfd, 0, 0, 0);
136 } 660 }
137} 661}
138 662
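poll_wait above blocks in select() on respipe[0]; a worker signals a completed request by writing one dummy byte to respipe[1] (see the end of aio_proc further down), and poll_cb drains the pipe once the result queue runs empty. A minimal standalone sketch of that self-pipe wake-up pattern, assuming POSIX threads and using illustrative names only (not the module's code):

    #include <pthread.h>
    #include <stdio.h>
    #include <unistd.h>
    #include <sys/select.h>

    static int notify_pipe[2];             /* [0] read end, [1] write end */

    static void *worker (void *arg)
    {
        (void)arg;
        sleep (1);                         /* stand-in for doing a request */
        write (notify_pipe[1], "x", 1);    /* wake whoever select()s */
        return 0;
    }

    int main (void)
    {
        pthread_t tid;
        fd_set rfd;
        char buf[32];

        if (pipe (notify_pipe))
            return 1;

        pthread_create (&tid, 0, worker, 0);

        FD_ZERO (&rfd);
        FD_SET (notify_pipe[0], &rfd);
        select (notify_pipe[0] + 1, &rfd, 0, 0, 0);   /* block until notified */

        /* drain the pipe, like poll_cb does once the result queue empties */
        while (read (notify_pipe[0], buf, sizeof buf) == sizeof buf)
            ;

        puts ("a result is ready to be processed");
        pthread_join (tid, 0);
        return 0;
    }
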
139static void req_invoke (aio_req req)
140{
141 dSP;
142 int errorno = errno;
143
144 if (req->cancelled || !SvOK (req->callback))
145 return;
146
147 errno = req->errorno;
148
149 ENTER;
150 PUSHMARK (SP);
151
152 switch (req->type)
153 {
154 case REQ_READDIR:
155 {
156 SV *rv = &PL_sv_undef;
157
158 if (req->result >= 0)
159 {
160 char *buf = req->data2ptr;
161 AV *av = newAV ();
162
163 while (req->result)
164 {
165 SV *sv = newSVpv (buf, 0);
166
167 av_push (av, sv);
168 buf += SvCUR (sv) + 1;
169 req->result--;
170 }
171
172 rv = sv_2mortal (newRV_noinc ((SV *)av));
173 }
174
175 XPUSHs (rv);
176 }
177 break;
178
179 case REQ_OPEN:
180 {
181 /* convert fd to fh */
182 SV *fh;
183
184 XPUSHs (sv_2mortal (newSViv (req->result)));
185 PUTBACK;
186 call_pv ("IO::AIO::_fd2fh", G_SCALAR | G_EVAL);
187 SPAGAIN;
188
189 fh = SvREFCNT_inc (POPs);
190
191 PUSHMARK (SP);
192 XPUSHs (sv_2mortal (fh));
193 }
194 break;
195
196 case REQ_SLEEP:
197 case REQ_GROUP:
198 break;
199
200 default:
201 XPUSHs (sv_2mortal (newSViv (req->result)));
202 break;
203 }
204
205
206 PUTBACK;
207 call_sv (req->callback, G_VOID | G_EVAL);
208 SPAGAIN;
209
210 if (SvTRUE (ERRSV))
211 {
212 req_free (req);
213 croak (0);
214 }
215
216 LEAVE;
217
218 errno = errorno;
219}
220
221static void req_free (aio_req req)
222{
223 if (req->grp)
224 {
225 aio_req grp = req->grp;
226
227 /* unlink request */
228 req->grp_next->grp_prev = req->grp_prev;
229 req->grp_prev->grp_next = req->grp_next;
230
231 if (grp->grp_next == grp && grp->fd)
232 {
233 req_invoke (grp);
234 req_free (grp);
235 }
236 }
237
238 if (req->self)
239 {
240 sv_unmagic (req->self, PERL_MAGIC_ext);
241 SvREFCNT_dec (req->self);
242 }
243
244 if (req->data)
245 SvREFCNT_dec (req->data);
246
247 if (req->fh)
248 SvREFCNT_dec (req->fh);
249
250 if (req->fh2)
251 SvREFCNT_dec (req->fh2);
252
253 if (req->statdata)
254 Safefree (req->statdata);
255
256 if (req->callback)
257 SvREFCNT_dec (req->callback);
258
259 if (req->type == REQ_READDIR && req->result >= 0)
260 free (req->data2ptr);
261
262 Safefree (req);
263}
264
265static void req_cancel (aio_req req)
266{
267 req->cancelled = 1;
268
269 if (req->type == REQ_GROUP)
270 {
271 aio_req sub;
272
273 for (sub = req->grp_next; sub != req; sub = sub->grp_next)
274 req_cancel (sub);
275 }
276}
277
278static int poll_cb () 663static int poll_cb (int max)
279{ 664{
280 dSP; 665 dSP;
281 int count = 0; 666 int count = 0;
282 int do_croak = 0; 667 int do_croak = 0;
283 aio_req req; 668 aio_req req;
284 669
285 for (;;) 670 for (;;)
286 { 671 {
287 pthread_mutex_lock (&reslock); 672 while (max <= 0 || count < max)
288 req = ress;
289
290 if (req)
291 { 673 {
292 ress = req->next; 674 maybe_start_thread ();
293 675
676 LOCK (reslock);
677 req = reqq_shift (&res_queue);
678
294 if (!ress) 679 if (req)
295 { 680 {
681 --npending;
682
683 if (!res_queue.size)
684 {
296 /* read any signals sent by the worker threads */ 685 /* read any signals sent by the worker threads */
297 char buf [32]; 686 char buf [32];
298 while (read (respipe [0], buf, 32) == 32) 687 while (read (respipe [0], buf, 32) == 32)
688 ;
299 ; 689 }
300
301 rese = 0;
302 } 690 }
691
692 UNLOCK (reslock);
693
694 if (!req)
695 break;
696
697 --nreqs;
698
699 if (req->type == REQ_GROUP && req->length)
700 {
701 req->fd = 1; /* mark request as delayed */
702 continue;
703 }
704 else
705 {
706 if (req->type == REQ_READ)
707 SvCUR_set (req->data, req->dataoffset + (req->result > 0 ? req->result : 0));
708
709 if (req->data2ptr && (req->type == REQ_READ || req->type == REQ_WRITE))
710 SvREADONLY_off (req->data);
711
712 if (req->statdata)
713 {
714 PL_laststype = req->type == REQ_LSTAT ? OP_LSTAT : OP_STAT;
715 PL_laststatval = req->result;
716 PL_statcache = *(req->statdata);
717 }
718
719 req_invoke (req);
720
721 count++;
722 }
723
724 req_free (req);
303 } 725 }
304 726
305 pthread_mutex_unlock (&reslock); 727 if (nreqs <= max_outstanding)
306
307 if (!req)
308 break; 728 break;
309 729
310 nreqs--; 730 poll_wait ();
311 731
312 if (req->type == REQ_QUIT) 732 max = 0;
313 started--;
314 else if (req->type == REQ_GROUP && req->grp_next != req)
315 {
316 req->fd = 1; /* mark request as delayed */
317 continue;
318 }
319 else
320 {
321 if (req->type == REQ_READ)
322 SvCUR_set (req->data, req->dataoffset + (req->result > 0 ? req->result : 0));
323
324 if (req->data2ptr && (req->type == REQ_READ || req->type == REQ_WRITE))
325 SvREADONLY_off (req->data);
326
327 if (req->statdata)
328 {
329 PL_laststype = req->type == REQ_LSTAT ? OP_LSTAT : OP_STAT;
330 PL_laststatval = req->result;
331 PL_statcache = *(req->statdata);
332 }
333
334 req_invoke (req);
335
336 count++;
337 }
338
339 req_free (req);
340 } 733 }
341 734
342 return count; 735 return count;
343}
344
345static void *aio_proc(void *arg);
346
347static void start_thread (void)
348{
349 sigset_t fullsigset, oldsigset;
350 pthread_t tid;
351 pthread_attr_t attr;
352
353 pthread_attr_init (&attr);
354 pthread_attr_setstacksize (&attr, STACKSIZE);
355 pthread_attr_setdetachstate (&attr, PTHREAD_CREATE_DETACHED);
356
357 sigfillset (&fullsigset);
358 sigprocmask (SIG_SETMASK, &fullsigset, &oldsigset);
359
360 if (pthread_create (&tid, &attr, aio_proc, 0) == 0)
361 started++;
362
363 sigprocmask (SIG_SETMASK, &oldsigset, 0);
364}
365
366static void req_send (aio_req req)
367{
368 while (started < wanted && nreqs >= started)
369 start_thread ();
370
371 nreqs++;
372
373 pthread_mutex_lock (&reqlock);
374
375 req->next = 0;
376
377 if (reqe)
378 {
379 reqe->next = req;
380 reqe = req;
381 }
382 else
383 reqe = reqs = req;
384
385 pthread_cond_signal (&reqwait);
386 pthread_mutex_unlock (&reqlock);
387
388 if (nreqs > max_outstanding)
389 for (;;)
390 {
391 poll_cb ();
392
393 if (nreqs <= max_outstanding)
394 break;
395
396 poll_wait ();
397 }
398}
399
400static void end_thread (void)
401{
402 aio_req req;
403 Newz (0, req, 1, aio_cb);
404 req->type = REQ_QUIT;
405
406 req_send (req);
407}
408
409static void min_parallel (int nthreads)
410{
411 if (wanted < nthreads)
412 wanted = nthreads;
413}
414
415static void max_parallel (int nthreads)
416{
417 int cur = started;
418
419 if (wanted > nthreads)
420 wanted = nthreads;
421
422 while (cur > wanted)
423 {
424 end_thread ();
425 cur--;
426 }
427
428 while (started > wanted)
429 {
430 poll_wait ();
431 poll_cb ();
432 }
433} 736}
434 737
435static void create_pipe () 738static void create_pipe ()
436{ 739{
437 if (pipe (respipe)) 740 if (pipe (respipe))
461static ssize_t pread (int fd, void *buf, size_t count, off_t offset) 764static ssize_t pread (int fd, void *buf, size_t count, off_t offset)
462{ 765{
463 ssize_t res; 766 ssize_t res;
464 off_t ooffset; 767 off_t ooffset;
465 768
466 pthread_mutex_lock (&preadwritelock); 769 LOCK (preadwritelock);
467 ooffset = lseek (fd, 0, SEEK_CUR); 770 ooffset = lseek (fd, 0, SEEK_CUR);
468 lseek (fd, offset, SEEK_SET); 771 lseek (fd, offset, SEEK_SET);
469 res = read (fd, buf, count); 772 res = read (fd, buf, count);
470 lseek (fd, ooffset, SEEK_SET); 773 lseek (fd, ooffset, SEEK_SET);
471 pthread_mutex_unlock (&preadwritelock); 774 UNLOCK (preadwritelock);
472 775
473 return res; 776 return res;
474} 777}
475 778
476static ssize_t pwrite (int fd, void *buf, size_t count, off_t offset) 779static ssize_t pwrite (int fd, void *buf, size_t count, off_t offset)
477{ 780{
478 ssize_t res; 781 ssize_t res;
479 off_t ooffset; 782 off_t ooffset;
480 783
481 pthread_mutex_lock (&preadwritelock); 784 LOCK (preadwritelock);
482 ooffset = lseek (fd, 0, SEEK_CUR); 785 ooffset = lseek (fd, 0, SEEK_CUR);
483 lseek (fd, offset, SEEK_SET); 786 lseek (fd, offset, SEEK_SET);
484 res = write (fd, buf, count); 787 res = write (fd, buf, count);
485 lseek (fd, offset, SEEK_SET); 788 lseek (fd, offset, SEEK_SET);
486 pthread_mutex_unlock (&preadwritelock); 789 UNLOCK (preadwritelock);
487 790
488 return res; 791 return res;
489} 792}
490#endif 793#endif
491 794
492#if !HAVE_FDATASYNC 795#if !HAVE_FDATASYNC
493# define fdatasync fsync 796# define fdatasync fsync
494#endif 797#endif
495 798
496#if !HAVE_READAHEAD 799#if !HAVE_READAHEAD
497# define readahead aio_readahead 800# define readahead(fd,offset,count) aio_readahead (fd, offset, count, self)
498 801
499static ssize_t readahead (int fd, off_t offset, size_t count) 802static ssize_t aio_readahead (int fd, off_t offset, size_t count, worker *self)
500{ 803{
501 char readahead_buf[4096]; 804 dBUF;
502 805
503 while (count > 0) 806 while (count > 0)
504 { 807 {
505 size_t len = count < sizeof (readahead_buf) ? count : sizeof (readahead_buf); 808 size_t len = count < AIO_BUFSIZE ? count : AIO_BUFSIZE;
506 809
507 pread (fd, readahead_buf, len, offset); 810 pread (fd, aio_buf, len, offset);
508 offset += len; 811 offset += len;
509 count -= len; 812 count -= len;
510 } 813 }
511 814
512 errno = 0; 815 errno = 0;
513} 816}
817
514#endif 818#endif
515 819
516#if !HAVE_READDIR_R 820#if !HAVE_READDIR_R
517# define readdir_r aio_readdir_r 821# define readdir_r aio_readdir_r
518 822
521static int readdir_r (DIR *dirp, struct dirent *ent, struct dirent **res) 825static int readdir_r (DIR *dirp, struct dirent *ent, struct dirent **res)
522{ 826{
523 struct dirent *e; 827 struct dirent *e;
524 int errorno; 828 int errorno;
525 829
526 pthread_mutex_lock (&readdirlock); 830 LOCK (readdirlock);
527 831
528 e = readdir (dirp); 832 e = readdir (dirp);
529 errorno = errno; 833 errorno = errno;
530 834
531 if (e) 835 if (e)
534 strcpy (ent->d_name, e->d_name); 838 strcpy (ent->d_name, e->d_name);
535 } 839 }
536 else 840 else
537 *res = 0; 841 *res = 0;
538 842
539 pthread_mutex_unlock (&readdirlock); 843 UNLOCK (readdirlock);
540 844
541 errno = errorno; 845 errno = errorno;
542 return e ? 0 : -1; 846 return e ? 0 : -1;
543} 847}
544#endif 848#endif
545 849
546/* sendfile always needs emulation */ 850/* sendfile always needs emulation */
547static ssize_t sendfile_ (int ofd, int ifd, off_t offset, size_t count) 851static ssize_t sendfile_ (int ofd, int ifd, off_t offset, size_t count, worker *self)
548{ 852{
549 ssize_t res; 853 ssize_t res;
550 854
551 if (!count) 855 if (!count)
552 return 0; 856 return 0;
563 { 867 {
564 off_t sbytes; 868 off_t sbytes;
565 res = sendfile (ifd, ofd, offset, count, 0, &sbytes, 0); 869 res = sendfile (ifd, ofd, offset, count, 0, &sbytes, 0);
566 870
567 if (res < 0 && sbytes) 871 if (res < 0 && sbytes)
568 /* maybe only on EAGAIN only: as usual, the manpage leaves you guessing */ 872 /* maybe only on EAGAIN: as usual, the manpage leaves you guessing */
569 res = sbytes; 873 res = sbytes;
570 } 874 }
571 875
572# elif __hpux 876# elif __hpux
573 res = sendfile (ofd, ifd, offset, count, 0, 0); 877 res = sendfile (ofd, ifd, offset, count, 0, 0);
601#endif 905#endif
602 ) 906 )
603 ) 907 )
604 { 908 {
605 /* emulate sendfile. this is a major pain in the ass */ 909 /* emulate sendfile. this is a major pain in the ass */
606 char buf[4096]; 910 dBUF;
911
607 res = 0; 912 res = 0;
608 913
609 while (count) 914 while (count)
610 { 915 {
611 ssize_t cnt; 916 ssize_t cnt;
612 917
613 cnt = pread (ifd, buf, count > 4096 ? 4096 : count, offset); 918 cnt = pread (ifd, aio_buf, count > AIO_BUFSIZE ? AIO_BUFSIZE : count, offset);
614 919
615 if (cnt <= 0) 920 if (cnt <= 0)
616 { 921 {
617 if (cnt && !res) res = -1; 922 if (cnt && !res) res = -1;
618 break; 923 break;
619 } 924 }
620 925
621 cnt = write (ofd, buf, cnt); 926 cnt = write (ofd, aio_buf, cnt);
622 927
623 if (cnt <= 0) 928 if (cnt <= 0)
624 { 929 {
625 if (cnt && !res) res = -1; 930 if (cnt && !res) res = -1;
626 break; 931 break;
634 939
635 return res; 940 return res;
636} 941}
637 942
638/* read a full directory */ 943/* read a full directory */
639static int scandir_ (const char *path, void **namesp) 944static void scandir_ (aio_req req, worker *self)
640{ 945{
641 DIR *dirp = opendir (path); 946 DIR *dirp;
642 union 947 union
643 { 948 {
644 struct dirent d; 949 struct dirent d;
645 char b [offsetof (struct dirent, d_name) + NAME_MAX + 1]; 950 char b [offsetof (struct dirent, d_name) + NAME_MAX + 1];
646 } u; 951 } *u;
647 struct dirent *entp; 952 struct dirent *entp;
648 char *name, *names; 953 char *name, *names;
649 int memlen = 4096; 954 int memlen = 4096;
650 int memofs = 0; 955 int memofs = 0;
651 int res = 0; 956 int res = 0;
652 int errorno; 957 int errorno;
653 958
654 if (!dirp) 959 LOCK (wrklock);
655 return -1; 960 self->dirp = dirp = opendir (req->dataptr);
656 961 self->dbuf = u = malloc (sizeof (*u));
657 names = malloc (memlen); 962 req->data2ptr = names = malloc (memlen);
963 UNLOCK (wrklock);
964
965 if (dirp && u && names)
966 for (;;)
967 {
968 errno = 0;
969 readdir_r (dirp, &u->d, &entp);
970
971 if (!entp)
972 break;
973
974 name = entp->d_name;
975
976 if (name [0] != '.' || (name [1] && (name [1] != '.' || name [2])))
977 {
978 int len = strlen (name) + 1;
979
980 res++;
981
982 while (memofs + len > memlen)
983 {
984 memlen *= 2;
985 LOCK (wrklock);
986 req->data2ptr = names = realloc (names, memlen);
987 UNLOCK (wrklock);
988
989 if (!names)
990 break;
991 }
992
993 memcpy (names + memofs, name, len);
994 memofs += len;
995 }
996 }
997
998 if (errno)
999 res = -1;
1000
1001 req->result = res;
1002}
1003
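scandir_ above returns the directory listing as one flat buffer in req->data2ptr, each name NUL-terminated and packed back to back, with the entry count in req->result; req_invoke later walks that buffer to rebuild the Perl array. A small standalone sketch of packing and unpacking such a buffer (illustrative names, not the module's code):

    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    int main (void)
    {
        const char *names[] = { "Makefile.PL", "AIO.xs", "README" };
        int count = 3, i;
        size_t memlen = 4096, memofs = 0;
        char *buf = malloc (memlen), *p;

        if (!buf)
            return 1;

        /* pack: copy each name including its trailing NUL, growing as needed */
        for (i = 0; i < count; ++i)
        {
            size_t len = strlen (names[i]) + 1;

            while (memofs + len > memlen)
            {
                char *tmp = realloc (buf, memlen *= 2);

                if (!tmp)
                {
                    free (buf);
                    return 1;
                }

                buf = tmp;
            }

            memcpy (buf + memofs, names[i], len);
            memofs += len;
        }

        /* unpack: advance by strlen()+1 per entry, as req_invoke does */
        for (p = buf, i = 0; i < count; ++i, p += strlen (p) + 1)
            printf ("entry %d: %s\n", i, p);

        free (buf);
        return 0;
    }
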
1004/*****************************************************************************/
1005
1006static void *aio_proc (void *thr_arg)
1007{
1008 aio_req req;
1009 worker *self = (worker *)thr_arg;
658 1010
659 for (;;) 1011 for (;;)
660 { 1012 {
661 errno = 0, readdir_r (dirp, &u.d, &entp); 1013 LOCK (reqlock);
662
663 if (!entp)
664 break;
665
666 name = entp->d_name;
667
668 if (name [0] != '.' || (name [1] && (name [1] != '.' || name [2])))
669 {
670 int len = strlen (name) + 1;
671
672 res++;
673
674 while (memofs + len > memlen)
675 {
676 memlen *= 2;
677 names = realloc (names, memlen);
678 if (!names)
679 break;
680 }
681
682 memcpy (names + memofs, name, len);
683 memofs += len;
684 }
685 }
686
687 errorno = errno;
688 closedir (dirp);
689
690 if (errorno)
691 {
692 free (names);
693 errno = errorno;
694 res = -1;
695 }
696
697 *namesp = (void *)names;
698 return res;
699}
700
701/*****************************************************************************/
702
703static void *aio_proc (void *thr_arg)
704{
705 aio_req req;
706 int type;
707
708 do
709 {
710 pthread_mutex_lock (&reqlock);
711 1014
712 for (;;) 1015 for (;;)
713 { 1016 {
714 req = reqs; 1017 self->req = req = reqq_shift (&req_queue);
715
716 if (reqs)
717 {
718 reqs = reqs->next;
719 if (!reqs) reqe = 0;
720 }
721 1018
722 if (req) 1019 if (req)
723 break; 1020 break;
724 1021
725 pthread_cond_wait (&reqwait, &reqlock); 1022 pthread_cond_wait (&reqwait, &reqlock);
726 } 1023 }
727 1024
728 pthread_mutex_unlock (&reqlock); 1025 --nready;
1026
1027 UNLOCK (reqlock);
729 1028
730 errno = 0; /* strictly unnecessary */ 1029 errno = 0; /* strictly unnecessary */
731 1030
732 if (!req->cancelled) 1031 if (!(req->flags & FLAG_CANCELLED))
733 switch (req->type) 1032 switch (req->type)
734 { 1033 {
735 case REQ_READ: req->result = pread (req->fd, req->dataptr, req->length, req->offset); break; 1034 case REQ_READ: req->result = pread (req->fd, req->dataptr, req->length, req->offset); break;
736 case REQ_WRITE: req->result = pwrite (req->fd, req->dataptr, req->length, req->offset); break; 1035 case REQ_WRITE: req->result = pwrite (req->fd, req->dataptr, req->length, req->offset); break;
737 1036
738 case REQ_READAHEAD: req->result = readahead (req->fd, req->offset, req->length); break; 1037 case REQ_READAHEAD: req->result = readahead (req->fd, req->offset, req->length); break;
739 case REQ_SENDFILE: req->result = sendfile_ (req->fd, req->fd2, req->offset, req->length); break; 1038 case REQ_SENDFILE: req->result = sendfile_ (req->fd, req->fd2, req->offset, req->length, self); break;
740 1039
741 case REQ_STAT: req->result = stat (req->dataptr, req->statdata); break; 1040 case REQ_STAT: req->result = stat (req->dataptr, req->statdata); break;
742 case REQ_LSTAT: req->result = lstat (req->dataptr, req->statdata); break; 1041 case REQ_LSTAT: req->result = lstat (req->dataptr, req->statdata); break;
743 case REQ_FSTAT: req->result = fstat (req->fd , req->statdata); break; 1042 case REQ_FSTAT: req->result = fstat (req->fd , req->statdata); break;
744 1043
747 case REQ_UNLINK: req->result = unlink (req->dataptr); break; 1046 case REQ_UNLINK: req->result = unlink (req->dataptr); break;
748 case REQ_RMDIR: req->result = rmdir (req->dataptr); break; 1047 case REQ_RMDIR: req->result = rmdir (req->dataptr); break;
749 case REQ_RENAME: req->result = rename (req->data2ptr, req->dataptr); break; 1048 case REQ_RENAME: req->result = rename (req->data2ptr, req->dataptr); break;
750 case REQ_LINK: req->result = link (req->data2ptr, req->dataptr); break; 1049 case REQ_LINK: req->result = link (req->data2ptr, req->dataptr); break;
751 case REQ_SYMLINK: req->result = symlink (req->data2ptr, req->dataptr); break; 1050 case REQ_SYMLINK: req->result = symlink (req->data2ptr, req->dataptr); break;
1051 case REQ_MKNOD: req->result = mknod (req->data2ptr, req->mode, (dev_t)req->offset); break;
752 1052
753 case REQ_FDATASYNC: req->result = fdatasync (req->fd); break; 1053 case REQ_FDATASYNC: req->result = fdatasync (req->fd); break;
754 case REQ_FSYNC: req->result = fsync (req->fd); break; 1054 case REQ_FSYNC: req->result = fsync (req->fd); break;
755 case REQ_READDIR: req->result = scandir_ (req->dataptr, &req->data2ptr); break; 1055 case REQ_READDIR: scandir_ (req, self); break;
756 1056
757 case REQ_SLEEP: 1057 case REQ_BUSY:
758 { 1058 {
759 struct timeval tv; 1059 struct timeval tv;
760 1060
761 tv.tv_sec = req->fd; 1061 tv.tv_sec = req->fd;
762 tv.tv_usec = req->fd2; 1062 tv.tv_usec = req->fd2;
763 1063
764 req->result = select (0, 0, 0, 0, &tv); 1064 req->result = select (0, 0, 0, 0, &tv);
765 } 1065 }
766 1066
1067 case REQ_GROUP:
1068 case REQ_NOP:
1069 break;
1070
767 case REQ_QUIT: 1071 case REQ_QUIT:
1072 LOCK (wrklock);
1073 worker_free (self);
1074 --started;
1075 UNLOCK (wrklock);
768 break; 1076 return 0;
769 1077
770 default: 1078 default:
771 req->result = ENOSYS; 1079 req->result = ENOSYS;
772 break; 1080 break;
773 } 1081 }
774 1082
775 req->errorno = errno; 1083 req->errorno = errno;
776 1084
777 pthread_mutex_lock (&reslock); 1085 LOCK (reslock);
778 1086
779 req->next = 0; 1087 ++npending;
780 1088
781 if (rese) 1089 if (!reqq_push (&res_queue, req))
782 {
783 rese->next = req;
784 rese = req;
785 }
786 else
787 {
788 rese = ress = req;
789
790 /* write a dummy byte to the pipe so fh becomes ready */ 1090 /* write a dummy byte to the pipe so fh becomes ready */
791 write (respipe [1], &respipe, 1); 1091 write (respipe [1], &respipe, 1);
792 }
793 1092
794 pthread_mutex_unlock (&reslock); 1093 self->req = 0;
795 } 1094 worker_clear (self);
796 while (type != REQ_QUIT);
797 1095
798 return 0; 1096 UNLOCK (reslock);
1097 }
799} 1098}
800 1099
801/*****************************************************************************/ 1100/*****************************************************************************/
802 1101
803static void atfork_prepare (void) 1102static void atfork_prepare (void)
804{ 1103{
805 pthread_mutex_lock (&reqlock); 1104 LOCK (wrklock);
806 pthread_mutex_lock (&reslock); 1105 LOCK (reqlock);
1106 LOCK (reslock);
807#if !HAVE_PREADWRITE 1107#if !HAVE_PREADWRITE
808 pthread_mutex_lock (&preadwritelock); 1108 LOCK (preadwritelock);
809#endif 1109#endif
810#if !HAVE_READDIR_R 1110#if !HAVE_READDIR_R
811 pthread_mutex_lock (&readdirlock); 1111 LOCK (readdirlock);
812#endif 1112#endif
813} 1113}
814 1114
815static void atfork_parent (void) 1115static void atfork_parent (void)
816{ 1116{
817#if !HAVE_READDIR_R 1117#if !HAVE_READDIR_R
818 pthread_mutex_unlock (&readdirlock); 1118 UNLOCK (readdirlock);
819#endif 1119#endif
820#if !HAVE_PREADWRITE 1120#if !HAVE_PREADWRITE
821 pthread_mutex_unlock (&preadwritelock); 1121 UNLOCK (preadwritelock);
822#endif 1122#endif
823 pthread_mutex_unlock (&reslock); 1123 UNLOCK (reslock);
824 pthread_mutex_unlock (&reqlock); 1124 UNLOCK (reqlock);
1125 UNLOCK (wrklock);
825} 1126}
826 1127
827static void atfork_child (void) 1128static void atfork_child (void)
828{ 1129{
829 aio_req prv; 1130 aio_req prv;
830 1131
1132 while (prv = reqq_shift (&req_queue))
1133 req_free (prv);
1134
1135 while (prv = reqq_shift (&res_queue))
1136 req_free (prv);
1137
1138 while (wrk_first.next != &wrk_first)
1139 {
1140 worker *wrk = wrk_first.next;
1141
1142 if (wrk->req)
1143 req_free (wrk->req);
1144
1145 worker_clear (wrk);
1146 worker_free (wrk);
1147 }
1148
831 started = 0; 1149 started = 0;
832 1150 nreqs = 0;
833 while (reqs)
834 {
835 prv = reqs;
836 reqs = prv->next;
837 req_free (prv);
838 }
839
840 reqs = reqe = 0;
841
842 while (ress)
843 {
844 prv = ress;
845 ress = prv->next;
846 req_free (prv);
847 }
848
849 ress = rese = 0;
850 1151
851 close (respipe [0]); 1152 close (respipe [0]);
852 close (respipe [1]); 1153 close (respipe [1]);
853 create_pipe (); 1154 create_pipe ();
854 1155
855 atfork_parent (); 1156 atfork_parent ();
856} 1157}
857 1158
858#define dREQ \ 1159#define dREQ \
859 aio_req req; \ 1160 aio_req req; \
1161 int req_pri = next_pri; \
1162 next_pri = DEFAULT_PRI + PRI_BIAS; \
860 \ 1163 \
861 if (SvOK (callback) && !SvROK (callback)) \ 1164 if (SvOK (callback) && !SvROK (callback)) \
862 croak ("callback must be undef or of reference type"); \ 1165 croak ("callback must be undef or of reference type"); \
863 \ 1166 \
864 Newz (0, req, 1, aio_cb); \ 1167 Newz (0, req, 1, aio_cb); \
865 if (!req) \ 1168 if (!req) \
866 croak ("out of memory during aio_req allocation"); \ 1169 croak ("out of memory during aio_req allocation"); \
867 \ 1170 \
868 req->callback = newSVsv (callback) 1171 req->callback = newSVsv (callback); \
1172 req->pri = req_pri
869 1173
870#define REQ_SEND \ 1174#define REQ_SEND \
871 req_send (req); \ 1175 req_send (req); \
872 \ 1176 \
873 if (GIMME_V != G_VOID) \ 1177 if (GIMME_V != G_VOID) \
878PROTOTYPES: ENABLE 1182PROTOTYPES: ENABLE
879 1183
880BOOT: 1184BOOT:
881{ 1185{
882 HV *stash = gv_stashpv ("IO::AIO", 1); 1186 HV *stash = gv_stashpv ("IO::AIO", 1);
1187
883 newCONSTSUB (stash, "EXDEV", newSViv (EXDEV)); 1188 newCONSTSUB (stash, "EXDEV", newSViv (EXDEV));
884 newCONSTSUB (stash, "O_RDONLY", newSViv (O_RDONLY)); 1189 newCONSTSUB (stash, "O_RDONLY", newSViv (O_RDONLY));
885 newCONSTSUB (stash, "O_WRONLY", newSViv (O_WRONLY)); 1190 newCONSTSUB (stash, "O_WRONLY", newSViv (O_WRONLY));
1191 newCONSTSUB (stash, "O_CREAT", newSViv (O_CREAT));
1192 newCONSTSUB (stash, "O_TRUNC", newSViv (O_TRUNC));
1193 newCONSTSUB (stash, "S_IFIFO", newSViv (S_IFIFO));
886 1194
887 create_pipe (); 1195 create_pipe ();
888 pthread_atfork (atfork_prepare, atfork_parent, atfork_child); 1196 pthread_atfork (atfork_prepare, atfork_parent, atfork_child);
889}
890 1197
1198 start_thread ();
1199}
1200
891void 1201void
892min_parallel (nthreads) 1202min_parallel (int nthreads)
893 int nthreads
894 PROTOTYPE: $ 1203 PROTOTYPE: $
895 1204
896void 1205void
897max_parallel (nthreads) 1206max_parallel (int nthreads)
898 int nthreads
899 PROTOTYPE: $ 1207 PROTOTYPE: $
900 1208
901int 1209int
902max_outstanding (nreqs) 1210max_outstanding (int maxreqs)
903 int nreqs 1211 PROTOTYPE: $
904 PROTOTYPE: $
905 CODE: 1212 CODE:
906 RETVAL = max_outstanding; 1213 RETVAL = max_outstanding;
907 max_outstanding = nreqs; 1214 max_outstanding = maxreqs;
1215 OUTPUT:
1216 RETVAL
908 1217
909void 1218void
910aio_open (pathname,flags,mode,callback=&PL_sv_undef) 1219aio_open (pathname,flags,mode,callback=&PL_sv_undef)
911 SV * pathname 1220 SV * pathname
912 int flags 1221 int flags
1127 1436
1128 REQ_SEND; 1437 REQ_SEND;
1129} 1438}
1130 1439
1131void 1440void
1441aio_mknod (pathname,mode,dev,callback=&PL_sv_undef)
1442 SV * pathname
1443 SV * callback
1444 UV mode
1445 UV dev
1446 PPCODE:
1447{
1448 dREQ;
1449
1450 req->type = REQ_MKNOD;
1451 req->data = newSVsv (pathname);
1452 req->dataptr = SvPVbyte_nolen (req->data);
1453 req->mode = (mode_t)mode;
1454 req->offset = dev;
1455
1456 REQ_SEND;
1457}
1458
1459void
1132aio_sleep (delay,callback=&PL_sv_undef) 1460aio_busy (delay,callback=&PL_sv_undef)
1133 double delay 1461 double delay
1134 SV * callback 1462 SV * callback
1135 PPCODE: 1463 PPCODE:
1136{ 1464{
1137 dREQ; 1465 dREQ;
1138 1466
1139 req->type = REQ_SLEEP; 1467 req->type = REQ_BUSY;
1140 req->fd = delay < 0. ? 0 : delay; 1468 req->fd = delay < 0. ? 0 : delay;
1141 req->fd2 = delay < 0. ? 0 : 1000. * (delay - req->fd); 1469 req->fd2 = delay < 0. ? 0 : 1000. * (delay - req->fd);
1142 1470
1143 REQ_SEND; 1471 REQ_SEND;
1144} 1472}
1148 SV * callback 1476 SV * callback
1149 PROTOTYPE: ;$ 1477 PROTOTYPE: ;$
1150 PPCODE: 1478 PPCODE:
1151{ 1479{
1152 dREQ; 1480 dREQ;
1481
1153 req->type = REQ_GROUP; 1482 req->type = REQ_GROUP;
1154 req->grp_next = req;
1155 req->grp_prev = req;
1156
1157 req_send (req); 1483 req_send (req);
1484
1158 XPUSHs (req_sv (req, AIO_GRP_KLASS)); 1485 XPUSHs (req_sv (req, AIO_GRP_KLASS));
1159} 1486}
1487
1488void
1489aio_nop (callback=&PL_sv_undef)
1490 SV * callback
1491 PPCODE:
1492{
1493 dREQ;
1494
1495 req->type = REQ_NOP;
1496
1497 REQ_SEND;
1498}
1499
1500int
1501aioreq_pri (int pri = 0)
1502 PROTOTYPE: ;$
1503 CODE:
1504 RETVAL = next_pri - PRI_BIAS;
1505 if (items > 0)
1506 {
1507 if (pri < PRI_MIN) pri = PRI_MIN;
1508 if (pri > PRI_MAX) pri = PRI_MAX;
1509 next_pri = pri + PRI_BIAS;
1510 }
1511 OUTPUT:
1512 RETVAL
1513
1514void
1515aioreq_nice (int nice = 0)
1516 CODE:
1517 nice = next_pri - nice;
1518 if (nice < PRI_MIN) nice = PRI_MIN;
1519 if (nice > PRI_MAX) nice = PRI_MAX;
1520 next_pri = nice + PRI_BIAS;
1160 1521
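aioreq_pri and aioreq_nice above operate on the priority window defined near the top of the diff: user-visible priorities run from PRI_MIN (-4) to PRI_MAX (4) and are biased by PRI_BIAS into queue indices 0..NUM_PRI-1, with next_pri applying to the next request only (dREQ resets it to DEFAULT_PRI + PRI_BIAS). A tiny sketch of the clamp-and-bias mapping (illustrative only, not the module's code):

    #include <stdio.h>

    enum { PRI_MIN = -4, PRI_MAX = 4, PRI_BIAS = -PRI_MIN };

    static int biased_pri (int pri)
    {
        if (pri < PRI_MIN) pri = PRI_MIN;  /* clamp into the legal window */
        if (pri > PRI_MAX) pri = PRI_MAX;
        return pri + PRI_BIAS;             /* 0 .. NUM_PRI-1 */
    }

    int main (void)
    {
        /* prints 0 4 8: minimum, default and maximum queue index */
        printf ("%d %d %d\n", biased_pri (-9), biased_pri (0), biased_pri (4));
        return 0;
    }
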
1161void 1522void
1162flush () 1523flush ()
1163 PROTOTYPE: 1524 PROTOTYPE:
1164 CODE: 1525 CODE:
1165 while (nreqs) 1526 while (nreqs)
1166 { 1527 {
1167 poll_wait (); 1528 poll_wait ();
1168 poll_cb (); 1529 poll_cb (0);
1169 } 1530 }
1170 1531
1171void 1532void
1172poll() 1533poll()
1173 PROTOTYPE: 1534 PROTOTYPE:
1174 CODE: 1535 CODE:
1175 if (nreqs) 1536 if (nreqs)
1176 { 1537 {
1177 poll_wait (); 1538 poll_wait ();
1178 poll_cb (); 1539 poll_cb (0);
1179 } 1540 }
1180 1541
1181int 1542int
1182poll_fileno() 1543poll_fileno()
1183 PROTOTYPE: 1544 PROTOTYPE:
1188 1549
1189int 1550int
1190poll_cb(...) 1551poll_cb(...)
1191 PROTOTYPE: 1552 PROTOTYPE:
1192 CODE: 1553 CODE:
1193 RETVAL = poll_cb (); 1554 RETVAL = poll_cb (0);
1555 OUTPUT:
1556 RETVAL
1557
1558int
1559poll_some(int max = 0)
1560 PROTOTYPE: $
1561 CODE:
1562 RETVAL = poll_cb (max);
1194 OUTPUT: 1563 OUTPUT:
1195 RETVAL 1564 RETVAL
1196 1565
1197void 1566void
1198poll_wait() 1567poll_wait()
1207 CODE: 1576 CODE:
1208 RETVAL = nreqs; 1577 RETVAL = nreqs;
1209 OUTPUT: 1578 OUTPUT:
1210 RETVAL 1579 RETVAL
1211 1580
1212MODULE = IO::AIO PACKAGE = IO::AIO::REQ 1581int
1213 1582nready()
1214void
1215cancel (aio_req_ornot req)
1216 PROTOTYPE: 1583 PROTOTYPE:
1217 CODE: 1584 CODE:
1585 RETVAL = get_nready ();
1586 OUTPUT:
1587 RETVAL
1588
1589int
1590npending()
1591 PROTOTYPE:
1592 CODE:
1593 RETVAL = get_npending ();
1594 OUTPUT:
1595 RETVAL
1596
1597PROTOTYPES: DISABLE
1598
1599MODULE = IO::AIO PACKAGE = IO::AIO::REQ
1600
1601void
1602cancel (aio_req_ornot req)
1603 CODE:
1218 req_cancel (req); 1604 req_cancel (req);
1219 1605
1606void
1607cb (aio_req_ornot req, SV *callback=&PL_sv_undef)
1608 CODE:
1609 SvREFCNT_dec (req->callback);
1610 req->callback = newSVsv (callback);
1611
1220MODULE = IO::AIO PACKAGE = IO::AIO::GRP 1612MODULE = IO::AIO PACKAGE = IO::AIO::GRP
1221 1613
1222void 1614void
1223add (aio_req grp, ...) 1615add (aio_req grp, ...)
1224 PROTOTYPE: $;@
1225 PPCODE: 1616 PPCODE:
1226{ 1617{
1227 int i; 1618 int i;
1619 aio_req req;
1620
1621 if (grp->fd == 2)
1622 croak ("cannot add requests to IO::AIO::GRP after the group finished");
1228 1623
1229 for (i = 1; i < items; ++i ) 1624 for (i = 1; i < items; ++i )
1230 { 1625 {
1231 if (GIMME_V != G_VOID) 1626 if (GIMME_V != G_VOID)
1232 XPUSHs (sv_2mortal (newSVsv (ST (i)))); 1627 XPUSHs (sv_2mortal (newSVsv (ST (i))));
1233 1628
1234 aio_req req = SvAIO_REQ (ST (i)); 1629 req = SvAIO_REQ (ST (i));
1235 printf ("req = %p\n", req);//D
1236 1630
1237 if (req) 1631 if (req)
1238 { 1632 {
1239 req->grp_prev = grp; 1633 ++grp->length;
1240 req->grp_next = grp->grp_next;
1241 grp->grp_next->grp_prev = req;
1242 grp->grp_next = req;
1243
1244 req->grp = grp; 1634 req->grp = grp;
1635
1636 req->grp_prev = 0;
1637 req->grp_next = grp->grp_first;
1638
1639 if (grp->grp_first)
1640 grp->grp_first->grp_prev = req;
1641
1642 grp->grp_first = req;
1245 } 1643 }
1246 } 1644 }
1247} 1645}
1248 1646
1647void
1648cancel_subs (aio_req_ornot req)
1649 CODE:
1650 req_cancel_subs (req);
1651
1652void
1653result (aio_req grp, ...)
1654 CODE:
1655{
1656 int i;
1657 AV *av;
1658
1659 grp->errorno = errno;
1660
1661 av = newAV ();
1662
1663 for (i = 1; i < items; ++i )
1664 av_push (av, newSVsv (ST (i)));
1665
1666 SvREFCNT_dec (grp->data);
1667 grp->data = (SV *)av;
1668}
1669
1670void
1671errno (aio_req grp, int errorno = errno)
1672 CODE:
1673 grp->errorno = errorno;
1674
1675void
1676limit (aio_req grp, int limit)
1677 CODE:
1678 grp->fd2 = limit;
1679 aio_grp_feed (grp);
1680
1681void
1682feed (aio_req grp, SV *callback=&PL_sv_undef)
1683 CODE:
1684{
1685 SvREFCNT_dec (grp->fh2);
1686 grp->fh2 = newSVsv (callback);
1687
1688 if (grp->fd2 <= 0)
1689 grp->fd2 = 2;
1690
1691 aio_grp_feed (grp);
1692}
1693
