/cvs/IO-AIO/AIO.xs

Comparing IO-AIO/AIO.xs (file contents):
Revision 1.43 by root, Sat Oct 21 23:06:04 2006 UTC vs.
Revision 1.80 by root, Fri Oct 27 19:17:23 2006 UTC

1/* solaris */
2#define _POSIX_PTHREAD_SEMANTICS 1
3
4#if __linux && !defined(_GNU_SOURCE)
5# define _GNU_SOURCE
6#endif
7
8/* just in case */
1#define _REENTRANT 1 9#define _REENTRANT 1
10
2#include <errno.h> 11#include <errno.h>
3 12
4#include "EXTERN.h" 13#include "EXTERN.h"
5#include "perl.h" 14#include "perl.h"
6#include "XSUB.h" 15#include "XSUB.h"
9 18
10#include <pthread.h> 19#include <pthread.h>
11 20
12#include <stddef.h> 21#include <stddef.h>
13#include <errno.h> 22#include <errno.h>
23#include <sys/time.h>
24#include <sys/select.h>
14#include <sys/types.h> 25#include <sys/types.h>
15#include <sys/stat.h> 26#include <sys/stat.h>
16#include <limits.h> 27#include <limits.h>
17#include <unistd.h> 28#include <unistd.h>
18#include <fcntl.h> 29#include <fcntl.h>
37/* used for struct dirent, AIX doesn't provide it */ 48/* used for struct dirent, AIX doesn't provide it */
38#ifndef NAME_MAX 49#ifndef NAME_MAX
39# define NAME_MAX 4096 50# define NAME_MAX 4096
40#endif 51#endif
41 52
53#ifndef PTHREAD_STACK_MIN
54/* care for broken platforms, e.g. windows */
55# define PTHREAD_STACK_MIN 16384
56#endif
57
42#if __ia64 58#if __ia64
43# define STACKSIZE 65536 59# define STACKSIZE 65536
60#elif __i386 || __x86_64 /* 16k is unreasonably high :( */
61# define STACKSIZE PTHREAD_STACK_MIN
44#else 62#else
45# define STACKSIZE 8192 63# define STACKSIZE 16384
64#endif
65
66/* whether word reads are potentially non-atomic.
67 * this is conservative, likely most arches this runs
68 * on have atomic word read/writes.
69 */
70#ifndef WORDREAD_UNSAFE
71# if __i386 || __x86_64
72# define WORDREAD_UNSAFE 0
73# else
74# define WORDREAD_UNSAFE 1
46#endif 75# endif
76#endif
77
78/* buffer size for various temporary buffers */
79#define AIO_BUFSIZE 65536
80
81#define dBUF \
82 char *aio_buf; \
83 LOCK (wrklock); \
84 self->dbuf = aio_buf = malloc (AIO_BUFSIZE); \
85 UNLOCK (wrklock); \
86 if (!aio_buf) \
87 return -1;
47 88
48enum { 89enum {
49 REQ_QUIT, 90 REQ_QUIT,
50 REQ_OPEN, REQ_CLOSE, 91 REQ_OPEN, REQ_CLOSE,
51 REQ_READ, REQ_WRITE, REQ_READAHEAD, 92 REQ_READ, REQ_WRITE, REQ_READAHEAD,
53 REQ_STAT, REQ_LSTAT, REQ_FSTAT, 94 REQ_STAT, REQ_LSTAT, REQ_FSTAT,
54 REQ_FSYNC, REQ_FDATASYNC, 95 REQ_FSYNC, REQ_FDATASYNC,
55 REQ_UNLINK, REQ_RMDIR, REQ_RENAME, 96 REQ_UNLINK, REQ_RMDIR, REQ_RENAME,
56 REQ_READDIR, 97 REQ_READDIR,
57 REQ_LINK, REQ_SYMLINK, 98 REQ_LINK, REQ_SYMLINK,
99 REQ_GROUP, REQ_NOP,
100 REQ_BUSY,
58}; 101};
59 102
60#define AIO_CB_KLASS "IO::AIO::CB" 103#define AIO_REQ_KLASS "IO::AIO::REQ"
104#define AIO_GRP_KLASS "IO::AIO::GRP"
61 105
62typedef struct aio_cb 106typedef struct aio_cb
63{ 107{
64 struct aio_cb *grp_prev, *grp_next;
65 struct aio_grp *grp;
66
67 struct aio_cb *volatile next; 108 struct aio_cb *volatile next;
68
69 SV *self; /* the perl counterpart of this request, if any */
70 109
71 SV *data, *callback; 110 SV *data, *callback;
72 SV *fh, *fh2; 111 SV *fh, *fh2;
73 void *dataptr, *data2ptr; 112 void *dataptr, *data2ptr;
74 Stat_t *statdata; 113 Stat_t *statdata;
75 off_t offset; 114 off_t offset;
76 size_t length; 115 size_t length;
77 ssize_t result; 116 ssize_t result;
78 117
118 STRLEN dataoffset;
79 int type; 119 int type;
80 int fd, fd2; 120 int fd, fd2;
81 int errorno; 121 int errorno;
82 STRLEN dataoffset;
83 mode_t mode; /* open */ 122 mode_t mode; /* open */
123
84 unsigned char cancelled; 124 unsigned char flags;
125 unsigned char pri;
126
127 SV *self; /* the perl counterpart of this request, if any */
128 struct aio_cb *grp, *grp_prev, *grp_next, *grp_first;
85} aio_cb; 129} aio_cb;
130
131enum {
132 FLAG_CANCELLED = 0x01,
133};
86 134
87typedef aio_cb *aio_req; 135typedef aio_cb *aio_req;
88typedef aio_cb *aio_req_ornot; 136typedef aio_cb *aio_req_ornot;
89 137
138enum {
139 PRI_MIN = -4,
140 PRI_MAX = 4,
141
142 DEFAULT_PRI = 0,
143 PRI_BIAS = -PRI_MIN,
144 NUM_PRI = PRI_MAX + PRI_BIAS + 1,
145};
146
147static int next_pri = DEFAULT_PRI + PRI_BIAS;
148
90static int started, wanted; 149static unsigned int started, wanted;
91static volatile int nreqs; 150
92static int max_outstanding = 1<<30; 151#if __linux && defined (PTHREAD_ADAPTIVE_MUTEX_INITIALIZER_NP)
152# define AIO_MUTEX_INIT PTHREAD_ADAPTIVE_MUTEX_INITIALIZER_NP
153#else
154# define AIO_MUTEX_INIT PTHREAD_MUTEX_INITIALIZER
155#endif
156
157#define LOCK(mutex) pthread_mutex_lock (&(mutex))
158#define UNLOCK(mutex) pthread_mutex_unlock (&(mutex))
159
160/* worker threads management */
161static pthread_mutex_t wrklock = AIO_MUTEX_INIT;
162
163typedef struct worker {
164 /* locked by wrklock */
165 struct worker *prev, *next;
166
167 pthread_t tid;
168
169 /* locked by reslock, reqlock or wrklock */
170 aio_req req; /* currently processed request */
171 void *dbuf;
172 DIR *dirp;
173} worker;
174
175static worker wrk_first = { &wrk_first, &wrk_first, 0 };
176
177static void worker_clear (worker *wrk)
178{
179 if (wrk->dirp)
180 {
181 closedir (wrk->dirp);
182 wrk->dirp = 0;
183 }
184
185 if (wrk->dbuf)
186 {
187 free (wrk->dbuf);
188 wrk->dbuf = 0;
189 }
190}
191
192static void worker_free (worker *wrk)
193{
194 wrk->next->prev = wrk->prev;
195 wrk->prev->next = wrk->next;
196
197 free (wrk);
198}
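
For illustration only (this sketch is not part of AIO.xs or of the diff): the dBUF macro added above hands a worker thread a temporary buffer and records it in self->dbuf, so that worker_clear () can release it once the request is finished. A hypothetical worker-side helper showing the intended usage, assuming the definitions above (the name copy_chunk is made up):

static ssize_t copy_chunk (int ifd, int ofd, size_t count, worker *self)
{
  ssize_t got;
  dBUF; /* declares aio_buf, allocates AIO_BUFSIZE bytes under wrklock, returns -1 on failure */

  got = read (ifd, aio_buf, count < AIO_BUFSIZE ? count : AIO_BUFSIZE);

  if (got > 0)
    got = write (ofd, aio_buf, got);

  /* aio_buf is not freed here; worker_clear (self) takes care of that */
  return got;
}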
199
200static volatile unsigned int nreqs, nready, npending;
201static volatile unsigned int max_outstanding = 0xffffffff;
93static int respipe [2]; 202static int respipe [2];
94 203
95static pthread_mutex_t reslock = PTHREAD_MUTEX_INITIALIZER; 204static pthread_mutex_t reslock = AIO_MUTEX_INIT;
96static pthread_mutex_t reqlock = PTHREAD_MUTEX_INITIALIZER; 205static pthread_mutex_t reqlock = AIO_MUTEX_INIT;
97static pthread_cond_t reqwait = PTHREAD_COND_INITIALIZER; 206static pthread_cond_t reqwait = PTHREAD_COND_INITIALIZER;
98 207
99static volatile aio_req reqs, reqe; /* queue start, queue end */ 208#if WORDREAD_UNSAFE
100static volatile aio_req ress, rese; /* queue start, queue end */
101 209
210static unsigned int get_nready ()
211{
212 unsigned int retval;
213
214 LOCK (reqlock);
215 retval = nready;
216 UNLOCK (reqlock);
217
218 return retval;
219}
220
221static unsigned int get_npending ()
222{
223 unsigned int retval;
224
225 LOCK (reslock);
226 retval = npending;
227 UNLOCK (reslock);
228
229 return retval;
230}
231
232#else
233
234# define get_nready() nready
235# define get_npending() npending
236
237#endif
238
239/*
240 * a somewhat faster data structure might be nice, but
241 * with 8 priorities this actually needs <20 insns
242 * per shift, the most expensive operation.
243 */
102typedef struct aio_grp 244typedef struct {
103{ 245 aio_req qs[NUM_PRI], qe[NUM_PRI]; /* qstart, qend */
104 struct aio_cb *first, *last; 246 int size;
105 SV *callback; 247} reqq;
106 int busycount;
107} aio_grp;
108 248
109static void aio_grp_begin (aio_grp *grp) 249static reqq req_queue;
110{ 250static reqq res_queue;
111 ++grp->busycount;
112}
113 251
114static void aio_grp_end (aio_grp *grp) 252int reqq_push (reqq *q, aio_req req)
115{ 253{
116 --grp->busycount; 254 int pri = req->pri;
255 req->next = 0;
117 256
118 if (grp->busycount) 257 if (q->qe[pri])
258 {
259 q->qe[pri]->next = req;
260 q->qe[pri] = req;
261 }
262 else
263 q->qe[pri] = q->qs[pri] = req;
264
265 return q->size++;
266}
267
268aio_req reqq_shift (reqq *q)
269{
270 int pri;
271
272 if (!q->size)
119 return; 273 return 0;
120 274
121 SvREFCNT_dec (grp->callback); 275 --q->size;
122 grp->callback = 0;
123}
124 276
125static aio_grp *aio_grp_new () 277 for (pri = NUM_PRI; pri--; )
126{ 278 {
127 aio_grp *grp; 279 aio_req req = q->qs[pri];
128 280
129 Newz (0, grp, 1, aio_grp); 281 if (req)
130 aio_grp_begin (grp); 282 {
283 if (!(q->qs[pri] = req->next))
284 q->qe[pri] = 0;
131 285
132 return grp; 286 return req;
287 }
288 }
289
290 abort ();
133} 291}
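
For illustration only (not part of the diff): reqq keeps one FIFO bucket per priority (NUM_PRI = PRI_MAX + PRI_BIAS + 1 = 9 buckets, indexed 0..8), and reqq_shift scans them from the highest priority downwards, so urgent requests overtake default-priority ones while requests of equal priority keep their submission order. A tiny sketch of that behaviour, assuming the definitions above (reqq_example is a made-up name):

static void reqq_example (void)
{
  reqq q;
  aio_req lo, hi, first, second;

  memset (&q, 0, sizeof (q));

  Newz (0, lo, 1, aio_cb); lo->pri = DEFAULT_PRI + PRI_BIAS; /* bucket 4 of 0..8                  */
  Newz (0, hi, 1, aio_cb); hi->pri = PRI_MAX + PRI_BIAS;     /* bucket 8, the value REQ_QUIT uses */

  reqq_push (&q, lo); /* returns the size before the push (0), size becomes 1 */
  reqq_push (&q, hi); /* returns 1, size becomes 2 */

  first  = reqq_shift (&q); /* == hi: buckets are scanned from PRI_MAX downwards */
  second = reqq_shift (&q); /* == lo: same-priority requests stay in FIFO order  */

  Safefree (first);
  Safefree (second);
}

The return value of reqq_push (the size before the push) is what lets aio_proc further below write the wake-up byte to respipe only when it pushes into an empty result queue.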
292
293static int poll_cb (int max);
294static void req_invoke (aio_req req);
295static void req_free (aio_req req);
296static void req_cancel (aio_req req);
134 297
135/* must be called at most once */ 298/* must be called at most once */
136static SV *req_sv (aio_req req) 299static SV *req_sv (aio_req req, const char *klass)
137{ 300{
301 if (!req->self)
302 {
138 req->self = (SV *)newHV (); 303 req->self = (SV *)newHV ();
139 sv_magic (req->self, 0, PERL_MAGIC_ext, (char *)req, 0); 304 sv_magic (req->self, 0, PERL_MAGIC_ext, (char *)req, 0);
305 }
140 306
141 return sv_bless (newRV_noinc (req->self), gv_stashpv (AIO_CB_KLASS, 1)); 307 return sv_2mortal (sv_bless (newRV_inc (req->self), gv_stashpv (klass, 1)));
142} 308}
143 309
144static aio_req SvAIO_REQ (SV *sv) 310static aio_req SvAIO_REQ (SV *sv)
145{ 311{
312 MAGIC *mg;
313
146 if (!sv_derived_from (sv, AIO_CB_KLASS) || !SvROK (sv)) 314 if (!sv_derived_from (sv, AIO_REQ_KLASS) || !SvROK (sv))
147 croak ("object of class " AIO_CB_KLASS " expected"); 315 croak ("object of class " AIO_REQ_KLASS " expected");
148 316
149 MAGIC *mg = mg_find (SvRV (sv), PERL_MAGIC_ext); 317 mg = mg_find (SvRV (sv), PERL_MAGIC_ext);
150 318
151 return mg ? (aio_req)mg->mg_ptr : 0; 319 return mg ? (aio_req)mg->mg_ptr : 0;
320}
321
322static void aio_grp_feed (aio_req grp)
323{
324 while (grp->length < grp->fd2 && !(grp->flags & FLAG_CANCELLED))
325 {
326 int old_len = grp->length;
327
328 if (grp->fh2 && SvOK (grp->fh2))
329 {
330 dSP;
331
332 ENTER;
333 SAVETMPS;
334 PUSHMARK (SP);
335 XPUSHs (req_sv (grp, AIO_GRP_KLASS));
336 PUTBACK;
337 call_sv (grp->fh2, G_VOID | G_EVAL | G_KEEPERR);
338 SPAGAIN;
339 FREETMPS;
340 LEAVE;
341 }
342
343 /* stop if no progress has been made */
344 if (old_len == grp->length)
345 {
346 SvREFCNT_dec (grp->fh2);
347 grp->fh2 = 0;
348 break;
349 }
350 }
351}
352
353static void aio_grp_dec (aio_req grp)
354{
355 --grp->length;
356
357 /* call feeder, if applicable */
358 aio_grp_feed (grp);
359
360 /* finish, if done */
361 if (!grp->length && grp->fd)
362 {
363 req_invoke (grp);
364 req_free (grp);
365 }
366}
367
368static void req_invoke (aio_req req)
369{
370 dSP;
371
372 if (!(req->flags & FLAG_CANCELLED) && SvOK (req->callback))
373 {
374 ENTER;
375 SAVETMPS;
376 PUSHMARK (SP);
377 EXTEND (SP, 1);
378
379 switch (req->type)
380 {
381 case REQ_READDIR:
382 {
383 SV *rv = &PL_sv_undef;
384
385 if (req->result >= 0)
386 {
387 int i;
388 char *buf = req->data2ptr;
389 AV *av = newAV ();
390
391 av_extend (av, req->result - 1);
392
393 for (i = 0; i < req->result; ++i)
394 {
395 SV *sv = newSVpv (buf, 0);
396
397 av_store (av, i, sv);
398 buf += SvCUR (sv) + 1;
399 }
400
401 rv = sv_2mortal (newRV_noinc ((SV *)av));
402 }
403
404 PUSHs (rv);
405 }
406 break;
407
408 case REQ_OPEN:
409 {
410 /* convert fd to fh */
411 SV *fh;
412
413 PUSHs (sv_2mortal (newSViv (req->result)));
414 PUTBACK;
415 call_pv ("IO::AIO::_fd2fh", G_SCALAR | G_EVAL);
416 SPAGAIN;
417
418 fh = SvREFCNT_inc (POPs);
419
420 PUSHMARK (SP);
421 XPUSHs (sv_2mortal (fh));
422 }
423 break;
424
425 case REQ_GROUP:
426 req->fd = 2; /* mark group as finished */
427
428 if (req->data)
429 {
430 int i;
431 AV *av = (AV *)req->data;
432
433 EXTEND (SP, AvFILL (av) + 1);
434 for (i = 0; i <= AvFILL (av); ++i)
435 PUSHs (*av_fetch (av, i, 0));
436 }
437 break;
438
439 case REQ_NOP:
440 case REQ_BUSY:
441 break;
442
443 default:
444 PUSHs (sv_2mortal (newSViv (req->result)));
445 break;
446 }
447
448 errno = req->errorno;
449
450 PUTBACK;
451 call_sv (req->callback, G_VOID | G_EVAL);
452 SPAGAIN;
453
454 FREETMPS;
455 LEAVE;
456 }
457
458 if (req->grp)
459 {
460 aio_req grp = req->grp;
461
462 /* unlink request */
463 if (req->grp_next) req->grp_next->grp_prev = req->grp_prev;
464 if (req->grp_prev) req->grp_prev->grp_next = req->grp_next;
465
466 if (grp->grp_first == req)
467 grp->grp_first = req->grp_next;
468
469 aio_grp_dec (grp);
470 }
471
472 if (SvTRUE (ERRSV))
473 {
474 req_free (req);
475 croak (0);
476 }
152} 477}
153 478
154static void req_free (aio_req req) 479static void req_free (aio_req req)
155{ 480{
156 if (req->self) 481 if (req->self)
157 { 482 {
158 sv_unmagic (req->self, PERL_MAGIC_ext); 483 sv_unmagic (req->self, PERL_MAGIC_ext);
159 SvREFCNT_dec (req->self); 484 SvREFCNT_dec (req->self);
160 } 485 }
161 486
162 if (req->data)
163 SvREFCNT_dec (req->data); 487 SvREFCNT_dec (req->data);
164
165 if (req->fh)
166 SvREFCNT_dec (req->fh); 488 SvREFCNT_dec (req->fh);
167
168 if (req->fh2)
169 SvREFCNT_dec (req->fh2); 489 SvREFCNT_dec (req->fh2);
170
171 if (req->statdata)
172 Safefree (req->statdata);
173
174 if (req->callback)
175 SvREFCNT_dec (req->callback); 490 SvREFCNT_dec (req->callback);
491 Safefree (req->statdata);
176 492
177 if (req->type == REQ_READDIR && req->result >= 0) 493 if (req->type == REQ_READDIR)
178 free (req->data2ptr); 494 free (req->data2ptr);
179 495
180 Safefree (req); 496 Safefree (req);
181} 497}
182 498
183static void 499static void req_cancel_subs (aio_req grp)
184poll_wait ()
185{ 500{
186 if (nreqs && !ress) 501 aio_req sub;
502
503 if (grp->type != REQ_GROUP)
504 return;
505
506 SvREFCNT_dec (grp->fh2);
507 grp->fh2 = 0;
508
509 for (sub = grp->grp_first; sub; sub = sub->grp_next)
510 req_cancel (sub);
511}
512
513static void req_cancel (aio_req req)
514{
515 req->flags |= FLAG_CANCELLED;
516
517 req_cancel_subs (req);
518}
519
520static void *aio_proc(void *arg);
521
522static void start_thread (void)
523{
524 sigset_t fullsigset, oldsigset;
525 pthread_attr_t attr;
526
527 worker *wrk = calloc (1, sizeof (worker));
528
529 if (!wrk)
530 croak ("unable to allocate worker thread data");
531
532 pthread_attr_init (&attr);
533 pthread_attr_setstacksize (&attr, STACKSIZE);
534 pthread_attr_setdetachstate (&attr, PTHREAD_CREATE_DETACHED);
535#ifdef PTHREAD_SCOPE_PROCESS
536 pthread_attr_setscope (&attr, PTHREAD_SCOPE_PROCESS);
537#endif
538
539 sigfillset (&fullsigset);
540
541 LOCK (wrklock);
542 sigprocmask (SIG_SETMASK, &fullsigset, &oldsigset);
543
544 if (pthread_create (&wrk->tid, &attr, aio_proc, (void *)wrk) == 0)
545 {
546 wrk->prev = &wrk_first;
547 wrk->next = wrk_first.next;
548 wrk_first.next->prev = wrk;
549 wrk_first.next = wrk;
550 ++started;
187 { 551 }
552 else
553 free (wrk);
554
555 sigprocmask (SIG_SETMASK, &oldsigset, 0);
556 UNLOCK (wrklock);
557}
558
559static void maybe_start_thread ()
560{
561#if 0
562 static struct timeval last;
563 struct timeval diff, now;
564#endif
565
566 if (started >= wanted)
567 return;
568
569 if (nready <= nreqs - get_nready () - get_npending ())
570 return;
571
572#if 0
573 gettimeofday (&now, 0);
574
575 diff.tv_sec = now.tv_sec - last.tv_sec;
576 diff.tv_usec = now.tv_usec - last.tv_usec;
577
578 if (diff.tv_usec < 0)
579 {
580 --diff.tv_sec;
581 diff.tv_usec += 1000000;
582 }
583
584 if (!diff.tv_sec && diff.tv_usec < 10000)
585 return;
586
587 last = now;
588#endif
589
590 start_thread ();
591}
592
593static void req_send (aio_req req)
594{
595 ++nreqs;
596
597 LOCK (reqlock);
598 ++nready;
599 reqq_push (&req_queue, req);
600 pthread_cond_signal (&reqwait);
601 UNLOCK (reqlock);
602
603 maybe_start_thread ();
604}
605
606static void end_thread (void)
607{
608 aio_req req;
609
610 Newz (0, req, 1, aio_cb);
611
612 req->type = REQ_QUIT;
613 req->pri = PRI_MAX + PRI_BIAS;
614
615 req_send (req);
616
617 LOCK (wrklock);
618 --started;
619 UNLOCK (wrklock);
620}
621
622static void min_parallel (int nthreads)
623{
624 if (wanted < nthreads)
625 wanted = nthreads;
626}
627
628static void max_parallel (int nthreads)
629{
630 if (wanted > nthreads)
631 wanted = nthreads;
632
633 while (started > wanted)
634 end_thread ();
635}
636
637static void poll_wait ()
638{
188 fd_set rfd; 639 fd_set rfd;
640
641 while (nreqs)
642 {
643 int size;
644 if (WORDREAD_UNSAFE) LOCK (reslock);
645 size = res_queue.size;
646 if (WORDREAD_UNSAFE) UNLOCK (reslock);
647
648 if (size)
649 return;
650
651 maybe_start_thread ();
652
189 FD_ZERO(&rfd); 653 FD_ZERO(&rfd);
190 FD_SET(respipe [0], &rfd); 654 FD_SET(respipe [0], &rfd);
191 655
192 select (respipe [0] + 1, &rfd, 0, 0, 0); 656 select (respipe [0] + 1, &rfd, 0, 0, 0);
193 } 657 }
194} 658}
195 659
196static int 660static int poll_cb (int max)
197poll_cb ()
198{ 661{
199 dSP; 662 dSP;
200 int count = 0; 663 int count = 0;
201 int do_croak = 0; 664 int do_croak = 0;
202 aio_req req; 665 aio_req req;
203 666
204 for (;;) 667 for (;;)
205 { 668 {
206 pthread_mutex_lock (&reslock); 669 while (max <= 0 || count < max)
207 req = ress;
208
209 if (req)
210 { 670 {
211 ress = req->next; 671 maybe_start_thread ();
212 672
673 LOCK (reslock);
674 req = reqq_shift (&res_queue);
675
213 if (!ress) 676 if (req)
214 { 677 {
678 --npending;
679
680 if (!res_queue.size)
681 {
215 /* read any signals sent by the worker threads */ 682 /* read any signals sent by the worker threads */
216 char buf [32]; 683 char buf [32];
217 while (read (respipe [0], buf, 32) == 32) 684 while (read (respipe [0], buf, 32) == 32)
685 ;
218 ; 686 }
219
220 rese = 0;
221 } 687 }
222 }
223 688
224 pthread_mutex_unlock (&reslock); 689 UNLOCK (reslock);
225 690
226 if (!req) 691 if (!req)
227 break; 692 break;
228 693
229 nreqs--; 694 --nreqs;
230 695
231 if (req->type == REQ_QUIT)
232 started--;
233 else
234 {
235 int errorno = errno;
236 errno = req->errorno;
237
238 if (req->type == REQ_READ) 696 if (req->type == REQ_GROUP && req->length)
239 SvCUR_set (req->data, req->dataoffset + (req->result > 0 ? req->result : 0));
240
241 if (req->data2ptr && (req->type == REQ_READ || req->type == REQ_WRITE))
242 SvREADONLY_off (req->data);
243
244 if (req->statdata)
245 { 697 {
246 PL_laststype = req->type == REQ_LSTAT ? OP_LSTAT : OP_STAT; 698 req->fd = 1; /* mark request as delayed */
247 PL_laststatval = req->result; 699 continue;
248 PL_statcache = *(req->statdata);
249 }
250
251 ENTER;
252 PUSHMARK (SP);
253
254 if (req->type == REQ_READDIR)
255 {
256 SV *rv = &PL_sv_undef;
257
258 if (req->result >= 0)
259 {
260 char *buf = req->data2ptr;
261 AV *av = newAV ();
262
263 while (req->result)
264 {
265 SV *sv = newSVpv (buf, 0);
266
267 av_push (av, sv);
268 buf += SvCUR (sv) + 1;
269 req->result--;
270 }
271
272 rv = sv_2mortal (newRV_noinc ((SV *)av));
273 }
274
275 XPUSHs (rv);
276 } 700 }
277 else 701 else
278 { 702 {
279 XPUSHs (sv_2mortal (newSViv (req->result)));
280
281 if (req->type == REQ_OPEN) 703 if (req->type == REQ_READ)
704 SvCUR_set (req->data, req->dataoffset + (req->result > 0 ? req->result : 0));
705
706 if (req->data2ptr && (req->type == REQ_READ || req->type == REQ_WRITE))
707 SvREADONLY_off (req->data);
708
709 if (req->statdata)
282 { 710 {
283 /* convert fd to fh */ 711 PL_laststype = req->type == REQ_LSTAT ? OP_LSTAT : OP_STAT;
284 SV *fh; 712 PL_laststatval = req->result;
285 713 PL_statcache = *(req->statdata);
286 PUTBACK;
287 call_pv ("IO::AIO::_fd2fh", G_SCALAR | G_EVAL);
288 SPAGAIN;
289
290 fh = SvREFCNT_inc (POPs);
291
292 PUSHMARK (SP);
293 XPUSHs (sv_2mortal (fh));
294 } 714 }
715
716 req_invoke (req);
717
718 count++;
295 } 719 }
296 720
297 if (SvOK (req->callback) && !req->cancelled)
298 {
299 PUTBACK;
300 call_sv (req->callback, G_VOID | G_EVAL);
301 SPAGAIN;
302
303 if (SvTRUE (ERRSV))
304 {
305 req_free (req); 721 req_free (req);
306 croak (0);
307 }
308 }
309
310 LEAVE;
311
312 errno = errorno;
313 count++;
314 } 722 }
315 723
316 req_free (req); 724 if (nreqs <= max_outstanding)
725 break;
726
727 poll_wait ();
728
729 max = 0;
317 } 730 }
318 731
319 return count; 732 return count;
320}
321
322static void *aio_proc(void *arg);
323
324static void
325start_thread (void)
326{
327 sigset_t fullsigset, oldsigset;
328 pthread_t tid;
329 pthread_attr_t attr;
330
331 pthread_attr_init (&attr);
332 pthread_attr_setstacksize (&attr, STACKSIZE);
333 pthread_attr_setdetachstate (&attr, PTHREAD_CREATE_DETACHED);
334
335 sigfillset (&fullsigset);
336 sigprocmask (SIG_SETMASK, &fullsigset, &oldsigset);
337
338 if (pthread_create (&tid, &attr, aio_proc, 0) == 0)
339 started++;
340
341 sigprocmask (SIG_SETMASK, &oldsigset, 0);
342}
343
344static void
345req_send (aio_req req)
346{
347 while (started < wanted && nreqs >= started)
348 start_thread ();
349
350 nreqs++;
351
352 pthread_mutex_lock (&reqlock);
353
354 req->next = 0;
355
356 if (reqe)
357 {
358 reqe->next = req;
359 reqe = req;
360 }
361 else
362 reqe = reqs = req;
363
364 pthread_cond_signal (&reqwait);
365 pthread_mutex_unlock (&reqlock);
366
367 if (nreqs > max_outstanding)
368 for (;;)
369 {
370 poll_cb ();
371
372 if (nreqs <= max_outstanding)
373 break;
374
375 poll_wait ();
376 }
377}
378
379static void
380end_thread (void)
381{
382 aio_req req;
383 Newz (0, req, 1, aio_cb);
384 req->type = REQ_QUIT;
385
386 req_send (req);
387}
388
389static void min_parallel (int nthreads)
390{
391 if (wanted < nthreads)
392 wanted = nthreads;
393}
394
395static void max_parallel (int nthreads)
396{
397 int cur = started;
398
399 if (wanted > nthreads)
400 wanted = nthreads;
401
402 while (cur > wanted)
403 {
404 end_thread ();
405 cur--;
406 }
407
408 while (started > wanted)
409 {
410 poll_wait ();
411 poll_cb ();
412 }
413} 733}
414 734
415static void create_pipe () 735static void create_pipe ()
416{ 736{
417 if (pipe (respipe)) 737 if (pipe (respipe))
436 * normal read/write by using a mutex. slows down execution a lot, 756 * normal read/write by using a mutex. slows down execution a lot,
437 * but that's your problem, not mine. 757 * but that's your problem, not mine.
438 */ 758 */
439static pthread_mutex_t preadwritelock = PTHREAD_MUTEX_INITIALIZER; 759static pthread_mutex_t preadwritelock = PTHREAD_MUTEX_INITIALIZER;
440 760
441static ssize_t 761static ssize_t pread (int fd, void *buf, size_t count, off_t offset)
442pread (int fd, void *buf, size_t count, off_t offset)
443{ 762{
444 ssize_t res; 763 ssize_t res;
445 off_t ooffset; 764 off_t ooffset;
446 765
447 pthread_mutex_lock (&preadwritelock); 766 LOCK (preadwritelock);
448 ooffset = lseek (fd, 0, SEEK_CUR); 767 ooffset = lseek (fd, 0, SEEK_CUR);
449 lseek (fd, offset, SEEK_SET); 768 lseek (fd, offset, SEEK_SET);
450 res = read (fd, buf, count); 769 res = read (fd, buf, count);
451 lseek (fd, ooffset, SEEK_SET); 770 lseek (fd, ooffset, SEEK_SET);
452 pthread_mutex_unlock (&preadwritelock); 771 UNLOCK (preadwritelock);
453 772
454 return res; 773 return res;
455} 774}
456 775
457static ssize_t
458pwrite (int fd, void *buf, size_t count, off_t offset) 776static ssize_t pwrite (int fd, void *buf, size_t count, off_t offset)
459{ 777{
460 ssize_t res; 778 ssize_t res;
461 off_t ooffset; 779 off_t ooffset;
462 780
463 pthread_mutex_lock (&preadwritelock); 781 LOCK (preadwritelock);
464 ooffset = lseek (fd, 0, SEEK_CUR); 782 ooffset = lseek (fd, 0, SEEK_CUR);
465 lseek (fd, offset, SEEK_SET); 783 lseek (fd, offset, SEEK_SET);
466 res = write (fd, buf, count); 784 res = write (fd, buf, count);
467 lseek (fd, offset, SEEK_SET); 785 lseek (fd, offset, SEEK_SET);
468 pthread_mutex_unlock (&preadwritelock); 786 UNLOCK (preadwritelock);
469 787
470 return res; 788 return res;
471} 789}
472#endif 790#endif
473 791
474#if !HAVE_FDATASYNC 792#if !HAVE_FDATASYNC
475# define fdatasync fsync 793# define fdatasync fsync
476#endif 794#endif
477 795
478#if !HAVE_READAHEAD 796#if !HAVE_READAHEAD
479# define readahead aio_readahead 797# define readahead(fd,offset,count) aio_readahead (fd, offset, count, self)
480 798
481static ssize_t 799static ssize_t aio_readahead (int fd, off_t offset, size_t count, worker *self)
482readahead (int fd, off_t offset, size_t count)
483{ 800{
484 char readahead_buf[4096]; 801 dBUF;
485 802
486 while (count > 0) 803 while (count > 0)
487 { 804 {
488 size_t len = count < sizeof (readahead_buf) ? count : sizeof (readahead_buf); 805 size_t len = count < AIO_BUFSIZE ? count : AIO_BUFSIZE;
489 806
490 pread (fd, readahead_buf, len, offset); 807 pread (fd, aio_buf, len, offset);
491 offset += len; 808 offset += len;
492 count -= len; 809 count -= len;
493 } 810 }
494 811
495 errno = 0; 812 errno = 0;
496} 813}
814
497#endif 815#endif
498 816
499#if !HAVE_READDIR_R 817#if !HAVE_READDIR_R
500# define readdir_r aio_readdir_r 818# define readdir_r aio_readdir_r
501 819
502static pthread_mutex_t readdirlock = PTHREAD_MUTEX_INITIALIZER; 820static pthread_mutex_t readdirlock = PTHREAD_MUTEX_INITIALIZER;
503 821
504static int
505readdir_r (DIR *dirp, struct dirent *ent, struct dirent **res) 822static int readdir_r (DIR *dirp, struct dirent *ent, struct dirent **res)
506{ 823{
507 struct dirent *e; 824 struct dirent *e;
508 int errorno; 825 int errorno;
509 826
510 pthread_mutex_lock (&readdirlock); 827 LOCK (readdirlock);
511 828
512 e = readdir (dirp); 829 e = readdir (dirp);
513 errorno = errno; 830 errorno = errno;
514 831
515 if (e) 832 if (e)
518 strcpy (ent->d_name, e->d_name); 835 strcpy (ent->d_name, e->d_name);
519 } 836 }
520 else 837 else
521 *res = 0; 838 *res = 0;
522 839
523 pthread_mutex_unlock (&readdirlock); 840 UNLOCK (readdirlock);
524 841
525 errno = errorno; 842 errno = errorno;
526 return e ? 0 : -1; 843 return e ? 0 : -1;
527} 844}
528#endif 845#endif
529 846
530/* sendfile always needs emulation */ 847/* sendfile always needs emulation */
531static ssize_t
532sendfile_ (int ofd, int ifd, off_t offset, size_t count) 848static ssize_t sendfile_ (int ofd, int ifd, off_t offset, size_t count, worker *self)
533{ 849{
534 ssize_t res; 850 ssize_t res;
535 851
536 if (!count) 852 if (!count)
537 return 0; 853 return 0;
548 { 864 {
549 off_t sbytes; 865 off_t sbytes;
550 res = sendfile (ifd, ofd, offset, count, 0, &sbytes, 0); 866 res = sendfile (ifd, ofd, offset, count, 0, &sbytes, 0);
551 867
552 if (res < 0 && sbytes) 868 if (res < 0 && sbytes)
553 /* maybe only on EAGAIN only: as usual, the manpage leaves you guessing */ 869 /* maybe only on EAGAIN: as usual, the manpage leaves you guessing */
554 res = sbytes; 870 res = sbytes;
555 } 871 }
556 872
557# elif __hpux 873# elif __hpux
558 res = sendfile (ofd, ifd, offset, count, 0, 0); 874 res = sendfile (ofd, ifd, offset, count, 0, 0);
586#endif 902#endif
587 ) 903 )
588 ) 904 )
589 { 905 {
590 /* emulate sendfile. this is a major pain in the ass */ 906 /* emulate sendfile. this is a major pain in the ass */
591 char buf[4096]; 907 dBUF;
908
592 res = 0; 909 res = 0;
593 910
594 while (count) 911 while (count)
595 { 912 {
596 ssize_t cnt; 913 ssize_t cnt;
597 914
598 cnt = pread (ifd, buf, count > 4096 ? 4096 : count, offset); 915 cnt = pread (ifd, aio_buf, count > AIO_BUFSIZE ? AIO_BUFSIZE : count, offset);
599 916
600 if (cnt <= 0) 917 if (cnt <= 0)
601 { 918 {
602 if (cnt && !res) res = -1; 919 if (cnt && !res) res = -1;
603 break; 920 break;
604 } 921 }
605 922
606 cnt = write (ofd, buf, cnt); 923 cnt = write (ofd, aio_buf, cnt);
607 924
608 if (cnt <= 0) 925 if (cnt <= 0)
609 { 926 {
610 if (cnt && !res) res = -1; 927 if (cnt && !res) res = -1;
611 break; 928 break;
619 936
620 return res; 937 return res;
621} 938}
622 939
623/* read a full directory */ 940/* read a full directory */
624static int 941static void scandir_ (aio_req req, worker *self)
625scandir_ (const char *path, void **namesp)
626{ 942{
627 DIR *dirp = opendir (path); 943 DIR *dirp;
628 union 944 union
629 { 945 {
630 struct dirent d; 946 struct dirent d;
631 char b [offsetof (struct dirent, d_name) + NAME_MAX + 1]; 947 char b [offsetof (struct dirent, d_name) + NAME_MAX + 1];
632 } u; 948 } *u;
633 struct dirent *entp; 949 struct dirent *entp;
634 char *name, *names; 950 char *name, *names;
635 int memlen = 4096; 951 int memlen = 4096;
636 int memofs = 0; 952 int memofs = 0;
637 int res = 0; 953 int res = 0;
638 int errorno; 954 int errorno;
639 955
640 if (!dirp) 956 LOCK (wrklock);
641 return -1; 957 self->dirp = dirp = opendir (req->dataptr);
642 958 self->dbuf = u = malloc (sizeof (*u));
643 names = malloc (memlen); 959 req->data2ptr = names = malloc (memlen);
960 UNLOCK (wrklock);
961
962 if (dirp && u && names)
963 for (;;)
964 {
965 errno = 0;
966 readdir_r (dirp, &u->d, &entp);
967
968 if (!entp)
969 break;
970
971 name = entp->d_name;
972
973 if (name [0] != '.' || (name [1] && (name [1] != '.' || name [2])))
974 {
975 int len = strlen (name) + 1;
976
977 res++;
978
979 while (memofs + len > memlen)
980 {
981 memlen *= 2;
982 LOCK (wrklock);
983 req->data2ptr = names = realloc (names, memlen);
984 UNLOCK (wrklock);
985
986 if (!names)
987 break;
988 }
989
990 memcpy (names + memofs, name, len);
991 memofs += len;
992 }
993 }
994
995 if (errno)
996 res = -1;
997
998 req->result = res;
999}
1000
1001/*****************************************************************************/
1002
1003static void *aio_proc (void *thr_arg)
1004{
1005 aio_req req;
1006 worker *self = (worker *)thr_arg;
644 1007
645 for (;;) 1008 for (;;)
646 { 1009 {
647 errno = 0, readdir_r (dirp, &u.d, &entp); 1010 LOCK (reqlock);
648
649 if (!entp)
650 break;
651
652 name = entp->d_name;
653
654 if (name [0] != '.' || (name [1] && (name [1] != '.' || name [2])))
655 {
656 int len = strlen (name) + 1;
657
658 res++;
659
660 while (memofs + len > memlen)
661 {
662 memlen *= 2;
663 names = realloc (names, memlen);
664 if (!names)
665 break;
666 }
667
668 memcpy (names + memofs, name, len);
669 memofs += len;
670 }
671 }
672
673 errorno = errno;
674 closedir (dirp);
675
676 if (errorno)
677 {
678 free (names);
679 errno = errorno;
680 res = -1;
681 }
682
683 *namesp = (void *)names;
684 return res;
685}
686
687/*****************************************************************************/
688
689static void *
690aio_proc (void *thr_arg)
691{
692 aio_req req;
693 int type;
694
695 do
696 {
697 pthread_mutex_lock (&reqlock);
698 1011
699 for (;;) 1012 for (;;)
700 { 1013 {
701 req = reqs; 1014 self->req = req = reqq_shift (&req_queue);
702
703 if (reqs)
704 {
705 reqs = reqs->next;
706 if (!reqs) reqe = 0;
707 }
708 1015
709 if (req) 1016 if (req)
710 break; 1017 break;
711 1018
712 pthread_cond_wait (&reqwait, &reqlock); 1019 pthread_cond_wait (&reqwait, &reqlock);
713 } 1020 }
714 1021
715 pthread_mutex_unlock (&reqlock); 1022 --nready;
1023
1024 UNLOCK (reqlock);
716 1025
717 errno = 0; /* strictly unnecessary */ 1026 errno = 0; /* strictly unnecessary */
718 1027
719 if (!req->cancelled) 1028 if (!(req->flags & FLAG_CANCELLED))
720 switch (req->type) 1029 switch (req->type)
721 { 1030 {
722 case REQ_READ: req->result = pread (req->fd, req->dataptr, req->length, req->offset); break; 1031 case REQ_READ: req->result = pread (req->fd, req->dataptr, req->length, req->offset); break;
723 case REQ_WRITE: req->result = pwrite (req->fd, req->dataptr, req->length, req->offset); break; 1032 case REQ_WRITE: req->result = pwrite (req->fd, req->dataptr, req->length, req->offset); break;
724 1033
725 case REQ_READAHEAD: req->result = readahead (req->fd, req->offset, req->length); break; 1034 case REQ_READAHEAD: req->result = readahead (req->fd, req->offset, req->length); break;
726 case REQ_SENDFILE: req->result = sendfile_ (req->fd, req->fd2, req->offset, req->length); break; 1035 case REQ_SENDFILE: req->result = sendfile_ (req->fd, req->fd2, req->offset, req->length, self); break;
727 1036
728 case REQ_STAT: req->result = stat (req->dataptr, req->statdata); break; 1037 case REQ_STAT: req->result = stat (req->dataptr, req->statdata); break;
729 case REQ_LSTAT: req->result = lstat (req->dataptr, req->statdata); break; 1038 case REQ_LSTAT: req->result = lstat (req->dataptr, req->statdata); break;
730 case REQ_FSTAT: req->result = fstat (req->fd , req->statdata); break; 1039 case REQ_FSTAT: req->result = fstat (req->fd , req->statdata); break;
731 1040
737 case REQ_LINK: req->result = link (req->data2ptr, req->dataptr); break; 1046 case REQ_LINK: req->result = link (req->data2ptr, req->dataptr); break;
738 case REQ_SYMLINK: req->result = symlink (req->data2ptr, req->dataptr); break; 1047 case REQ_SYMLINK: req->result = symlink (req->data2ptr, req->dataptr); break;
739 1048
740 case REQ_FDATASYNC: req->result = fdatasync (req->fd); break; 1049 case REQ_FDATASYNC: req->result = fdatasync (req->fd); break;
741 case REQ_FSYNC: req->result = fsync (req->fd); break; 1050 case REQ_FSYNC: req->result = fsync (req->fd); break;
742 case REQ_READDIR: req->result = scandir_ (req->dataptr, &req->data2ptr); break; 1051 case REQ_READDIR: scandir_ (req, self); break;
1052
1053 case REQ_BUSY:
1054 {
1055 struct timeval tv;
1056
1057 tv.tv_sec = req->fd;
1058 tv.tv_usec = req->fd2;
1059
1060 req->result = select (0, 0, 0, 0, &tv);
1061 }
1062
1063 case REQ_GROUP:
1064 case REQ_NOP:
1065 break;
743 1066
744 case REQ_QUIT: 1067 case REQ_QUIT:
1068 LOCK (wrklock);
1069 worker_free (self);
1070 --started;
1071 UNLOCK (wrklock);
745 break; 1072 return 0;
746 1073
747 default: 1074 default:
748 req->result = ENOSYS; 1075 req->result = ENOSYS;
749 break; 1076 break;
750 } 1077 }
751 1078
752 req->errorno = errno; 1079 req->errorno = errno;
753 1080
754 pthread_mutex_lock (&reslock); 1081 LOCK (reslock);
755 1082
756 req->next = 0; 1083 ++npending;
757 1084
758 if (rese) 1085 if (!reqq_push (&res_queue, req))
759 {
760 rese->next = req;
761 rese = req;
762 }
763 else
764 {
765 rese = ress = req;
766
767 /* write a dummy byte to the pipe so fh becomes ready */ 1086 /* write a dummy byte to the pipe so fh becomes ready */
768 write (respipe [1], &respipe, 1); 1087 write (respipe [1], &respipe, 1);
769 }
770 1088
771 pthread_mutex_unlock (&reslock); 1089 self->req = 0;
772 } 1090 worker_clear (self);
773 while (type != REQ_QUIT);
774 1091
775 return 0; 1092 UNLOCK (reslock);
1093 }
776} 1094}
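
For illustration only (not part of the diff): the wake-up mechanism used above is the classic self-pipe pattern. A worker writes a single dummy byte to respipe [1] when it pushes the first entry into an empty result queue, the Perl side can then select () or poll () on respipe [0] (exposed via poll_fileno) like any other handle, and poll_cb drains the pipe once the result queue is empty again. The same pattern in isolation, with made-up names:

static int notify_pipe [2]; /* set up once with pipe (notify_pipe) */

static void producer_signal (void)
{
  char dummy = 0;
  write (notify_pipe [1], &dummy, 1); /* wakes up anyone blocked in select () */
}

static void consumer_wait (void)
{
  fd_set rfd;
  char buf [32];

  FD_ZERO (&rfd);
  FD_SET (notify_pipe [0], &rfd);
  select (notify_pipe [0] + 1, &rfd, 0, 0, 0); /* block until signalled */

  /* drain pending wake-up bytes, mirroring the read loop in poll_cb above;
   * this assumes the read end is non-blocking */
  while (read (notify_pipe [0], buf, sizeof (buf)) == sizeof (buf))
    ;
}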
777 1095
778/*****************************************************************************/ 1096/*****************************************************************************/
779 1097
780static void atfork_prepare (void) 1098static void atfork_prepare (void)
781{ 1099{
782 pthread_mutex_lock (&reqlock); 1100 LOCK (wrklock);
783 pthread_mutex_lock (&reslock); 1101 LOCK (reqlock);
1102 LOCK (reslock);
784#if !HAVE_PREADWRITE 1103#if !HAVE_PREADWRITE
785 pthread_mutex_lock (&preadwritelock); 1104 LOCK (preadwritelock);
786#endif 1105#endif
787#if !HAVE_READDIR_R 1106#if !HAVE_READDIR_R
788 pthread_mutex_lock (&readdirlock); 1107 LOCK (readdirlock);
789#endif 1108#endif
790} 1109}
791 1110
792static void atfork_parent (void) 1111static void atfork_parent (void)
793{ 1112{
794#if !HAVE_READDIR_R 1113#if !HAVE_READDIR_R
795 pthread_mutex_unlock (&readdirlock); 1114 UNLOCK (readdirlock);
796#endif 1115#endif
797#if !HAVE_PREADWRITE 1116#if !HAVE_PREADWRITE
798 pthread_mutex_unlock (&preadwritelock); 1117 UNLOCK (preadwritelock);
799#endif 1118#endif
800 pthread_mutex_unlock (&reslock); 1119 UNLOCK (reslock);
801 pthread_mutex_unlock (&reqlock); 1120 UNLOCK (reqlock);
1121 UNLOCK (wrklock);
802} 1122}
803 1123
804static void atfork_child (void) 1124static void atfork_child (void)
805{ 1125{
806 aio_req prv; 1126 aio_req prv;
807 1127
1128 while (prv = reqq_shift (&req_queue))
1129 req_free (prv);
1130
1131 while (prv = reqq_shift (&res_queue))
1132 req_free (prv);
1133
1134 while (wrk_first.next != &wrk_first)
1135 {
1136 worker *wrk = wrk_first.next;
1137
1138 if (wrk->req)
1139 req_free (wrk->req);
1140
1141 worker_clear (wrk);
1142 worker_free (wrk);
1143 }
1144
808 started = 0; 1145 started = 0;
809 1146 nreqs = 0;
810 while (reqs)
811 {
812 prv = reqs;
813 reqs = prv->next;
814 req_free (prv);
815 }
816
817 reqs = reqe = 0;
818
819 while (ress)
820 {
821 prv = ress;
822 ress = prv->next;
823 req_free (prv);
824 }
825
826 ress = rese = 0;
827 1147
828 close (respipe [0]); 1148 close (respipe [0]);
829 close (respipe [1]); 1149 close (respipe [1]);
830 create_pipe (); 1150 create_pipe ();
831 1151
832 atfork_parent (); 1152 atfork_parent ();
833} 1153}
834 1154
835#define dREQ \ 1155#define dREQ \
836 aio_req req; \ 1156 aio_req req; \
1157 int req_pri = next_pri; \
1158 next_pri = DEFAULT_PRI + PRI_BIAS; \
837 \ 1159 \
838 if (SvOK (callback) && !SvROK (callback)) \ 1160 if (SvOK (callback) && !SvROK (callback)) \
839 croak ("callback must be undef or of reference type"); \ 1161 croak ("callback must be undef or of reference type"); \
840 \ 1162 \
841 Newz (0, req, 1, aio_cb); \ 1163 Newz (0, req, 1, aio_cb); \
842 if (!req) \ 1164 if (!req) \
843 croak ("out of memory during aio_req allocation"); \ 1165 croak ("out of memory during aio_req allocation"); \
844 \ 1166 \
845 req->callback = newSVsv (callback) 1167 req->callback = newSVsv (callback); \
1168 req->pri = req_pri
846 1169
847#define REQ_SEND \ 1170#define REQ_SEND \
848 req_send (req); \ 1171 req_send (req); \
849 \ 1172 \
850 if (GIMME_V != G_VOID) \ 1173 if (GIMME_V != G_VOID) \
851 XPUSHs (req_sv (req)); 1174 XPUSHs (req_sv (req, AIO_REQ_KLASS));
852 1175
853MODULE = IO::AIO PACKAGE = IO::AIO 1176MODULE = IO::AIO PACKAGE = IO::AIO
854 1177
855PROTOTYPES: ENABLE 1178PROTOTYPES: ENABLE
856 1179
861 newCONSTSUB (stash, "O_RDONLY", newSViv (O_RDONLY)); 1184 newCONSTSUB (stash, "O_RDONLY", newSViv (O_RDONLY));
862 newCONSTSUB (stash, "O_WRONLY", newSViv (O_WRONLY)); 1185 newCONSTSUB (stash, "O_WRONLY", newSViv (O_WRONLY));
863 1186
864 create_pipe (); 1187 create_pipe ();
865 pthread_atfork (atfork_prepare, atfork_parent, atfork_child); 1188 pthread_atfork (atfork_prepare, atfork_parent, atfork_child);
866}
867 1189
1190 start_thread ();
1191}
1192
868void 1193void
869min_parallel (nthreads) 1194min_parallel (int nthreads)
870 int nthreads
871 PROTOTYPE: $ 1195 PROTOTYPE: $
872 1196
873void 1197void
874max_parallel (nthreads) 1198max_parallel (int nthreads)
875 int nthreads
876 PROTOTYPE: $ 1199 PROTOTYPE: $
877 1200
878int 1201int
879max_outstanding (nreqs) 1202max_outstanding (int maxreqs)
880 int nreqs 1203 PROTOTYPE: $
881 PROTOTYPE: $
882 CODE: 1204 CODE:
883 RETVAL = max_outstanding; 1205 RETVAL = max_outstanding;
884 max_outstanding = nreqs; 1206 max_outstanding = maxreqs;
1207 OUTPUT:
1208 RETVAL
885 1209
886void 1210void
887aio_open (pathname,flags,mode,callback=&PL_sv_undef) 1211aio_open (pathname,flags,mode,callback=&PL_sv_undef)
888 SV * pathname 1212 SV * pathname
889 int flags 1213 int flags
1103 req->dataptr = SvPVbyte_nolen (req->data); 1427 req->dataptr = SvPVbyte_nolen (req->data);
1104 1428
1105 REQ_SEND; 1429 REQ_SEND;
1106} 1430}
1107 1431
1108#if 0
1109
1110# undocumented, because it does not cancel active requests
1111void 1432void
1112cancel_most_requests () 1433aio_busy (delay,callback=&PL_sv_undef)
1434 double delay
1435 SV * callback
1436 PPCODE:
1437{
1438 dREQ;
1439
1440 req->type = REQ_BUSY;
1441 req->fd = delay < 0. ? 0 : delay;
1442 req->fd2 = delay < 0. ? 0 : 1000. * (delay - req->fd);
1443
1444 REQ_SEND;
1445}
1446
1447void
1448aio_group (callback=&PL_sv_undef)
1449 SV * callback
1113 PROTOTYPE: 1450 PROTOTYPE: ;$
1114 CODE: 1451 PPCODE:
1115{ 1452{
1116 aio_req *req; 1453 dREQ;
1117 1454
1118 pthread_mutex_lock (&reqlock); 1455 req->type = REQ_GROUP;
1119 for (req = reqs; req; req = req->next) 1456 req_send (req);
1120 req->flags |= 1;
1121 pthread_mutex_unlock (&reqlock);
1122 1457
1123 pthread_mutex_lock (&reslock); 1458 XPUSHs (req_sv (req, AIO_GRP_KLASS));
1124 for (req = ress; req; req = req->next)
1125 req->flags |= 1;
1126 pthread_mutex_unlock (&reslock);
1127} 1459}
1128 1460
1129#endif 1461void
1462aio_nop (callback=&PL_sv_undef)
1463 SV * callback
1464 PPCODE:
1465{
1466 dREQ;
1467
1468 req->type = REQ_NOP;
1469
1470 REQ_SEND;
1471}
1472
1473int
1474aioreq_pri (int pri = 0)
1475 PROTOTYPE: ;$
1476 CODE:
1477 RETVAL = next_pri - PRI_BIAS;
1478 if (items > 0)
1479 {
1480 if (pri < PRI_MIN) pri = PRI_MIN;
1481 if (pri > PRI_MAX) pri = PRI_MAX;
1482 next_pri = pri + PRI_BIAS;
1483 }
1484 OUTPUT:
1485 RETVAL
1486
1487void
1488aioreq_nice (int nice = 0)
1489 CODE:
1490 nice = next_pri - nice;
1491 if (nice < PRI_MIN) nice = PRI_MIN;
1492 if (nice > PRI_MAX) nice = PRI_MAX;
1493 next_pri = nice + PRI_BIAS;
1130 1494
1131void 1495void
1132flush () 1496flush ()
1133 PROTOTYPE: 1497 PROTOTYPE:
1134 CODE: 1498 CODE:
1135 while (nreqs) 1499 while (nreqs)
1136 { 1500 {
1137 poll_wait (); 1501 poll_wait ();
1138 poll_cb (); 1502 poll_cb (0);
1139 } 1503 }
1140 1504
1141void 1505void
1142poll() 1506poll()
1143 PROTOTYPE: 1507 PROTOTYPE:
1144 CODE: 1508 CODE:
1145 if (nreqs) 1509 if (nreqs)
1146 { 1510 {
1147 poll_wait (); 1511 poll_wait ();
1148 poll_cb (); 1512 poll_cb (0);
1149 } 1513 }
1150 1514
1151int 1515int
1152poll_fileno() 1516poll_fileno()
1153 PROTOTYPE: 1517 PROTOTYPE:
1158 1522
1159int 1523int
1160poll_cb(...) 1524poll_cb(...)
1161 PROTOTYPE: 1525 PROTOTYPE:
1162 CODE: 1526 CODE:
1163 RETVAL = poll_cb (); 1527 RETVAL = poll_cb (0);
1528 OUTPUT:
1529 RETVAL
1530
1531int
1532poll_some(int max = 0)
1533 PROTOTYPE: $
1534 CODE:
1535 RETVAL = poll_cb (max);
1164 OUTPUT: 1536 OUTPUT:
1165 RETVAL 1537 RETVAL
1166 1538
1167void 1539void
1168poll_wait() 1540poll_wait()
1177 CODE: 1549 CODE:
1178 RETVAL = nreqs; 1550 RETVAL = nreqs;
1179 OUTPUT: 1551 OUTPUT:
1180 RETVAL 1552 RETVAL
1181 1553
1182MODULE = IO::AIO PACKAGE = IO::AIO::CB 1554int
1183 1555nready()
1184void
1185cancel (aio_req_ornot req)
1186 PROTOTYPE: 1556 PROTOTYPE:
1187 CODE: 1557 CODE:
1188 req->cancelled = 1; 1558 RETVAL = get_nready ();
1559 OUTPUT:
1560 RETVAL
1189 1561
1562int
1563npending()
1564 PROTOTYPE:
1565 CODE:
1566 RETVAL = get_npending ();
1567 OUTPUT:
1568 RETVAL
1569
1570PROTOTYPES: DISABLE
1571
1572MODULE = IO::AIO PACKAGE = IO::AIO::REQ
1573
1574void
1575cancel (aio_req_ornot req)
1576 CODE:
1577 req_cancel (req);
1578
1579void
1580cb (aio_req_ornot req, SV *callback=&PL_sv_undef)
1581 CODE:
1582 SvREFCNT_dec (req->callback);
1583 req->callback = newSVsv (callback);
1584
1585MODULE = IO::AIO PACKAGE = IO::AIO::GRP
1586
1587void
1588add (aio_req grp, ...)
1589 PPCODE:
1590{
1591 int i;
1592 aio_req req;
1593
1594 if (grp->fd == 2)
1595 croak ("cannot add requests to IO::AIO::GRP after the group finished");
1596
1597 for (i = 1; i < items; ++i )
1598 {
1599 if (GIMME_V != G_VOID)
1600 XPUSHs (sv_2mortal (newSVsv (ST (i))));
1601
1602 req = SvAIO_REQ (ST (i));
1603
1604 if (req)
1605 {
1606 ++grp->length;
1607 req->grp = grp;
1608
1609 req->grp_prev = 0;
1610 req->grp_next = grp->grp_first;
1611
1612 if (grp->grp_first)
1613 grp->grp_first->grp_prev = req;
1614
1615 grp->grp_first = req;
1616 }
1617 }
1618}
1619
1620void
1621cancel_subs (aio_req_ornot req)
1622 CODE:
1623 req_cancel_subs (req);
1624
1625void
1626result (aio_req grp, ...)
1627 CODE:
1628{
1629 int i;
1630 AV *av;
1631
1632 grp->errorno = errno;
1633
1634 av = newAV ();
1635
1636 for (i = 1; i < items; ++i )
1637 av_push (av, newSVsv (ST (i)));
1638
1639 SvREFCNT_dec (grp->data);
1640 grp->data = (SV *)av;
1641}
1642
1643void
1644errno (aio_req grp, int errorno = errno)
1645 CODE:
1646 grp->errorno = errorno;
1647
1648void
1649limit (aio_req grp, int limit)
1650 CODE:
1651 grp->fd2 = limit;
1652 aio_grp_feed (grp);
1653
1654void
1655feed (aio_req grp, SV *callback=&PL_sv_undef)
1656 CODE:
1657{
1658 SvREFCNT_dec (grp->fh2);
1659 grp->fh2 = newSVsv (callback);
1660
1661 if (grp->fd2 <= 0)
1662 grp->fd2 = 2;
1663
1664 aio_grp_feed (grp);
1665}
1666
