/cvs/IO-AIO/AIO.xs

Comparing IO-AIO/AIO.xs (file contents):
Revision 1.63 by root, Mon Oct 23 23:48:31 2006 UTC vs.
Revision 1.82 by root, Fri Oct 27 20:11:58 2006 UTC

1#if __linux 1/* solaris */
2#define _POSIX_PTHREAD_SEMANTICS 1
3
4#if __linux && !defined(_GNU_SOURCE)
2# define _GNU_SOURCE 5# define _GNU_SOURCE
3#endif 6#endif
4 7
8/* just in case */
5#define _REENTRANT 1 9#define _REENTRANT 1
6 10
7#include <errno.h> 11#include <errno.h>
8 12
9#include "EXTERN.h" 13#include "EXTERN.h"
44/* used for struct dirent, AIX doesn't provide it */ 48/* used for struct dirent, AIX doesn't provide it */
45#ifndef NAME_MAX 49#ifndef NAME_MAX
46# define NAME_MAX 4096 50# define NAME_MAX 4096
47#endif 51#endif
48 52
53#ifndef PTHREAD_STACK_MIN
54/* care for broken platforms, e.g. windows */
55# define PTHREAD_STACK_MIN 16384
56#endif
57
49#if __ia64 58#if __ia64
50# define STACKSIZE 65536 59# define STACKSIZE 65536
60#elif __i386 || __x86_64 /* 16k is unreasonably high :( */
61# define STACKSIZE PTHREAD_STACK_MIN
51#else 62#else
52# define STACKSIZE 8192 63# define STACKSIZE 16384
64#endif
65
 66/* whether word reads are potentially non-atomic.
 67 * this is conservative; likely most arches this runs
 68 * on have atomic word reads/writes.
 69 */
70#ifndef WORDREAD_UNSAFE
71# if __i386 || __x86_64
72# define WORDREAD_UNSAFE 0
73# else
74# define WORDREAD_UNSAFE 1
53#endif 75# endif
76#endif
77
78/* buffer size for various temporary buffers */
79#define AIO_BUFSIZE 65536
80
81#define dBUF \
82 char *aio_buf; \
83 LOCK (wrklock); \
84 self->dbuf = aio_buf = malloc (AIO_BUFSIZE); \
85 UNLOCK (wrklock); \
86 if (!aio_buf) \
87 return -1;
54 88
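
The dBUF macro above hands each worker thread a temporary buffer and records it in self->dbuf while holding wrklock, so worker_clear () can release it even when a request is cancelled while the thread is still busy. What follows is a minimal standalone sketch of that pattern, not code from either revision; my_worker, my_lock, get_temp_buf and worker_cleanup are illustrative names.

/* per-thread temp buffer registered under a lock so a cleanup hook can free it */
#include <pthread.h>
#include <stdlib.h>

typedef struct { void *dbuf; } my_worker;

static pthread_mutex_t my_lock = PTHREAD_MUTEX_INITIALIZER;

static char *get_temp_buf (my_worker *self, size_t size)
{
  char *buf;

  pthread_mutex_lock (&my_lock);
  self->dbuf = buf = malloc (size);   /* registered before first use */
  pthread_mutex_unlock (&my_lock);

  return buf;                         /* caller must check for NULL */
}

static void worker_cleanup (my_worker *self)
{
  if (self->dbuf)
    {
      free (self->dbuf);
      self->dbuf = 0;
    }
}
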
55enum { 89enum {
56 REQ_QUIT, 90 REQ_QUIT,
57 REQ_OPEN, REQ_CLOSE, 91 REQ_OPEN, REQ_CLOSE,
58 REQ_READ, REQ_WRITE, REQ_READAHEAD, 92 REQ_READ, REQ_WRITE, REQ_READAHEAD,
59 REQ_SENDFILE, 93 REQ_SENDFILE,
60 REQ_STAT, REQ_LSTAT, REQ_FSTAT, 94 REQ_STAT, REQ_LSTAT, REQ_FSTAT,
61 REQ_FSYNC, REQ_FDATASYNC, 95 REQ_FSYNC, REQ_FDATASYNC,
62 REQ_UNLINK, REQ_RMDIR, REQ_RENAME, 96 REQ_UNLINK, REQ_RMDIR, REQ_RENAME,
63 REQ_READDIR, 97 REQ_MKNOD, REQ_READDIR,
64 REQ_LINK, REQ_SYMLINK, 98 REQ_LINK, REQ_SYMLINK,
65 REQ_GROUP, REQ_NOP, 99 REQ_GROUP, REQ_NOP,
66 REQ_SLEEP, 100 REQ_BUSY,
67}; 101};
68 102
69#define AIO_REQ_KLASS "IO::AIO::REQ" 103#define AIO_REQ_KLASS "IO::AIO::REQ"
70#define AIO_GRP_KLASS "IO::AIO::GRP" 104#define AIO_GRP_KLASS "IO::AIO::GRP"
71 105
105 PRI_MIN = -4, 139 PRI_MIN = -4,
106 PRI_MAX = 4, 140 PRI_MAX = 4,
107 141
108 DEFAULT_PRI = 0, 142 DEFAULT_PRI = 0,
109 PRI_BIAS = -PRI_MIN, 143 PRI_BIAS = -PRI_MIN,
144 NUM_PRI = PRI_MAX + PRI_BIAS + 1,
110}; 145};
111 146
112static int next_pri = DEFAULT_PRI + PRI_BIAS; 147static int next_pri = DEFAULT_PRI + PRI_BIAS;
113 148
114static int started, wanted; 149static unsigned int started, wanted;
115static volatile int nreqs;
116static int max_outstanding = 1<<30;
117static int respipe [2];
118 150
119#if __linux && defined (PTHREAD_ADAPTIVE_MUTEX_INITIALIZER_NP) 151#if __linux && defined (PTHREAD_ADAPTIVE_MUTEX_INITIALIZER_NP)
120# define AIO_MUTEX_INIT PTHREAD_ADAPTIVE_MUTEX_INITIALIZER_NP 152# define AIO_MUTEX_INIT PTHREAD_ADAPTIVE_MUTEX_INITIALIZER_NP
121#else 153#else
122# define AIO_MUTEX_INIT PTHREAD_MUTEX_INITIALIZER 154# define AIO_MUTEX_INIT PTHREAD_MUTEX_INITIALIZER
123#endif 155#endif
124 156
157#define LOCK(mutex) pthread_mutex_lock (&(mutex))
158#define UNLOCK(mutex) pthread_mutex_unlock (&(mutex))
159
160/* worker threads management */
161static pthread_mutex_t wrklock = AIO_MUTEX_INIT;
162
163typedef struct worker {
164 /* locked by wrklock */
165 struct worker *prev, *next;
166
167 pthread_t tid;
168
169 /* locked by reslock, reqlock or wrklock */
170 aio_req req; /* currently processed request */
171 void *dbuf;
172 DIR *dirp;
173} worker;
174
175static worker wrk_first = { &wrk_first, &wrk_first, 0 };
176
177static void worker_clear (worker *wrk)
178{
179 if (wrk->dirp)
180 {
181 closedir (wrk->dirp);
182 wrk->dirp = 0;
183 }
184
185 if (wrk->dbuf)
186 {
187 free (wrk->dbuf);
188 wrk->dbuf = 0;
189 }
190}
191
192static void worker_free (worker *wrk)
193{
194 wrk->next->prev = wrk->prev;
195 wrk->prev->next = wrk->next;
196
197 free (wrk);
198}
199
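
worker_free () above unlinks a worker from a circular doubly-linked list anchored at the wrk_first sentinel (initialised to point at itself), and start_thread () further down splices new workers in directly after the sentinel. A self-contained sketch of that list discipline, with node, list_head, list_insert and list_remove as illustrative names:

/* sentinel-based circular doubly-linked list: no special cases for empty/first/last */
typedef struct node { struct node *prev, *next; } node;

static node list_head = { &list_head, &list_head };  /* empty list points at itself */

static void list_insert (node *n)
{
  n->prev = &list_head;
  n->next = list_head.next;
  list_head.next->prev = n;
  list_head.next = n;
}

static void list_remove (node *n)
{
  n->next->prev = n->prev;
  n->prev->next = n->next;
}
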
200static volatile unsigned int nreqs, nready, npending;
201static volatile unsigned int max_outstanding = 0xffffffff;
202static int respipe [2];
203
125static pthread_mutex_t reslock = AIO_MUTEX_INIT; 204static pthread_mutex_t reslock = AIO_MUTEX_INIT;
126static pthread_mutex_t reqlock = AIO_MUTEX_INIT; 205static pthread_mutex_t reqlock = AIO_MUTEX_INIT;
127static pthread_cond_t reqwait = PTHREAD_COND_INITIALIZER; 206static pthread_cond_t reqwait = PTHREAD_COND_INITIALIZER;
128 207
129static volatile aio_req reqs, reqe; /* queue start, queue end */ 208#if WORDREAD_UNSAFE
130static volatile aio_req ress, rese; /* queue start, queue end */
131 209
210static unsigned int get_nready ()
211{
212 unsigned int retval;
213
214 LOCK (reqlock);
215 retval = nready;
216 UNLOCK (reqlock);
217
218 return retval;
219}
220
221static unsigned int get_npending ()
222{
223 unsigned int retval;
224
225 LOCK (reslock);
226 retval = npending;
227 UNLOCK (reslock);
228
229 return retval;
230}
231
232#else
233
234# define get_nready() nready
235# define get_npending() npending
236
237#endif
238
239/*
240 * a somewhat faster data structure might be nice, but
241 * with 8 priorities this actually needs <20 insns
242 * per shift, the most expensive operation.
243 */
244typedef struct {
245 aio_req qs[NUM_PRI], qe[NUM_PRI]; /* qstart, qend */
246 int size;
247} reqq;
248
249static reqq req_queue;
250static reqq res_queue;
251
252int reqq_push (reqq *q, aio_req req)
253{
254 int pri = req->pri;
255 req->next = 0;
256
257 if (q->qe[pri])
258 {
259 q->qe[pri]->next = req;
260 q->qe[pri] = req;
261 }
262 else
263 q->qe[pri] = q->qs[pri] = req;
264
265 return q->size++;
266}
267
268aio_req reqq_shift (reqq *q)
269{
270 int pri;
271
272 if (!q->size)
273 return 0;
274
275 --q->size;
276
277 for (pri = NUM_PRI; pri--; )
278 {
279 aio_req req = q->qs[pri];
280
281 if (req)
282 {
283 if (!(q->qs[pri] = req->next))
284 q->qe[pri] = 0;
285
286 return req;
287 }
288 }
289
290 abort ();
291}
292
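
reqq keeps one FIFO chain per priority slot and reqq_shift () scans the slots from highest to lowest, so requests at a higher priority are always dequeued before lower ones. With PRI_MIN = -4, PRI_MAX = 4 and PRI_BIAS = 4 as defined above, user priorities -4..4 map onto slots 0..8 (NUM_PRI = 9). A small standalone demo of that mapping; main () is only a driver and not part of AIO.xs:

#include <stdio.h>

enum {
  PRI_MIN  = -4,
  PRI_MAX  = 4,
  PRI_BIAS = -PRI_MIN,
  NUM_PRI  = PRI_MAX + PRI_BIAS + 1,
};

int main (void)
{
  int pri;

  /* reqq_shift walks slots NUM_PRI-1 down to 0, so slot 8 (priority +4)
     is always served before slot 0 (priority -4) */
  for (pri = PRI_MIN; pri <= PRI_MAX; pri++)
    printf ("user priority %+d -> slot %d of %d\n", pri, pri + PRI_BIAS, NUM_PRI);

  return 0;
}
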
293static int poll_cb (int max);
132static void req_invoke (aio_req req); 294static void req_invoke (aio_req req);
133static void req_free (aio_req req); 295static void req_free (aio_req req);
296static void req_cancel (aio_req req);
134 297
135/* must be called at most once */ 298/* must be called at most once */
136static SV *req_sv (aio_req req, const char *klass) 299static SV *req_sv (aio_req req, const char *klass)
137{ 300{
138 if (!req->self) 301 if (!req->self)
169 ENTER; 332 ENTER;
170 SAVETMPS; 333 SAVETMPS;
171 PUSHMARK (SP); 334 PUSHMARK (SP);
172 XPUSHs (req_sv (grp, AIO_GRP_KLASS)); 335 XPUSHs (req_sv (grp, AIO_GRP_KLASS));
173 PUTBACK; 336 PUTBACK;
174 call_sv (grp->fh2, G_VOID | G_EVAL); 337 call_sv (grp->fh2, G_VOID | G_EVAL | G_KEEPERR);
175 SPAGAIN; 338 SPAGAIN;
176 FREETMPS; 339 FREETMPS;
177 LEAVE; 340 LEAVE;
178 } 341 }
179 342
200 req_invoke (grp); 363 req_invoke (grp);
201 req_free (grp); 364 req_free (grp);
202 } 365 }
203} 366}
204 367
205static void poll_wait ()
206{
207 fd_set rfd;
208
209 while (nreqs)
210 {
211 aio_req req;
212 pthread_mutex_lock (&reslock);
213 req = ress;
214 pthread_mutex_unlock (&reslock);
215
216 if (req)
217 return;
218
219 FD_ZERO(&rfd);
220 FD_SET(respipe [0], &rfd);
221
222 select (respipe [0] + 1, &rfd, 0, 0, 0);
223 }
224}
225
226static void req_invoke (aio_req req) 368static void req_invoke (aio_req req)
227{ 369{
228 dSP; 370 dSP;
229 int errorno = errno;
230 371
231 if (req->flags & FLAG_CANCELLED || !SvOK (req->callback)) 372 if (!(req->flags & FLAG_CANCELLED) && SvOK (req->callback))
232 return; 373 {
233
234 errno = req->errorno;
235
236 ENTER; 374 ENTER;
237 SAVETMPS; 375 SAVETMPS;
238 PUSHMARK (SP); 376 PUSHMARK (SP);
239 EXTEND (SP, 1); 377 EXTEND (SP, 1);
240 378
241 switch (req->type) 379 switch (req->type)
242 {
243 case REQ_READDIR:
244 { 380 {
245 SV *rv = &PL_sv_undef; 381 case REQ_READDIR:
246
247 if (req->result >= 0)
248 { 382 {
249 char *buf = req->data2ptr; 383 SV *rv = &PL_sv_undef;
250 AV *av = newAV ();
251 384
252 while (req->result) 385 if (req->result >= 0)
253 { 386 {
387 int i;
388 char *buf = req->data2ptr;
389 AV *av = newAV ();
390
391 av_extend (av, req->result - 1);
392
393 for (i = 0; i < req->result; ++i)
394 {
254 SV *sv = newSVpv (buf, 0); 395 SV *sv = newSVpv (buf, 0);
255 396
256 av_push (av, sv); 397 av_store (av, i, sv);
257 buf += SvCUR (sv) + 1; 398 buf += SvCUR (sv) + 1;
258 req->result--; 399 }
400
401 rv = sv_2mortal (newRV_noinc ((SV *)av));
259 } 402 }
260 403
261 rv = sv_2mortal (newRV_noinc ((SV *)av)); 404 PUSHs (rv);
262 } 405 }
406 break;
263 407
264 PUSHs (rv); 408 case REQ_OPEN:
409 {
410 /* convert fd to fh */
411 SV *fh;
412
413 PUSHs (sv_2mortal (newSViv (req->result)));
414 PUTBACK;
415 call_pv ("IO::AIO::_fd2fh", G_SCALAR | G_EVAL);
416 SPAGAIN;
417
418 fh = SvREFCNT_inc (POPs);
419
420 PUSHMARK (SP);
421 XPUSHs (sv_2mortal (fh));
422 }
423 break;
424
425 case REQ_GROUP:
426 req->fd = 2; /* mark group as finished */
427
428 if (req->data)
429 {
430 int i;
431 AV *av = (AV *)req->data;
432
433 EXTEND (SP, AvFILL (av) + 1);
434 for (i = 0; i <= AvFILL (av); ++i)
435 PUSHs (*av_fetch (av, i, 0));
436 }
437 break;
438
439 case REQ_NOP:
440 case REQ_BUSY:
441 break;
442
443 default:
444 PUSHs (sv_2mortal (newSViv (req->result)));
445 break;
265 } 446 }
266 break;
267 447
268 case REQ_OPEN: 448 errno = req->errorno;
269 {
270 /* convert fd to fh */
271 SV *fh;
272 449
273 PUSHs (sv_2mortal (newSViv (req->result)));
274 PUTBACK; 450 PUTBACK;
275 call_pv ("IO::AIO::_fd2fh", G_SCALAR | G_EVAL);
276 SPAGAIN;
277
278 fh = SvREFCNT_inc (POPs);
279
280 PUSHMARK (SP);
281 XPUSHs (sv_2mortal (fh));
282 }
283 break;
284
285 case REQ_GROUP:
286 req->fd = 2; /* mark group as finished */
287
288 if (req->data)
289 {
290 int i;
291 AV *av = (AV *)req->data;
292
293 EXTEND (SP, AvFILL (av) + 1);
294 for (i = 0; i <= AvFILL (av); ++i)
295 PUSHs (*av_fetch (av, i, 0));
296 }
297 break;
298
299 case REQ_NOP:
300 case REQ_SLEEP:
301 break;
302
303 default:
304 PUSHs (sv_2mortal (newSViv (req->result)));
305 break;
306 }
307
308
309 PUTBACK;
310 call_sv (req->callback, G_VOID | G_EVAL); 451 call_sv (req->callback, G_VOID | G_EVAL);
311 SPAGAIN; 452 SPAGAIN;
312 453
313 FREETMPS; 454 FREETMPS;
314 LEAVE; 455 LEAVE;
315
316 errno = errorno;
317
318 if (SvTRUE (ERRSV))
319 { 456 }
320 req_free (req);
321 croak (0);
322 }
323}
324 457
325static void req_free (aio_req req)
326{
327 if (req->grp) 458 if (req->grp)
328 { 459 {
329 aio_req grp = req->grp; 460 aio_req grp = req->grp;
330 461
331 /* unlink request */ 462 /* unlink request */
336 grp->grp_first = req->grp_next; 467 grp->grp_first = req->grp_next;
337 468
338 aio_grp_dec (grp); 469 aio_grp_dec (grp);
339 } 470 }
340 471
472 if (SvTRUE (ERRSV))
473 {
474 req_free (req);
475 croak (0);
476 }
477}
478
479static void req_free (aio_req req)
480{
341 if (req->self) 481 if (req->self)
342 { 482 {
343 sv_unmagic (req->self, PERL_MAGIC_ext); 483 sv_unmagic (req->self, PERL_MAGIC_ext);
344 SvREFCNT_dec (req->self); 484 SvREFCNT_dec (req->self);
345 } 485 }
348 SvREFCNT_dec (req->fh); 488 SvREFCNT_dec (req->fh);
349 SvREFCNT_dec (req->fh2); 489 SvREFCNT_dec (req->fh2);
350 SvREFCNT_dec (req->callback); 490 SvREFCNT_dec (req->callback);
351 Safefree (req->statdata); 491 Safefree (req->statdata);
352 492
353 if (req->type == REQ_READDIR && req->result >= 0) 493 if (req->type == REQ_READDIR)
354 free (req->data2ptr); 494 free (req->data2ptr);
355 495
356 Safefree (req); 496 Safefree (req);
357} 497}
358 498
499static void req_cancel_subs (aio_req grp)
500{
501 aio_req sub;
502
503 if (grp->type != REQ_GROUP)
504 return;
505
506 SvREFCNT_dec (grp->fh2);
507 grp->fh2 = 0;
508
509 for (sub = grp->grp_first; sub; sub = sub->grp_next)
510 req_cancel (sub);
511}
512
359static void req_cancel (aio_req req) 513static void req_cancel (aio_req req)
360{ 514{
361 req->flags |= FLAG_CANCELLED; 515 req->flags |= FLAG_CANCELLED;
362 516
363 if (req->type == REQ_GROUP) 517 req_cancel_subs (req);
364 { 518}
365 aio_req sub;
366 519
367 for (sub = req->grp_first; sub; sub = sub->grp_next) 520static void *aio_proc(void *arg);
368 req_cancel (sub); 521
522static void start_thread (void)
523{
524 sigset_t fullsigset, oldsigset;
525 pthread_attr_t attr;
526
527 worker *wrk = calloc (1, sizeof (worker));
528
529 if (!wrk)
530 croak ("unable to allocate worker thread data");
531
532 pthread_attr_init (&attr);
533 pthread_attr_setstacksize (&attr, STACKSIZE);
534 pthread_attr_setdetachstate (&attr, PTHREAD_CREATE_DETACHED);
535#ifdef PTHREAD_SCOPE_PROCESS
536 pthread_attr_setscope (&attr, PTHREAD_SCOPE_PROCESS);
537#endif
538
539 sigfillset (&fullsigset);
540
541 LOCK (wrklock);
542 sigprocmask (SIG_SETMASK, &fullsigset, &oldsigset);
543
544 if (pthread_create (&wrk->tid, &attr, aio_proc, (void *)wrk) == 0)
369 } 545 {
370} 546 wrk->prev = &wrk_first;
547 wrk->next = wrk_first.next;
548 wrk_first.next->prev = wrk;
549 wrk_first.next = wrk;
550 ++started;
551 }
552 else
553 free (wrk);
371 554
555 sigprocmask (SIG_SETMASK, &oldsigset, 0);
556 UNLOCK (wrklock);
557}
558
559static void maybe_start_thread ()
560{
561#if 0
562 static struct timeval last;
563 struct timeval diff, now;
564#endif
565
566 if (started >= wanted)
567 return;
568
569 if (nready <= nreqs - get_nready () - get_npending ())
570 return;
571
572#if 0
573 gettimeofday (&now, 0);
574
575 diff.tv_sec = now.tv_sec - last.tv_sec;
576 diff.tv_usec = now.tv_usec - last.tv_usec;
577
578 if (diff.tv_usec < 0)
579 {
580 --diff.tv_sec;
581 diff.tv_usec += 1000000;
582 }
583
584 if (!diff.tv_sec && diff.tv_usec < 10000)
585 return;
586
587 last = now;
588#endif
589
590 start_thread ();
591}
592
593static void req_send (aio_req req)
594{
595 ++nreqs;
596
597 LOCK (reqlock);
598 ++nready;
599 reqq_push (&req_queue, req);
600 pthread_cond_signal (&reqwait);
601 UNLOCK (reqlock);
602
603 maybe_start_thread ();
604}
605
606static void end_thread (void)
607{
608 aio_req req;
609
610 Newz (0, req, 1, aio_cb);
611
612 req->type = REQ_QUIT;
613 req->pri = PRI_MAX + PRI_BIAS;
614
615 req_send (req);
616
617 LOCK (wrklock);
618 --started;
619 UNLOCK (wrklock);
620}
621
622static void min_parallel (int nthreads)
623{
624 if (wanted < nthreads)
625 wanted = nthreads;
626}
627
628static void max_parallel (int nthreads)
629{
630 if (wanted > nthreads)
631 wanted = nthreads;
632
633 while (started > wanted)
634 end_thread ();
635}
636
637static void poll_wait ()
638{
639 fd_set rfd;
640
641 while (nreqs)
642 {
643 int size;
644 if (WORDREAD_UNSAFE) LOCK (reslock);
645 size = res_queue.size;
646 if (WORDREAD_UNSAFE) UNLOCK (reslock);
647
648 if (size)
649 return;
650
651 maybe_start_thread ();
652
653 FD_ZERO(&rfd);
654 FD_SET(respipe [0], &rfd);
655
656 select (respipe [0] + 1, &rfd, 0, 0, 0);
657 }
658}
659
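
poll_wait () above blocks in select () on respipe [0] until a worker thread reports a completed request by writing one byte to respipe [1] (see aio_proc () later in this diff); poll_cb () then drains those bytes once the result queue is empty. A minimal sketch of this self-pipe wakeup, with error handling omitted and respipe_demo, notify_main_thread and wait_for_results as illustrative names:

#include <sys/select.h>
#include <unistd.h>

static int respipe_demo [2];            /* [0] = read end, [1] = write end */

static int init_pipe (void)
{
  return pipe (respipe_demo);           /* 0 on success, -1 on failure */
}

static void notify_main_thread (void)
{
  char dummy = 0;
  write (respipe_demo [1], &dummy, 1);  /* wakes any select () on the read end */
}

static void wait_for_results (void)
{
  fd_set rfd;

  FD_ZERO (&rfd);
  FD_SET (respipe_demo [0], &rfd);
  select (respipe_demo [0] + 1, &rfd, 0, 0, 0);
}
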
372static int poll_cb () 660static int poll_cb (int max)
373{ 661{
374 dSP; 662 dSP;
375 int count = 0; 663 int count = 0;
376 int do_croak = 0; 664 int do_croak = 0;
377 aio_req req; 665 aio_req req;
378 666
379 for (;;) 667 for (;;)
380 { 668 {
381 pthread_mutex_lock (&reslock); 669 while (max <= 0 || count < max)
382 req = ress;
383
384 if (req)
385 { 670 {
386 ress = req->next; 671 maybe_start_thread ();
387 672
673 LOCK (reslock);
674 req = reqq_shift (&res_queue);
675
388 if (!ress) 676 if (req)
389 { 677 {
678 --npending;
679
680 if (!res_queue.size)
681 {
390 /* read any signals sent by the worker threads */ 682 /* read any signals sent by the worker threads */
391 char buf [32]; 683 char buf [32];
392 while (read (respipe [0], buf, 32) == 32) 684 while (read (respipe [0], buf, 32) == 32)
685 ;
393 ; 686 }
394
395 rese = 0;
396 } 687 }
688
689 UNLOCK (reslock);
690
691 if (!req)
692 break;
693
694 --nreqs;
695
696 if (req->type == REQ_GROUP && req->length)
697 {
698 req->fd = 1; /* mark request as delayed */
699 continue;
700 }
701 else
702 {
703 if (req->type == REQ_READ)
704 SvCUR_set (req->data, req->dataoffset + (req->result > 0 ? req->result : 0));
705
706 if (req->data2ptr && (req->type == REQ_READ || req->type == REQ_WRITE))
707 SvREADONLY_off (req->data);
708
709 if (req->statdata)
710 {
711 PL_laststype = req->type == REQ_LSTAT ? OP_LSTAT : OP_STAT;
712 PL_laststatval = req->result;
713 PL_statcache = *(req->statdata);
714 }
715
716 req_invoke (req);
717
718 count++;
719 }
720
721 req_free (req);
397 } 722 }
398 723
399 pthread_mutex_unlock (&reslock); 724 if (nreqs <= max_outstanding)
400
401 if (!req)
402 break; 725 break;
403 726
404 --nreqs; 727 poll_wait ();
405 728
406 if (req->type == REQ_QUIT) 729 max = 0;
407 started--;
408 else if (req->type == REQ_GROUP && req->length)
409 {
410 req->fd = 1; /* mark request as delayed */
411 continue;
412 }
413 else
414 {
415 if (req->type == REQ_READ)
416 SvCUR_set (req->data, req->dataoffset + (req->result > 0 ? req->result : 0));
417
418 if (req->data2ptr && (req->type == REQ_READ || req->type == REQ_WRITE))
419 SvREADONLY_off (req->data);
420
421 if (req->statdata)
422 {
423 PL_laststype = req->type == REQ_LSTAT ? OP_LSTAT : OP_STAT;
424 PL_laststatval = req->result;
425 PL_statcache = *(req->statdata);
426 }
427
428 req_invoke (req);
429
430 count++;
431 }
432
433 req_free (req);
434 } 730 }
435 731
436 return count; 732 return count;
437}
438
439static void *aio_proc(void *arg);
440
441static void start_thread (void)
442{
443 sigset_t fullsigset, oldsigset;
444 pthread_t tid;
445 pthread_attr_t attr;
446
447 pthread_attr_init (&attr);
448 pthread_attr_setstacksize (&attr, STACKSIZE);
449 pthread_attr_setdetachstate (&attr, PTHREAD_CREATE_DETACHED);
450
451 sigfillset (&fullsigset);
452 sigprocmask (SIG_SETMASK, &fullsigset, &oldsigset);
453
454 if (pthread_create (&tid, &attr, aio_proc, 0) == 0)
455 started++;
456
457 sigprocmask (SIG_SETMASK, &oldsigset, 0);
458}
459
460static void req_send (aio_req req)
461{
462 while (started < wanted && nreqs >= started)
463 start_thread ();
464
465 ++nreqs;
466
467 pthread_mutex_lock (&reqlock);
468
469 req->next = 0;
470
471 if (reqe)
472 {
473 reqe->next = req;
474 reqe = req;
475 }
476 else
477 reqe = reqs = req;
478
479 pthread_cond_signal (&reqwait);
480 pthread_mutex_unlock (&reqlock);
481
482 if (nreqs > max_outstanding)
483 for (;;)
484 {
485 poll_cb ();
486
487 if (nreqs <= max_outstanding)
488 break;
489
490 poll_wait ();
491 }
492}
493
494static void end_thread (void)
495{
496 aio_req req;
497 Newz (0, req, 1, aio_cb);
498 req->type = REQ_QUIT;
499
500 req_send (req);
501}
502
503static void min_parallel (int nthreads)
504{
505 if (wanted < nthreads)
506 wanted = nthreads;
507}
508
509static void max_parallel (int nthreads)
510{
511 int cur = started;
512
513 if (wanted > nthreads)
514 wanted = nthreads;
515
516 while (cur > wanted)
517 {
518 end_thread ();
519 cur--;
520 }
521
522 while (started > wanted)
523 {
524 poll_wait ();
525 poll_cb ();
526 }
527} 733}
528 734
529static void create_pipe () 735static void create_pipe ()
530{ 736{
531 if (pipe (respipe)) 737 if (pipe (respipe))
555static ssize_t pread (int fd, void *buf, size_t count, off_t offset) 761static ssize_t pread (int fd, void *buf, size_t count, off_t offset)
556{ 762{
557 ssize_t res; 763 ssize_t res;
558 off_t ooffset; 764 off_t ooffset;
559 765
560 pthread_mutex_lock (&preadwritelock); 766 LOCK (preadwritelock);
561 ooffset = lseek (fd, 0, SEEK_CUR); 767 ooffset = lseek (fd, 0, SEEK_CUR);
562 lseek (fd, offset, SEEK_SET); 768 lseek (fd, offset, SEEK_SET);
563 res = read (fd, buf, count); 769 res = read (fd, buf, count);
564 lseek (fd, ooffset, SEEK_SET); 770 lseek (fd, ooffset, SEEK_SET);
565 pthread_mutex_unlock (&preadwritelock); 771 UNLOCK (preadwritelock);
566 772
567 return res; 773 return res;
568} 774}
569 775
570static ssize_t pwrite (int fd, void *buf, size_t count, off_t offset) 776static ssize_t pwrite (int fd, void *buf, size_t count, off_t offset)
571{ 777{
572 ssize_t res; 778 ssize_t res;
573 off_t ooffset; 779 off_t ooffset;
574 780
575 pthread_mutex_lock (&preadwritelock); 781 LOCK (preadwritelock);
576 ooffset = lseek (fd, 0, SEEK_CUR); 782 ooffset = lseek (fd, 0, SEEK_CUR);
577 lseek (fd, offset, SEEK_SET); 783 lseek (fd, offset, SEEK_SET);
578 res = write (fd, buf, count); 784 res = write (fd, buf, count);
579 lseek (fd, offset, SEEK_SET); 785 lseek (fd, offset, SEEK_SET);
580 pthread_mutex_unlock (&preadwritelock); 786 UNLOCK (preadwritelock);
581 787
582 return res; 788 return res;
583} 789}
584#endif 790#endif
585 791
586#if !HAVE_FDATASYNC 792#if !HAVE_FDATASYNC
587# define fdatasync fsync 793# define fdatasync fsync
588#endif 794#endif
589 795
590#if !HAVE_READAHEAD 796#if !HAVE_READAHEAD
591# define readahead aio_readahead 797# define readahead(fd,offset,count) aio_readahead (fd, offset, count, self)
592 798
593static ssize_t readahead (int fd, off_t offset, size_t count) 799static ssize_t aio_readahead (int fd, off_t offset, size_t count, worker *self)
594{ 800{
595 char readahead_buf[4096]; 801 dBUF;
596 802
597 while (count > 0) 803 while (count > 0)
598 { 804 {
599 size_t len = count < sizeof (readahead_buf) ? count : sizeof (readahead_buf); 805 size_t len = count < AIO_BUFSIZE ? count : AIO_BUFSIZE;
600 806
601 pread (fd, readahead_buf, len, offset); 807 pread (fd, aio_buf, len, offset);
602 offset += len; 808 offset += len;
603 count -= len; 809 count -= len;
604 } 810 }
605 811
606 errno = 0; 812 errno = 0;
607} 813}
814
608#endif 815#endif
609 816
610#if !HAVE_READDIR_R 817#if !HAVE_READDIR_R
611# define readdir_r aio_readdir_r 818# define readdir_r aio_readdir_r
612 819
615static int readdir_r (DIR *dirp, struct dirent *ent, struct dirent **res) 822static int readdir_r (DIR *dirp, struct dirent *ent, struct dirent **res)
616{ 823{
617 struct dirent *e; 824 struct dirent *e;
618 int errorno; 825 int errorno;
619 826
620 pthread_mutex_lock (&readdirlock); 827 LOCK (readdirlock);
621 828
622 e = readdir (dirp); 829 e = readdir (dirp);
623 errorno = errno; 830 errorno = errno;
624 831
625 if (e) 832 if (e)
628 strcpy (ent->d_name, e->d_name); 835 strcpy (ent->d_name, e->d_name);
629 } 836 }
630 else 837 else
631 *res = 0; 838 *res = 0;
632 839
633 pthread_mutex_unlock (&readdirlock); 840 UNLOCK (readdirlock);
634 841
635 errno = errorno; 842 errno = errorno;
636 return e ? 0 : -1; 843 return e ? 0 : -1;
637} 844}
638#endif 845#endif
639 846
640/* sendfile always needs emulation */ 847/* sendfile always needs emulation */
641static ssize_t sendfile_ (int ofd, int ifd, off_t offset, size_t count) 848static ssize_t sendfile_ (int ofd, int ifd, off_t offset, size_t count, worker *self)
642{ 849{
643 ssize_t res; 850 ssize_t res;
644 851
645 if (!count) 852 if (!count)
646 return 0; 853 return 0;
657 { 864 {
658 off_t sbytes; 865 off_t sbytes;
659 res = sendfile (ifd, ofd, offset, count, 0, &sbytes, 0); 866 res = sendfile (ifd, ofd, offset, count, 0, &sbytes, 0);
660 867
661 if (res < 0 && sbytes) 868 if (res < 0 && sbytes)
662 /* maybe only on EAGAIN only: as usual, the manpage leaves you guessing */ 869 /* maybe only on EAGAIN: as usual, the manpage leaves you guessing */
663 res = sbytes; 870 res = sbytes;
664 } 871 }
665 872
666# elif __hpux 873# elif __hpux
667 res = sendfile (ofd, ifd, offset, count, 0, 0); 874 res = sendfile (ofd, ifd, offset, count, 0, 0);
695#endif 902#endif
696 ) 903 )
697 ) 904 )
698 { 905 {
699 /* emulate sendfile. this is a major pain in the ass */ 906 /* emulate sendfile. this is a major pain in the ass */
700 char buf[4096]; 907 dBUF;
908
701 res = 0; 909 res = 0;
702 910
703 while (count) 911 while (count)
704 { 912 {
705 ssize_t cnt; 913 ssize_t cnt;
706 914
707 cnt = pread (ifd, buf, count > 4096 ? 4096 : count, offset); 915 cnt = pread (ifd, aio_buf, count > AIO_BUFSIZE ? AIO_BUFSIZE : count, offset);
708 916
709 if (cnt <= 0) 917 if (cnt <= 0)
710 { 918 {
711 if (cnt && !res) res = -1; 919 if (cnt && !res) res = -1;
712 break; 920 break;
713 } 921 }
714 922
715 cnt = write (ofd, buf, cnt); 923 cnt = write (ofd, aio_buf, cnt);
716 924
717 if (cnt <= 0) 925 if (cnt <= 0)
718 { 926 {
719 if (cnt && !res) res = -1; 927 if (cnt && !res) res = -1;
720 break; 928 break;
728 936
729 return res; 937 return res;
730} 938}
731 939
732/* read a full directory */ 940/* read a full directory */
733static int scandir_ (const char *path, void **namesp) 941static void scandir_ (aio_req req, worker *self)
734{ 942{
735 DIR *dirp = opendir (path); 943 DIR *dirp;
736 union 944 union
737 { 945 {
738 struct dirent d; 946 struct dirent d;
739 char b [offsetof (struct dirent, d_name) + NAME_MAX + 1]; 947 char b [offsetof (struct dirent, d_name) + NAME_MAX + 1];
740 } u; 948 } *u;
741 struct dirent *entp; 949 struct dirent *entp;
742 char *name, *names; 950 char *name, *names;
743 int memlen = 4096; 951 int memlen = 4096;
744 int memofs = 0; 952 int memofs = 0;
745 int res = 0; 953 int res = 0;
746 int errorno; 954 int errorno;
747 955
748 if (!dirp) 956 LOCK (wrklock);
749 return -1; 957 self->dirp = dirp = opendir (req->dataptr);
750 958 self->dbuf = u = malloc (sizeof (*u));
751 names = malloc (memlen); 959 req->data2ptr = names = malloc (memlen);
960 UNLOCK (wrklock);
961
962 if (dirp && u && names)
963 for (;;)
964 {
965 errno = 0;
966 readdir_r (dirp, &u->d, &entp);
967
968 if (!entp)
969 break;
970
971 name = entp->d_name;
972
973 if (name [0] != '.' || (name [1] && (name [1] != '.' || name [2])))
974 {
975 int len = strlen (name) + 1;
976
977 res++;
978
979 while (memofs + len > memlen)
980 {
981 memlen *= 2;
982 LOCK (wrklock);
983 req->data2ptr = names = realloc (names, memlen);
984 UNLOCK (wrklock);
985
986 if (!names)
987 break;
988 }
989
990 memcpy (names + memofs, name, len);
991 memofs += len;
992 }
993 }
994
995 if (errno)
996 res = -1;
997
998 req->result = res;
999}
1000
1001/*****************************************************************************/
1002
1003static void *aio_proc (void *thr_arg)
1004{
1005 aio_req req;
1006 worker *self = (worker *)thr_arg;
752 1007
753 for (;;) 1008 for (;;)
754 { 1009 {
755 errno = 0, readdir_r (dirp, &u.d, &entp); 1010 LOCK (reqlock);
756
757 if (!entp)
758 break;
759
760 name = entp->d_name;
761
762 if (name [0] != '.' || (name [1] && (name [1] != '.' || name [2])))
763 {
764 int len = strlen (name) + 1;
765
766 res++;
767
768 while (memofs + len > memlen)
769 {
770 memlen *= 2;
771 names = realloc (names, memlen);
772 if (!names)
773 break;
774 }
775
776 memcpy (names + memofs, name, len);
777 memofs += len;
778 }
779 }
780
781 errorno = errno;
782 closedir (dirp);
783
784 if (errorno)
785 {
786 free (names);
787 errno = errorno;
788 res = -1;
789 }
790
791 *namesp = (void *)names;
792 return res;
793}
794
795/*****************************************************************************/
796
797static void *aio_proc (void *thr_arg)
798{
799 aio_req req;
800 int type;
801
802 do
803 {
804 pthread_mutex_lock (&reqlock);
805 1011
806 for (;;) 1012 for (;;)
807 { 1013 {
808 req = reqs; 1014 self->req = req = reqq_shift (&req_queue);
809
810 if (reqs)
811 {
812 reqs = reqs->next;
813 if (!reqs) reqe = 0;
814 }
815 1015
816 if (req) 1016 if (req)
817 break; 1017 break;
818 1018
819 pthread_cond_wait (&reqwait, &reqlock); 1019 pthread_cond_wait (&reqwait, &reqlock);
820 } 1020 }
821 1021
822 pthread_mutex_unlock (&reqlock); 1022 --nready;
1023
1024 UNLOCK (reqlock);
823 1025
824 errno = 0; /* strictly unnecessary */ 1026 errno = 0; /* strictly unnecessary */
825 type = req->type; /* remember type for QUIT check */
826 1027
827 if (!(req->flags & FLAG_CANCELLED)) 1028 if (!(req->flags & FLAG_CANCELLED))
828 switch (type) 1029 switch (req->type)
829 { 1030 {
830 case REQ_READ: req->result = pread (req->fd, req->dataptr, req->length, req->offset); break; 1031 case REQ_READ: req->result = pread (req->fd, req->dataptr, req->length, req->offset); break;
831 case REQ_WRITE: req->result = pwrite (req->fd, req->dataptr, req->length, req->offset); break; 1032 case REQ_WRITE: req->result = pwrite (req->fd, req->dataptr, req->length, req->offset); break;
832 1033
833 case REQ_READAHEAD: req->result = readahead (req->fd, req->offset, req->length); break; 1034 case REQ_READAHEAD: req->result = readahead (req->fd, req->offset, req->length); break;
834 case REQ_SENDFILE: req->result = sendfile_ (req->fd, req->fd2, req->offset, req->length); break; 1035 case REQ_SENDFILE: req->result = sendfile_ (req->fd, req->fd2, req->offset, req->length, self); break;
835 1036
836 case REQ_STAT: req->result = stat (req->dataptr, req->statdata); break; 1037 case REQ_STAT: req->result = stat (req->dataptr, req->statdata); break;
837 case REQ_LSTAT: req->result = lstat (req->dataptr, req->statdata); break; 1038 case REQ_LSTAT: req->result = lstat (req->dataptr, req->statdata); break;
838 case REQ_FSTAT: req->result = fstat (req->fd , req->statdata); break; 1039 case REQ_FSTAT: req->result = fstat (req->fd , req->statdata); break;
839 1040
842 case REQ_UNLINK: req->result = unlink (req->dataptr); break; 1043 case REQ_UNLINK: req->result = unlink (req->dataptr); break;
843 case REQ_RMDIR: req->result = rmdir (req->dataptr); break; 1044 case REQ_RMDIR: req->result = rmdir (req->dataptr); break;
844 case REQ_RENAME: req->result = rename (req->data2ptr, req->dataptr); break; 1045 case REQ_RENAME: req->result = rename (req->data2ptr, req->dataptr); break;
845 case REQ_LINK: req->result = link (req->data2ptr, req->dataptr); break; 1046 case REQ_LINK: req->result = link (req->data2ptr, req->dataptr); break;
846 case REQ_SYMLINK: req->result = symlink (req->data2ptr, req->dataptr); break; 1047 case REQ_SYMLINK: req->result = symlink (req->data2ptr, req->dataptr); break;
1048 case REQ_MKNOD: req->result = mknod (req->data2ptr, req->mode, (dev_t)req->offset); break;
847 1049
848 case REQ_FDATASYNC: req->result = fdatasync (req->fd); break; 1050 case REQ_FDATASYNC: req->result = fdatasync (req->fd); break;
849 case REQ_FSYNC: req->result = fsync (req->fd); break; 1051 case REQ_FSYNC: req->result = fsync (req->fd); break;
850 case REQ_READDIR: req->result = scandir_ (req->dataptr, &req->data2ptr); break; 1052 case REQ_READDIR: scandir_ (req, self); break;
851 1053
852 case REQ_SLEEP: 1054 case REQ_BUSY:
853 { 1055 {
854 struct timeval tv; 1056 struct timeval tv;
855 1057
856 tv.tv_sec = req->fd; 1058 tv.tv_sec = req->fd;
857 tv.tv_usec = req->fd2; 1059 tv.tv_usec = req->fd2;
859 req->result = select (0, 0, 0, 0, &tv); 1061 req->result = select (0, 0, 0, 0, &tv);
860 } 1062 }
861 1063
862 case REQ_GROUP: 1064 case REQ_GROUP:
863 case REQ_NOP: 1065 case REQ_NOP:
1066 break;
1067
864 case REQ_QUIT: 1068 case REQ_QUIT:
1069 LOCK (wrklock);
1070 worker_free (self);
1071 --started;
1072 UNLOCK (wrklock);
865 break; 1073 return 0;
866 1074
867 default: 1075 default:
868 req->result = ENOSYS; 1076 req->result = ENOSYS;
869 break; 1077 break;
870 } 1078 }
871 1079
872 req->errorno = errno; 1080 req->errorno = errno;
873 1081
874 pthread_mutex_lock (&reslock); 1082 LOCK (reslock);
875 1083
876 req->next = 0; 1084 ++npending;
877 1085
878 if (rese) 1086 if (!reqq_push (&res_queue, req))
879 {
880 rese->next = req;
881 rese = req;
882 }
883 else
884 {
885 rese = ress = req;
886
887 /* write a dummy byte to the pipe so fh becomes ready */ 1087 /* write a dummy byte to the pipe so fh becomes ready */
888 write (respipe [1], &respipe, 1); 1088 write (respipe [1], &respipe, 1);
889 }
890 1089
891 pthread_mutex_unlock (&reslock); 1090 self->req = 0;
892 } 1091 worker_clear (self);
893 while (type != REQ_QUIT);
894 1092
895 return 0; 1093 UNLOCK (reslock);
1094 }
896} 1095}
897 1096
898/*****************************************************************************/ 1097/*****************************************************************************/
899 1098
900static void atfork_prepare (void) 1099static void atfork_prepare (void)
901{ 1100{
902 pthread_mutex_lock (&reqlock); 1101 LOCK (wrklock);
903 pthread_mutex_lock (&reslock); 1102 LOCK (reqlock);
1103 LOCK (reslock);
904#if !HAVE_PREADWRITE 1104#if !HAVE_PREADWRITE
905 pthread_mutex_lock (&preadwritelock); 1105 LOCK (preadwritelock);
906#endif 1106#endif
907#if !HAVE_READDIR_R 1107#if !HAVE_READDIR_R
908 pthread_mutex_lock (&readdirlock); 1108 LOCK (readdirlock);
909#endif 1109#endif
910} 1110}
911 1111
912static void atfork_parent (void) 1112static void atfork_parent (void)
913{ 1113{
914#if !HAVE_READDIR_R 1114#if !HAVE_READDIR_R
915 pthread_mutex_unlock (&readdirlock); 1115 UNLOCK (readdirlock);
916#endif 1116#endif
917#if !HAVE_PREADWRITE 1117#if !HAVE_PREADWRITE
918 pthread_mutex_unlock (&preadwritelock); 1118 UNLOCK (preadwritelock);
919#endif 1119#endif
920 pthread_mutex_unlock (&reslock); 1120 UNLOCK (reslock);
921 pthread_mutex_unlock (&reqlock); 1121 UNLOCK (reqlock);
1122 UNLOCK (wrklock);
922} 1123}
923 1124
924static void atfork_child (void) 1125static void atfork_child (void)
925{ 1126{
926 aio_req prv; 1127 aio_req prv;
927 1128
1129 while (prv = reqq_shift (&req_queue))
1130 req_free (prv);
1131
1132 while (prv = reqq_shift (&res_queue))
1133 req_free (prv);
1134
1135 while (wrk_first.next != &wrk_first)
1136 {
1137 worker *wrk = wrk_first.next;
1138
1139 if (wrk->req)
1140 req_free (wrk->req);
1141
1142 worker_clear (wrk);
1143 worker_free (wrk);
1144 }
1145
928 started = 0; 1146 started = 0;
929 1147 nreqs = 0;
930 while (reqs)
931 {
932 prv = reqs;
933 reqs = prv->next;
934 req_free (prv);
935 }
936
937 reqs = reqe = 0;
938
939 while (ress)
940 {
941 prv = ress;
942 ress = prv->next;
943 req_free (prv);
944 }
945
946 ress = rese = 0;
947 1148
948 close (respipe [0]); 1149 close (respipe [0]);
949 close (respipe [1]); 1150 close (respipe [1]);
950 create_pipe (); 1151 create_pipe ();
951 1152
978PROTOTYPES: ENABLE 1179PROTOTYPES: ENABLE
979 1180
980BOOT: 1181BOOT:
981{ 1182{
982 HV *stash = gv_stashpv ("IO::AIO", 1); 1183 HV *stash = gv_stashpv ("IO::AIO", 1);
1184
983 newCONSTSUB (stash, "EXDEV", newSViv (EXDEV)); 1185 newCONSTSUB (stash, "EXDEV", newSViv (EXDEV));
984 newCONSTSUB (stash, "O_RDONLY", newSViv (O_RDONLY)); 1186 newCONSTSUB (stash, "O_RDONLY", newSViv (O_RDONLY));
985 newCONSTSUB (stash, "O_WRONLY", newSViv (O_WRONLY)); 1187 newCONSTSUB (stash, "O_WRONLY", newSViv (O_WRONLY));
1188 newCONSTSUB (stash, "O_CREAT", newSViv (O_CREAT));
1189 newCONSTSUB (stash, "O_TRUNC", newSViv (O_TRUNC));
1190 newCONSTSUB (stash, "S_IFIFO", newSViv (S_IFIFO));
986 1191
987 create_pipe (); 1192 create_pipe ();
988 pthread_atfork (atfork_prepare, atfork_parent, atfork_child); 1193 pthread_atfork (atfork_prepare, atfork_parent, atfork_child);
989}
990 1194
1195 start_thread ();
1196}
1197
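
The BOOT section registers atfork_prepare, atfork_parent and atfork_child via pthread_atfork () so a fork () in the middle of request processing cannot leave a mutex locked in the child; the handlers earlier in this diff lock every shared mutex before the fork, unlock them in the parent, and rebuild the request and worker state in the child. A minimal standalone sketch of that discipline; demo_lock and the demo_* handlers are illustrative names only:

#include <pthread.h>

static pthread_mutex_t demo_lock = PTHREAD_MUTEX_INITIALIZER;

static void demo_prepare (void) { pthread_mutex_lock   (&demo_lock); }
static void demo_parent  (void) { pthread_mutex_unlock (&demo_lock); }
static void demo_child   (void) { pthread_mutex_unlock (&demo_lock); /* then reset state */ }

static void install_fork_hooks (void)
{
  pthread_atfork (demo_prepare, demo_parent, demo_child);
}
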
991void 1198void
992min_parallel (nthreads) 1199min_parallel (int nthreads)
993 int nthreads
994 PROTOTYPE: $ 1200 PROTOTYPE: $
995 1201
996void 1202void
997max_parallel (nthreads) 1203max_parallel (int nthreads)
998 int nthreads
999 PROTOTYPE: $ 1204 PROTOTYPE: $
1000 1205
1001int 1206int
1002max_outstanding (nreqs) 1207max_outstanding (int maxreqs)
1003 int nreqs 1208 PROTOTYPE: $
1004 PROTOTYPE: $
1005 CODE: 1209 CODE:
1006 RETVAL = max_outstanding; 1210 RETVAL = max_outstanding;
1007 max_outstanding = nreqs; 1211 max_outstanding = maxreqs;
1212 OUTPUT:
1213 RETVAL
1008 1214
1009void 1215void
1010aio_open (pathname,flags,mode,callback=&PL_sv_undef) 1216aio_open (pathname,flags,mode,callback=&PL_sv_undef)
1011 SV * pathname 1217 SV * pathname
1012 int flags 1218 int flags
1227 1433
1228 REQ_SEND; 1434 REQ_SEND;
1229} 1435}
1230 1436
1231void 1437void
1438aio_mknod (pathname,mode,dev,callback=&PL_sv_undef)
1439 SV * pathname
1440 SV * callback
1441 UV mode
1442 UV dev
1443 PPCODE:
1444{
1445 dREQ;
1446
1447 req->type = REQ_MKNOD;
1448 req->data = newSVsv (pathname);
1449 req->dataptr = SvPVbyte_nolen (req->data);
1450 req->mode = (mode_t)mode;
1451 req->offset = dev;
1452
1453 REQ_SEND;
1454}
1455
1456void
1232aio_sleep (delay,callback=&PL_sv_undef) 1457aio_busy (delay,callback=&PL_sv_undef)
1233 double delay 1458 double delay
1234 SV * callback 1459 SV * callback
1235 PPCODE: 1460 PPCODE:
1236{ 1461{
1237 dREQ; 1462 dREQ;
1238 1463
1239 req->type = REQ_SLEEP; 1464 req->type = REQ_BUSY;
1240 req->fd = delay < 0. ? 0 : delay; 1465 req->fd = delay < 0. ? 0 : delay;
1241 req->fd2 = delay < 0. ? 0 : 1000. * (delay - req->fd); 1466 req->fd2 = delay < 0. ? 0 : 1000. * (delay - req->fd);
1242 1467
1243 REQ_SEND; 1468 REQ_SEND;
1244} 1469}
1267 req->type = REQ_NOP; 1492 req->type = REQ_NOP;
1268 1493
1269 REQ_SEND; 1494 REQ_SEND;
1270} 1495}
1271 1496
1272#if 0 1497int
1273 1498aioreq_pri (int pri = 0)
1274void 1499 PROTOTYPE: ;$
1275aio_pri (int pri = DEFAULT_PRI)
1276 CODE: 1500 CODE:
1501 RETVAL = next_pri - PRI_BIAS;
1502 if (items > 0)
1503 {
1277 if (pri < PRI_MIN) pri = PRI_MIN; 1504 if (pri < PRI_MIN) pri = PRI_MIN;
1278 if (pri > PRI_MAX) pri = PRI_MAX; 1505 if (pri > PRI_MAX) pri = PRI_MAX;
1279 next_pri = pri + PRI_BIAS; 1506 next_pri = pri + PRI_BIAS;
1507 }
1508 OUTPUT:
1509 RETVAL
1280 1510
1281#endif 1511void
1512aioreq_nice (int nice = 0)
1513 CODE:
1514 nice = next_pri - nice;
1515 if (nice < PRI_MIN) nice = PRI_MIN;
1516 if (nice > PRI_MAX) nice = PRI_MAX;
1517 next_pri = nice + PRI_BIAS;
1282 1518
1283void 1519void
1284flush () 1520flush ()
1285 PROTOTYPE: 1521 PROTOTYPE:
1286 CODE: 1522 CODE:
1287 while (nreqs) 1523 while (nreqs)
1288 { 1524 {
1289 poll_wait (); 1525 poll_wait ();
1290 poll_cb (); 1526 poll_cb (0);
1291 } 1527 }
1292 1528
1293void 1529void
1294poll() 1530poll()
1295 PROTOTYPE: 1531 PROTOTYPE:
1296 CODE: 1532 CODE:
1297 if (nreqs) 1533 if (nreqs)
1298 { 1534 {
1299 poll_wait (); 1535 poll_wait ();
1300 poll_cb (); 1536 poll_cb (0);
1301 } 1537 }
1302 1538
1303int 1539int
1304poll_fileno() 1540poll_fileno()
1305 PROTOTYPE: 1541 PROTOTYPE:
1310 1546
1311int 1547int
1312poll_cb(...) 1548poll_cb(...)
1313 PROTOTYPE: 1549 PROTOTYPE:
1314 CODE: 1550 CODE:
1315 RETVAL = poll_cb (); 1551 RETVAL = poll_cb (0);
1552 OUTPUT:
1553 RETVAL
1554
1555int
1556poll_some(int max = 0)
1557 PROTOTYPE: $
1558 CODE:
1559 RETVAL = poll_cb (max);
1316 OUTPUT: 1560 OUTPUT:
1317 RETVAL 1561 RETVAL
1318 1562
1319void 1563void
1320poll_wait() 1564poll_wait()
1329 CODE: 1573 CODE:
1330 RETVAL = nreqs; 1574 RETVAL = nreqs;
1331 OUTPUT: 1575 OUTPUT:
1332 RETVAL 1576 RETVAL
1333 1577
1578int
1579nready()
1580 PROTOTYPE:
1581 CODE:
1582 RETVAL = get_nready ();
1583 OUTPUT:
1584 RETVAL
1585
1586int
1587npending()
1588 PROTOTYPE:
1589 CODE:
1590 RETVAL = get_npending ();
1591 OUTPUT:
1592 RETVAL
1593
1334PROTOTYPES: DISABLE 1594PROTOTYPES: DISABLE
1335 1595
1336MODULE = IO::AIO PACKAGE = IO::AIO::REQ 1596MODULE = IO::AIO PACKAGE = IO::AIO::REQ
1337 1597
1338void 1598void
1339cancel (aio_req_ornot req) 1599cancel (aio_req_ornot req)
1340 PROTOTYPE:
1341 CODE: 1600 CODE:
1342 req_cancel (req); 1601 req_cancel (req);
1343 1602
1344void 1603void
1345cb (aio_req_ornot req, SV *callback=&PL_sv_undef) 1604cb (aio_req_ornot req, SV *callback=&PL_sv_undef)
1381 } 1640 }
1382 } 1641 }
1383} 1642}
1384 1643
1385void 1644void
1645cancel_subs (aio_req_ornot req)
1646 CODE:
1647 req_cancel_subs (req);
1648
1649void
1386result (aio_req grp, ...) 1650result (aio_req grp, ...)
1387 CODE: 1651 CODE:
1388{ 1652{
1389 int i; 1653 int i;
1654 AV *av;
1655
1656 grp->errorno = errno;
1657
1390 AV *av = newAV (); 1658 av = newAV ();
1391 1659
1392 for (i = 1; i < items; ++i ) 1660 for (i = 1; i < items; ++i )
1393 av_push (av, newSVsv (ST (i))); 1661 av_push (av, newSVsv (ST (i)));
1394 1662
1395 SvREFCNT_dec (grp->data); 1663 SvREFCNT_dec (grp->data);
1396 grp->data = (SV *)av; 1664 grp->data = (SV *)av;
1397} 1665}
1398 1666
1399void 1667void
1668errno (aio_req grp, int errorno = errno)
1669 CODE:
1670 grp->errorno = errorno;
1671
1672void
1400feed_limit (aio_req grp, int limit) 1673limit (aio_req grp, int limit)
1401 CODE: 1674 CODE:
1402 grp->fd2 = limit; 1675 grp->fd2 = limit;
1403 aio_grp_feed (grp); 1676 aio_grp_feed (grp);
1404 1677
1405void 1678void
