/cvs/IO-AIO/AIO.xs

Comparing IO-AIO/AIO.xs (file contents):
Revision 1.64 by root, Mon Oct 23 23:54:41 2006 UTC vs.
Revision 1.81 by root, Fri Oct 27 20:10:06 2006 UTC

1#if __linux 1/* solaris */
2#define _POSIX_PTHREAD_SEMANTICS 1
3
4#if __linux && !defined(_GNU_SOURCE)
2# define _GNU_SOURCE 5# define _GNU_SOURCE
3#endif 6#endif
4 7
8/* just in case */
5#define _REENTRANT 1 9#define _REENTRANT 1
6 10
7#include <errno.h> 11#include <errno.h>
8 12
9#include "EXTERN.h" 13#include "EXTERN.h"
44/* used for struct dirent, AIX doesn't provide it */ 48/* used for struct dirent, AIX doesn't provide it */
45#ifndef NAME_MAX 49#ifndef NAME_MAX
46# define NAME_MAX 4096 50# define NAME_MAX 4096
47#endif 51#endif
48 52
53#ifndef PTHREAD_STACK_MIN
54/* care for broken platforms, e.g. windows */
55# define PTHREAD_STACK_MIN 16384
56#endif
57
49#if __ia64 58#if __ia64
50# define STACKSIZE 65536 59# define STACKSIZE 65536
60#elif __i386 || __x86_64 /* 16k is unreasonably high :( */
61# define STACKSIZE PTHREAD_STACK_MIN
51#else 62#else
52# define STACKSIZE 8192 63# define STACKSIZE 16384
64#endif
65
66/* whether word reads are potentially non-atomic.
67 * this is conservative, likely most arches this runs
68 * on have atomic word read/writes.
69 */
70#ifndef WORDREAD_UNSAFE
71# if __i386 || __x86_64
72# define WORDREAD_UNSAFE 0
73# else
74# define WORDREAD_UNSAFE 1
53#endif 75# endif
76#endif
77
78/* buffer size for various temporary buffers */
79#define AIO_BUFSIZE 65536
80
81#define dBUF \
82 char *aio_buf; \
83 LOCK (wrklock); \
84 self->dbuf = aio_buf = malloc (AIO_BUFSIZE); \
85 UNLOCK (wrklock); \
86 if (!aio_buf) \
87 return -1;
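
The dBUF macro above is the per-request scratch buffer used by the emulated readahead and sendfile paths: it allocates AIO_BUFSIZE bytes, publishes the pointer in the worker's dbuf slot while holding wrklock (so worker_clear can free it even if the request is abandoned), and returns -1 from the enclosing function when the allocation fails. A minimal standalone sketch of the same pattern, with illustrative names (scratch_worker, scratch_lock, SCRATCH_SIZE, do_work, cleanup) that are not taken from AIO.xs:

/* illustrative only -- none of these names exist in AIO.xs */
#include <pthread.h>
#include <stdlib.h>

#define SCRATCH_SIZE 65536              /* plays the role of AIO_BUFSIZE */

typedef struct scratch_worker {
  void *dbuf;                           /* scratch buffer, freed by cleanup () */
} scratch_worker;

static pthread_mutex_t scratch_lock = PTHREAD_MUTEX_INITIALIZER;

static int do_work (scratch_worker *self)
{
  char *buf;

  /* allocate under the lock and publish the pointer first, like dBUF */
  pthread_mutex_lock (&scratch_lock);
  self->dbuf = buf = malloc (SCRATCH_SIZE);
  pthread_mutex_unlock (&scratch_lock);

  if (!buf)
    return -1;

  /* ... use buf as the request's temporary buffer ... */
  return 0;
}

static void cleanup (scratch_worker *self)  /* plays the role of worker_clear */
{
  free (self->dbuf);
  self->dbuf = 0;
}

Publishing the pointer before checking it means the cleanup side can always free whatever was allocated, which is what worker_clear does for dbuf and dirp.
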
54 88
55enum { 89enum {
56 REQ_QUIT, 90 REQ_QUIT,
57 REQ_OPEN, REQ_CLOSE, 91 REQ_OPEN, REQ_CLOSE,
58 REQ_READ, REQ_WRITE, REQ_READAHEAD, 92 REQ_READ, REQ_WRITE, REQ_READAHEAD,
59 REQ_SENDFILE, 93 REQ_SENDFILE,
60 REQ_STAT, REQ_LSTAT, REQ_FSTAT, 94 REQ_STAT, REQ_LSTAT, REQ_FSTAT,
61 REQ_FSYNC, REQ_FDATASYNC, 95 REQ_FSYNC, REQ_FDATASYNC,
62 REQ_UNLINK, REQ_RMDIR, REQ_RENAME, 96 REQ_UNLINK, REQ_RMDIR, REQ_RENAME,
63 REQ_READDIR, 97 REQ_MKNOD, REQ_READDIR,
64 REQ_LINK, REQ_SYMLINK, 98 REQ_LINK, REQ_SYMLINK,
65 REQ_GROUP, REQ_NOP, 99 REQ_GROUP, REQ_NOP,
66 REQ_SLEEP, 100 REQ_BUSY,
67}; 101};
68 102
69#define AIO_REQ_KLASS "IO::AIO::REQ" 103#define AIO_REQ_KLASS "IO::AIO::REQ"
70#define AIO_GRP_KLASS "IO::AIO::GRP" 104#define AIO_GRP_KLASS "IO::AIO::GRP"
71 105
105 PRI_MIN = -4, 139 PRI_MIN = -4,
106 PRI_MAX = 4, 140 PRI_MAX = 4,
107 141
108 DEFAULT_PRI = 0, 142 DEFAULT_PRI = 0,
109 PRI_BIAS = -PRI_MIN, 143 PRI_BIAS = -PRI_MIN,
144 NUM_PRI = PRI_MAX + PRI_BIAS + 1,
110}; 145};
111 146
112static int next_pri = DEFAULT_PRI + PRI_BIAS; 147static int next_pri = DEFAULT_PRI + PRI_BIAS;
113 148
114static int started, wanted; 149static unsigned int started, wanted;
115static volatile int nreqs;
116static int max_outstanding = 1<<30;
117static int respipe [2];
118 150
119#if __linux && defined (PTHREAD_ADAPTIVE_MUTEX_INITIALIZER_NP) 151#if __linux && defined (PTHREAD_ADAPTIVE_MUTEX_INITIALIZER_NP)
120# define AIO_MUTEX_INIT PTHREAD_ADAPTIVE_MUTEX_INITIALIZER_NP 152# define AIO_MUTEX_INIT PTHREAD_ADAPTIVE_MUTEX_INITIALIZER_NP
121#else 153#else
122# define AIO_MUTEX_INIT PTHREAD_MUTEX_INITIALIZER 154# define AIO_MUTEX_INIT PTHREAD_MUTEX_INITIALIZER
123#endif 155#endif
124 156
157#define LOCK(mutex) pthread_mutex_lock (&(mutex))
158#define UNLOCK(mutex) pthread_mutex_unlock (&(mutex))
159
160/* worker threads management */
161static pthread_mutex_t wrklock = AIO_MUTEX_INIT;
162
163typedef struct worker {
164 /* locked by wrklock */
165 struct worker *prev, *next;
166
167 pthread_t tid;
168
169 /* locked by reslock, reqlock or wrklock */
170 aio_req req; /* currently processed request */
171 void *dbuf;
172 DIR *dirp;
173} worker;
174
175static worker wrk_first = { &wrk_first, &wrk_first, 0 };
176
177static void worker_clear (worker *wrk)
178{
179 if (wrk->dirp)
180 {
181 closedir (wrk->dirp);
182 wrk->dirp = 0;
183 }
184
185 if (wrk->dbuf)
186 {
187 free (wrk->dbuf);
188 wrk->dbuf = 0;
189 }
190}
191
192static void worker_free (worker *wrk)
193{
194 wrk->next->prev = wrk->prev;
195 wrk->prev->next = wrk->next;
196
197 free (wrk);
198}
199
200static volatile unsigned int nreqs, nready, npending;
201static volatile unsigned int max_outstanding = 0xffffffff;
202static int respipe [2];
203
125static pthread_mutex_t reslock = AIO_MUTEX_INIT; 204static pthread_mutex_t reslock = AIO_MUTEX_INIT;
126static pthread_mutex_t reqlock = AIO_MUTEX_INIT; 205static pthread_mutex_t reqlock = AIO_MUTEX_INIT;
127static pthread_cond_t reqwait = PTHREAD_COND_INITIALIZER; 206static pthread_cond_t reqwait = PTHREAD_COND_INITIALIZER;
128 207
129static volatile aio_req reqs, reqe; /* queue start, queue end */ 208#if WORDREAD_UNSAFE
130static volatile aio_req ress, rese; /* queue start, queue end */
131 209
210static unsigned int get_nready ()
211{
212 unsigned int retval;
213
214 LOCK (reqlock);
215 retval = nready;
216 UNLOCK (reqlock);
217
218 return retval;
219}
220
221static unsigned int get_npending ()
222{
223 unsigned int retval;
224
225 LOCK (reslock);
226 retval = npending;
227 UNLOCK (reslock);
228
229 return retval;
230}
231
232#else
233
234# define get_nready() nready
235# define get_npending() npending
236
237#endif
238
239/*
240 * a somewhat faster data structure might be nice, but
241 * with 8 priorities this actually needs <20 insns
242 * per shift, the most expensive operation.
243 */
244typedef struct {
245 aio_req qs[NUM_PRI], qe[NUM_PRI]; /* qstart, qend */
246 int size;
247} reqq;
248
249static reqq req_queue;
250static reqq res_queue;
251
252int reqq_push (reqq *q, aio_req req)
253{
254 int pri = req->pri;
255 req->next = 0;
256
257 if (q->qe[pri])
258 {
259 q->qe[pri]->next = req;
260 q->qe[pri] = req;
261 }
262 else
263 q->qe[pri] = q->qs[pri] = req;
264
265 return q->size++;
266}
267
268aio_req reqq_shift (reqq *q)
269{
270 int pri;
271
272 if (!q->size)
273 return 0;
274
275 --q->size;
276
277 for (pri = NUM_PRI; pri--; )
278 {
279 aio_req req = q->qs[pri];
280
281 if (req)
282 {
283 if (!(q->qs[pri] = req->next))
284 q->qe[pri] = 0;
285
286 return req;
287 }
288 }
289
290 abort ();
291}
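
The reqq structure above keeps one singly linked FIFO per priority level; reqq_push appends at the request's priority and returns the size before the push, while reqq_shift scans from the highest priority downwards, so a shift only has to inspect NUM_PRI list heads. A standalone sketch of the same queue, using illustrative node/queue types instead of the module's aio_req:

/* illustrative only -- node/queue stand in for aio_req/reqq */
#include <stdio.h>
#include <stdlib.h>

#define NUM_PRI 9                       /* PRI_MAX + PRI_BIAS + 1 in AIO.xs */

typedef struct node { struct node *next; int pri; const char *name; } node;
typedef struct { node *qs[NUM_PRI], *qe[NUM_PRI]; int size; } queue;

static int push (queue *q, node *n)     /* append at n->pri, like reqq_push */
{
  n->next = 0;

  if (q->qe[n->pri])
    q->qe[n->pri]->next = n;
  else
    q->qs[n->pri] = n;

  q->qe[n->pri] = n;
  return q->size++;                     /* size *before* the push */
}

static node *shift_ (queue *q)          /* highest priority first, like reqq_shift */
{
  int pri;

  if (!q->size)
    return 0;

  --q->size;

  for (pri = NUM_PRI; pri--; )
    if (q->qs[pri])
      {
        node *n = q->qs[pri];

        if (!(q->qs[pri] = n->next))
          q->qe[pri] = 0;

        return n;
      }

  abort ();
}

int main (void)
{
  queue q = { { 0 } };
  node low  = { 0, 0, "low" };
  node high = { 0, NUM_PRI - 1, "high" };

  push (&q, &low);
  push (&q, &high);

  printf ("%s\n", shift_ (&q)->name);   /* prints "high" despite insertion order */
  return 0;
}

The return value of push matters later in this revision: aio_proc only writes the wakeup byte to respipe when reqq_push on res_queue reports that the queue was previously empty.
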
292
293static int poll_cb (int max);
132static void req_invoke (aio_req req); 294static void req_invoke (aio_req req);
133static void req_free (aio_req req); 295static void req_free (aio_req req);
296static void req_cancel (aio_req req);
134 297
135/* must be called at most once */ 298/* must be called at most once */
136static SV *req_sv (aio_req req, const char *klass) 299static SV *req_sv (aio_req req, const char *klass)
137{ 300{
138 if (!req->self) 301 if (!req->self)
169 ENTER; 332 ENTER;
170 SAVETMPS; 333 SAVETMPS;
171 PUSHMARK (SP); 334 PUSHMARK (SP);
172 XPUSHs (req_sv (grp, AIO_GRP_KLASS)); 335 XPUSHs (req_sv (grp, AIO_GRP_KLASS));
173 PUTBACK; 336 PUTBACK;
174 call_sv (grp->fh2, G_VOID | G_EVAL); 337 call_sv (grp->fh2, G_VOID | G_EVAL | G_KEEPERR);
175 SPAGAIN; 338 SPAGAIN;
176 FREETMPS; 339 FREETMPS;
177 LEAVE; 340 LEAVE;
178 } 341 }
179 342
200 req_invoke (grp); 363 req_invoke (grp);
201 req_free (grp); 364 req_free (grp);
202 } 365 }
203} 366}
204 367
205static void poll_wait ()
206{
207 fd_set rfd;
208
209 while (nreqs)
210 {
211 aio_req req;
212#if !(__x86 || __x86_64) /* safe without semaphore on these archs */
213 pthread_mutex_lock (&reslock);
214#endif
215 req = ress;
216#if !(__x86 || __x86_64) /* safe without semaphore on these archs */
217 pthread_mutex_unlock (&reslock);
218#endif
219
220 if (req)
221 return;
222
223 FD_ZERO(&rfd);
224 FD_SET(respipe [0], &rfd);
225
226 select (respipe [0] + 1, &rfd, 0, 0, 0);
227 }
228}
229
230static void req_invoke (aio_req req) 368static void req_invoke (aio_req req)
231{ 369{
232 dSP; 370 dSP;
233 int errorno = errno;
234 371
235 if (req->flags & FLAG_CANCELLED || !SvOK (req->callback)) 372 if (!(req->flags & FLAG_CANCELLED) && SvOK (req->callback))
236 return; 373 {
237
238 errno = req->errorno;
239
240 ENTER; 374 ENTER;
241 SAVETMPS; 375 SAVETMPS;
242 PUSHMARK (SP); 376 PUSHMARK (SP);
243 EXTEND (SP, 1); 377 EXTEND (SP, 1);
244 378
245 switch (req->type) 379 switch (req->type)
246 {
247 case REQ_READDIR:
248 { 380 {
249 SV *rv = &PL_sv_undef; 381 case REQ_READDIR:
250
251 if (req->result >= 0)
252 { 382 {
253 char *buf = req->data2ptr; 383 SV *rv = &PL_sv_undef;
254 AV *av = newAV ();
255 384
256 while (req->result) 385 if (req->result >= 0)
257 { 386 {
387 int i;
388 char *buf = req->data2ptr;
389 AV *av = newAV ();
390
391 av_extend (av, req->result - 1);
392
393 for (i = 0; i < req->result; ++i)
394 {
258 SV *sv = newSVpv (buf, 0); 395 SV *sv = newSVpv (buf, 0);
259 396
260 av_push (av, sv); 397 av_store (av, i, sv);
261 buf += SvCUR (sv) + 1; 398 buf += SvCUR (sv) + 1;
262 req->result--; 399 }
400
401 rv = sv_2mortal (newRV_noinc ((SV *)av));
263 } 402 }
264 403
265 rv = sv_2mortal (newRV_noinc ((SV *)av)); 404 PUSHs (rv);
266 } 405 }
406 break;
267 407
268 PUSHs (rv); 408 case REQ_OPEN:
409 {
410 /* convert fd to fh */
411 SV *fh;
412
413 PUSHs (sv_2mortal (newSViv (req->result)));
414 PUTBACK;
415 call_pv ("IO::AIO::_fd2fh", G_SCALAR | G_EVAL);
416 SPAGAIN;
417
418 fh = SvREFCNT_inc (POPs);
419
420 PUSHMARK (SP);
421 XPUSHs (sv_2mortal (fh));
422 }
423 break;
424
425 case REQ_GROUP:
426 req->fd = 2; /* mark group as finished */
427
428 if (req->data)
429 {
430 int i;
431 AV *av = (AV *)req->data;
432
433 EXTEND (SP, AvFILL (av) + 1);
434 for (i = 0; i <= AvFILL (av); ++i)
435 PUSHs (*av_fetch (av, i, 0));
436 }
437 break;
438
439 case REQ_NOP:
440 case REQ_BUSY:
441 break;
442
443 default:
444 PUSHs (sv_2mortal (newSViv (req->result)));
445 break;
269 } 446 }
270 break;
271 447
272 case REQ_OPEN: 448 errno = req->errorno;
273 {
274 /* convert fd to fh */
275 SV *fh;
276 449
277 PUSHs (sv_2mortal (newSViv (req->result)));
278 PUTBACK; 450 PUTBACK;
279 call_pv ("IO::AIO::_fd2fh", G_SCALAR | G_EVAL);
280 SPAGAIN;
281
282 fh = SvREFCNT_inc (POPs);
283
284 PUSHMARK (SP);
285 XPUSHs (sv_2mortal (fh));
286 }
287 break;
288
289 case REQ_GROUP:
290 req->fd = 2; /* mark group as finished */
291
292 if (req->data)
293 {
294 int i;
295 AV *av = (AV *)req->data;
296
297 EXTEND (SP, AvFILL (av) + 1);
298 for (i = 0; i <= AvFILL (av); ++i)
299 PUSHs (*av_fetch (av, i, 0));
300 }
301 break;
302
303 case REQ_NOP:
304 case REQ_SLEEP:
305 break;
306
307 default:
308 PUSHs (sv_2mortal (newSViv (req->result)));
309 break;
310 }
311
312
313 PUTBACK;
314 call_sv (req->callback, G_VOID | G_EVAL); 451 call_sv (req->callback, G_VOID | G_EVAL);
315 SPAGAIN; 452 SPAGAIN;
316 453
317 FREETMPS; 454 FREETMPS;
318 LEAVE; 455 LEAVE;
319
320 errno = errorno;
321
322 if (SvTRUE (ERRSV))
323 { 456 }
324 req_free (req);
325 croak (0);
326 }
327}
328 457
329static void req_free (aio_req req)
330{
331 if (req->grp) 458 if (req->grp)
332 { 459 {
333 aio_req grp = req->grp; 460 aio_req grp = req->grp;
334 461
335 /* unlink request */ 462 /* unlink request */
340 grp->grp_first = req->grp_next; 467 grp->grp_first = req->grp_next;
341 468
342 aio_grp_dec (grp); 469 aio_grp_dec (grp);
343 } 470 }
344 471
472 if (SvTRUE (ERRSV))
473 {
474 req_free (req);
475 croak (0);
476 }
477}
478
479static void req_free (aio_req req)
480{
345 if (req->self) 481 if (req->self)
346 { 482 {
347 sv_unmagic (req->self, PERL_MAGIC_ext); 483 sv_unmagic (req->self, PERL_MAGIC_ext);
348 SvREFCNT_dec (req->self); 484 SvREFCNT_dec (req->self);
349 } 485 }
352 SvREFCNT_dec (req->fh); 488 SvREFCNT_dec (req->fh);
353 SvREFCNT_dec (req->fh2); 489 SvREFCNT_dec (req->fh2);
354 SvREFCNT_dec (req->callback); 490 SvREFCNT_dec (req->callback);
355 Safefree (req->statdata); 491 Safefree (req->statdata);
356 492
357 if (req->type == REQ_READDIR && req->result >= 0) 493 if (req->type == REQ_READDIR)
358 free (req->data2ptr); 494 free (req->data2ptr);
359 495
360 Safefree (req); 496 Safefree (req);
361} 497}
362 498
499static void req_cancel_subs (aio_req grp)
500{
501 aio_req sub;
502
503 if (grp->type != REQ_GROUP)
504 return;
505
506 SvREFCNT_dec (grp->fh2);
507 grp->fh2 = 0;
508
509 for (sub = grp->grp_first; sub; sub = sub->grp_next)
510 req_cancel (sub);
511}
512
363static void req_cancel (aio_req req) 513static void req_cancel (aio_req req)
364{ 514{
365 req->flags |= FLAG_CANCELLED; 515 req->flags |= FLAG_CANCELLED;
366 516
367 if (req->type == REQ_GROUP) 517 req_cancel_subs (req);
368 { 518}
369 aio_req sub;
370 519
371 for (sub = req->grp_first; sub; sub = sub->grp_next) 520static void *aio_proc(void *arg);
372 req_cancel (sub); 521
522static void start_thread (void)
523{
524 sigset_t fullsigset, oldsigset;
525 pthread_attr_t attr;
526
527 worker *wrk = calloc (1, sizeof (worker));
528
529 if (!wrk)
530 croak ("unable to allocate worker thread data");
531
532 pthread_attr_init (&attr);
533 pthread_attr_setstacksize (&attr, STACKSIZE);
534 pthread_attr_setdetachstate (&attr, PTHREAD_CREATE_DETACHED);
535#ifdef PTHREAD_SCOPE_PROCESS
536 pthread_attr_setscope (&attr, PTHREAD_SCOPE_PROCESS);
537#endif
538
539 sigfillset (&fullsigset);
540
541 LOCK (wrklock);
542 sigprocmask (SIG_SETMASK, &fullsigset, &oldsigset);
543
544 if (pthread_create (&wrk->tid, &attr, aio_proc, (void *)wrk) == 0)
373 } 545 {
374} 546 wrk->prev = &wrk_first;
547 wrk->next = wrk_first.next;
548 wrk_first.next->prev = wrk;
549 wrk_first.next = wrk;
550 ++started;
551 }
552 else
553 free (wrk);
375 554
555 sigprocmask (SIG_SETMASK, &oldsigset, 0);
556 UNLOCK (wrklock);
557}
558
559static void maybe_start_thread ()
560{
561#if 0
562 static struct timeval last;
563 struct timeval diff, now;
564#endif
565
566 if (started >= wanted)
567 return;
568
569 if (nready <= nreqs - get_nready () - get_npending ())
570 return;
571
572#if 0
573 gettimeofday (&now, 0);
574
575 diff.tv_sec = now.tv_sec - last.tv_sec;
576 diff.tv_usec = now.tv_usec - last.tv_usec;
577
578 if (diff.tv_usec < 0)
579 {
580 --diff.tv_sec;
581 diff.tv_usec += 1000000;
582 }
583
584 if (!diff.tv_sec && diff.tv_usec < 10000)
585 return;
586
587 last = now;
588#endif
589
590 start_thread ();
591}
592
593static void req_send (aio_req req)
594{
595 ++nreqs;
596
597 LOCK (reqlock);
598 ++nready;
599 reqq_push (&req_queue, req);
600 pthread_cond_signal (&reqwait);
601 UNLOCK (reqlock);
602
603 maybe_start_thread ();
604}
605
606static void end_thread (void)
607{
608 aio_req req;
609
610 Newz (0, req, 1, aio_cb);
611
612 req->type = REQ_QUIT;
613 req->pri = PRI_MAX + PRI_BIAS;
614
615 req_send (req);
616
617 LOCK (wrklock);
618 --started;
619 UNLOCK (wrklock);
620}
621
622static void min_parallel (int nthreads)
623{
624 if (wanted < nthreads)
625 wanted = nthreads;
626}
627
628static void max_parallel (int nthreads)
629{
630 if (wanted > nthreads)
631 wanted = nthreads;
632
633 while (started > wanted)
634 end_thread ();
635}
636
637static void poll_wait ()
638{
639 fd_set rfd;
640
641 while (nreqs)
642 {
643 int size;
644 if (WORDREAD_UNSAFE) LOCK (reslock);
645 size = res_queue.size;
646 if (WORDREAD_UNSAFE) UNLOCK (reslock);
647
648 if (size)
649 return;
650
651 maybe_start_thread ();
652
653 FD_ZERO(&rfd);
654 FD_SET(respipe [0], &rfd);
655
656 select (respipe [0] + 1, &rfd, 0, 0, 0);
657 }
658}
659
376static int poll_cb () 660static int poll_cb (int max)
377{ 661{
378 dSP; 662 dSP;
379 int count = 0; 663 int count = 0;
380 int do_croak = 0; 664 int do_croak = 0;
381 aio_req req; 665 aio_req req;
382 666
383 for (;;) 667 for (;;)
384 { 668 {
385 pthread_mutex_lock (&reslock); 669 while (max <= 0 || count < max)
386 req = ress;
387
388 if (req)
389 { 670 {
390 ress = req->next; 671 maybe_start_thread ();
391 672
673 LOCK (reslock);
674 req = reqq_shift (&res_queue);
675
392 if (!ress) 676 if (req)
393 { 677 {
678 --npending;
679
680 if (!res_queue.size)
681 {
394 /* read any signals sent by the worker threads */ 682 /* read any signals sent by the worker threads */
395 char buf [32]; 683 char buf [32];
396 while (read (respipe [0], buf, 32) == 32) 684 while (read (respipe [0], buf, 32) == 32)
685 ;
397 ; 686 }
398
399 rese = 0;
400 } 687 }
688
689 UNLOCK (reslock);
690
691 if (!req)
692 break;
693
694 --nreqs;
695
696 if (req->type == REQ_GROUP && req->length)
697 {
698 req->fd = 1; /* mark request as delayed */
699 continue;
700 }
701 else
702 {
703 if (req->type == REQ_READ)
704 SvCUR_set (req->data, req->dataoffset + (req->result > 0 ? req->result : 0));
705
706 if (req->data2ptr && (req->type == REQ_READ || req->type == REQ_WRITE))
707 SvREADONLY_off (req->data);
708
709 if (req->statdata)
710 {
711 PL_laststype = req->type == REQ_LSTAT ? OP_LSTAT : OP_STAT;
712 PL_laststatval = req->result;
713 PL_statcache = *(req->statdata);
714 }
715
716 req_invoke (req);
717
718 count++;
719 }
720
721 req_free (req);
401 } 722 }
402 723
403 pthread_mutex_unlock (&reslock); 724 if (nreqs <= max_outstanding)
404
405 if (!req)
406 break; 725 break;
407 726
408 --nreqs; 727 poll_wait ();
409 728
410 if (req->type == REQ_QUIT) 729 max = 0;
411 started--;
412 else if (req->type == REQ_GROUP && req->length)
413 {
414 req->fd = 1; /* mark request as delayed */
415 continue;
416 }
417 else
418 {
419 if (req->type == REQ_READ)
420 SvCUR_set (req->data, req->dataoffset + (req->result > 0 ? req->result : 0));
421
422 if (req->data2ptr && (req->type == REQ_READ || req->type == REQ_WRITE))
423 SvREADONLY_off (req->data);
424
425 if (req->statdata)
426 {
427 PL_laststype = req->type == REQ_LSTAT ? OP_LSTAT : OP_STAT;
428 PL_laststatval = req->result;
429 PL_statcache = *(req->statdata);
430 }
431
432 req_invoke (req);
433
434 count++;
435 }
436
437 req_free (req);
438 } 730 }
439 731
440 return count; 732 return count;
441}
442
443static void *aio_proc(void *arg);
444
445static void start_thread (void)
446{
447 sigset_t fullsigset, oldsigset;
448 pthread_t tid;
449 pthread_attr_t attr;
450
451 pthread_attr_init (&attr);
452 pthread_attr_setstacksize (&attr, STACKSIZE);
453 pthread_attr_setdetachstate (&attr, PTHREAD_CREATE_DETACHED);
454
455 sigfillset (&fullsigset);
456 sigprocmask (SIG_SETMASK, &fullsigset, &oldsigset);
457
458 if (pthread_create (&tid, &attr, aio_proc, 0) == 0)
459 started++;
460
461 sigprocmask (SIG_SETMASK, &oldsigset, 0);
462}
463
464static void req_send (aio_req req)
465{
466 while (started < wanted && nreqs >= started)
467 start_thread ();
468
469 ++nreqs;
470
471 pthread_mutex_lock (&reqlock);
472
473 req->next = 0;
474
475 if (reqe)
476 {
477 reqe->next = req;
478 reqe = req;
479 }
480 else
481 reqe = reqs = req;
482
483 pthread_cond_signal (&reqwait);
484 pthread_mutex_unlock (&reqlock);
485
486 if (nreqs > max_outstanding)
487 for (;;)
488 {
489 poll_cb ();
490
491 if (nreqs <= max_outstanding)
492 break;
493
494 poll_wait ();
495 }
496}
497
498static void end_thread (void)
499{
500 aio_req req;
501 Newz (0, req, 1, aio_cb);
502 req->type = REQ_QUIT;
503
504 req_send (req);
505}
506
507static void min_parallel (int nthreads)
508{
509 if (wanted < nthreads)
510 wanted = nthreads;
511}
512
513static void max_parallel (int nthreads)
514{
515 int cur = started;
516
517 if (wanted > nthreads)
518 wanted = nthreads;
519
520 while (cur > wanted)
521 {
522 end_thread ();
523 cur--;
524 }
525
526 while (started > wanted)
527 {
528 poll_wait ();
529 poll_cb ();
530 }
531} 733}
532 734
533static void create_pipe () 735static void create_pipe ()
534{ 736{
535 if (pipe (respipe)) 737 if (pipe (respipe))
559static ssize_t pread (int fd, void *buf, size_t count, off_t offset) 761static ssize_t pread (int fd, void *buf, size_t count, off_t offset)
560{ 762{
561 ssize_t res; 763 ssize_t res;
562 off_t ooffset; 764 off_t ooffset;
563 765
564 pthread_mutex_lock (&preadwritelock); 766 LOCK (preadwritelock);
565 ooffset = lseek (fd, 0, SEEK_CUR); 767 ooffset = lseek (fd, 0, SEEK_CUR);
566 lseek (fd, offset, SEEK_SET); 768 lseek (fd, offset, SEEK_SET);
567 res = read (fd, buf, count); 769 res = read (fd, buf, count);
568 lseek (fd, ooffset, SEEK_SET); 770 lseek (fd, ooffset, SEEK_SET);
569 pthread_mutex_unlock (&preadwritelock); 771 UNLOCK (preadwritelock);
570 772
571 return res; 773 return res;
572} 774}
573 775
574static ssize_t pwrite (int fd, void *buf, size_t count, off_t offset) 776static ssize_t pwrite (int fd, void *buf, size_t count, off_t offset)
575{ 777{
576 ssize_t res; 778 ssize_t res;
577 off_t ooffset; 779 off_t ooffset;
578 780
579 pthread_mutex_lock (&preadwritelock); 781 LOCK (preadwritelock);
580 ooffset = lseek (fd, 0, SEEK_CUR); 782 ooffset = lseek (fd, 0, SEEK_CUR);
581 lseek (fd, offset, SEEK_SET); 783 lseek (fd, offset, SEEK_SET);
582 res = write (fd, buf, count); 784 res = write (fd, buf, count);
583 lseek (fd, offset, SEEK_SET); 785 lseek (fd, offset, SEEK_SET);
584 pthread_mutex_unlock (&preadwritelock); 786 UNLOCK (preadwritelock);
585 787
586 return res; 788 return res;
587} 789}
588#endif 790#endif
589 791
590#if !HAVE_FDATASYNC 792#if !HAVE_FDATASYNC
591# define fdatasync fsync 793# define fdatasync fsync
592#endif 794#endif
593 795
594#if !HAVE_READAHEAD 796#if !HAVE_READAHEAD
595# define readahead aio_readahead 797# define readahead(fd,offset,count) aio_readahead (fd, offset, count, self)
596 798
597static ssize_t readahead (int fd, off_t offset, size_t count) 799static ssize_t aio_readahead (int fd, off_t offset, size_t count, worker *self)
598{ 800{
599 char readahead_buf[4096]; 801 dBUF;
600 802
601 while (count > 0) 803 while (count > 0)
602 { 804 {
603 size_t len = count < sizeof (readahead_buf) ? count : sizeof (readahead_buf); 805 size_t len = count < AIO_BUFSIZE ? count : AIO_BUFSIZE;
604 806
605 pread (fd, readahead_buf, len, offset); 807 pread (fd, aio_buf, len, offset);
606 offset += len; 808 offset += len;
607 count -= len; 809 count -= len;
608 } 810 }
609 811
610 errno = 0; 812 errno = 0;
611} 813}
814
612#endif 815#endif
613 816
614#if !HAVE_READDIR_R 817#if !HAVE_READDIR_R
615# define readdir_r aio_readdir_r 818# define readdir_r aio_readdir_r
616 819
619static int readdir_r (DIR *dirp, struct dirent *ent, struct dirent **res) 822static int readdir_r (DIR *dirp, struct dirent *ent, struct dirent **res)
620{ 823{
621 struct dirent *e; 824 struct dirent *e;
622 int errorno; 825 int errorno;
623 826
624 pthread_mutex_lock (&readdirlock); 827 LOCK (readdirlock);
625 828
626 e = readdir (dirp); 829 e = readdir (dirp);
627 errorno = errno; 830 errorno = errno;
628 831
629 if (e) 832 if (e)
632 strcpy (ent->d_name, e->d_name); 835 strcpy (ent->d_name, e->d_name);
633 } 836 }
634 else 837 else
635 *res = 0; 838 *res = 0;
636 839
637 pthread_mutex_unlock (&readdirlock); 840 UNLOCK (readdirlock);
638 841
639 errno = errorno; 842 errno = errorno;
640 return e ? 0 : -1; 843 return e ? 0 : -1;
641} 844}
642#endif 845#endif
643 846
644/* sendfile always needs emulation */ 847/* sendfile always needs emulation */
645static ssize_t sendfile_ (int ofd, int ifd, off_t offset, size_t count) 848static ssize_t sendfile_ (int ofd, int ifd, off_t offset, size_t count, worker *self)
646{ 849{
647 ssize_t res; 850 ssize_t res;
648 851
649 if (!count) 852 if (!count)
650 return 0; 853 return 0;
661 { 864 {
662 off_t sbytes; 865 off_t sbytes;
663 res = sendfile (ifd, ofd, offset, count, 0, &sbytes, 0); 866 res = sendfile (ifd, ofd, offset, count, 0, &sbytes, 0);
664 867
665 if (res < 0 && sbytes) 868 if (res < 0 && sbytes)
666 /* maybe only on EAGAIN only: as usual, the manpage leaves you guessing */ 869 /* maybe only on EAGAIN: as usual, the manpage leaves you guessing */
667 res = sbytes; 870 res = sbytes;
668 } 871 }
669 872
670# elif __hpux 873# elif __hpux
671 res = sendfile (ofd, ifd, offset, count, 0, 0); 874 res = sendfile (ofd, ifd, offset, count, 0, 0);
699#endif 902#endif
700 ) 903 )
701 ) 904 )
702 { 905 {
703 /* emulate sendfile. this is a major pain in the ass */ 906 /* emulate sendfile. this is a major pain in the ass */
704 char buf[4096]; 907 dBUF;
908
705 res = 0; 909 res = 0;
706 910
707 while (count) 911 while (count)
708 { 912 {
709 ssize_t cnt; 913 ssize_t cnt;
710 914
711 cnt = pread (ifd, buf, count > 4096 ? 4096 : count, offset); 915 cnt = pread (ifd, aio_buf, count > AIO_BUFSIZE ? AIO_BUFSIZE : count, offset);
712 916
713 if (cnt <= 0) 917 if (cnt <= 0)
714 { 918 {
715 if (cnt && !res) res = -1; 919 if (cnt && !res) res = -1;
716 break; 920 break;
717 } 921 }
718 922
719 cnt = write (ofd, buf, cnt); 923 cnt = write (ofd, aio_buf, cnt);
720 924
721 if (cnt <= 0) 925 if (cnt <= 0)
722 { 926 {
723 if (cnt && !res) res = -1; 927 if (cnt && !res) res = -1;
724 break; 928 break;
732 936
733 return res; 937 return res;
734} 938}
735 939
736/* read a full directory */ 940/* read a full directory */
737static int scandir_ (const char *path, void **namesp) 941static void scandir_ (aio_req req, worker *self)
738{ 942{
739 DIR *dirp = opendir (path); 943 DIR *dirp;
740 union 944 union
741 { 945 {
742 struct dirent d; 946 struct dirent d;
743 char b [offsetof (struct dirent, d_name) + NAME_MAX + 1]; 947 char b [offsetof (struct dirent, d_name) + NAME_MAX + 1];
744 } u; 948 } *u;
745 struct dirent *entp; 949 struct dirent *entp;
746 char *name, *names; 950 char *name, *names;
747 int memlen = 4096; 951 int memlen = 4096;
748 int memofs = 0; 952 int memofs = 0;
749 int res = 0; 953 int res = 0;
750 int errorno; 954 int errorno;
751 955
752 if (!dirp) 956 LOCK (wrklock);
753 return -1; 957 self->dirp = dirp = opendir (req->dataptr);
754 958 self->dbuf = u = malloc (sizeof (*u));
755 names = malloc (memlen); 959 req->data2ptr = names = malloc (memlen);
960 UNLOCK (wrklock);
961
962 if (dirp && u && names)
963 for (;;)
964 {
965 errno = 0;
966 readdir_r (dirp, &u->d, &entp);
967
968 if (!entp)
969 break;
970
971 name = entp->d_name;
972
973 if (name [0] != '.' || (name [1] && (name [1] != '.' || name [2])))
974 {
975 int len = strlen (name) + 1;
976
977 res++;
978
979 while (memofs + len > memlen)
980 {
981 memlen *= 2;
982 LOCK (wrklock);
983 req->data2ptr = names = realloc (names, memlen);
984 UNLOCK (wrklock);
985
986 if (!names)
987 break;
988 }
989
990 memcpy (names + memofs, name, len);
991 memofs += len;
992 }
993 }
994
995 if (errno)
996 res = -1;
997
998 req->result = res;
999}
1000
1001/*****************************************************************************/
1002
1003static void *aio_proc (void *thr_arg)
1004{
1005 aio_req req;
1006 worker *self = (worker *)thr_arg;
756 1007
757 for (;;) 1008 for (;;)
758 { 1009 {
759 errno = 0, readdir_r (dirp, &u.d, &entp); 1010 LOCK (reqlock);
760
761 if (!entp)
762 break;
763
764 name = entp->d_name;
765
766 if (name [0] != '.' || (name [1] && (name [1] != '.' || name [2])))
767 {
768 int len = strlen (name) + 1;
769
770 res++;
771
772 while (memofs + len > memlen)
773 {
774 memlen *= 2;
775 names = realloc (names, memlen);
776 if (!names)
777 break;
778 }
779
780 memcpy (names + memofs, name, len);
781 memofs += len;
782 }
783 }
784
785 errorno = errno;
786 closedir (dirp);
787
788 if (errorno)
789 {
790 free (names);
791 errno = errorno;
792 res = -1;
793 }
794
795 *namesp = (void *)names;
796 return res;
797}
798
799/*****************************************************************************/
800
801static void *aio_proc (void *thr_arg)
802{
803 aio_req req;
804 int type;
805
806 do
807 {
808 pthread_mutex_lock (&reqlock);
809 1011
810 for (;;) 1012 for (;;)
811 { 1013 {
812 req = reqs; 1014 self->req = req = reqq_shift (&req_queue);
813
814 if (reqs)
815 {
816 reqs = reqs->next;
817 if (!reqs) reqe = 0;
818 }
819 1015
820 if (req) 1016 if (req)
821 break; 1017 break;
822 1018
823 pthread_cond_wait (&reqwait, &reqlock); 1019 pthread_cond_wait (&reqwait, &reqlock);
824 } 1020 }
825 1021
826 pthread_mutex_unlock (&reqlock); 1022 --nready;
1023
1024 UNLOCK (reqlock);
827 1025
828 errno = 0; /* strictly unnecessary */ 1026 errno = 0; /* strictly unnecessary */
829 type = req->type; /* remember type for QUIT check */
830 1027
831 if (!(req->flags & FLAG_CANCELLED)) 1028 if (!(req->flags & FLAG_CANCELLED))
832 switch (type) 1029 switch (req->type)
833 { 1030 {
834 case REQ_READ: req->result = pread (req->fd, req->dataptr, req->length, req->offset); break; 1031 case REQ_READ: req->result = pread (req->fd, req->dataptr, req->length, req->offset); break;
835 case REQ_WRITE: req->result = pwrite (req->fd, req->dataptr, req->length, req->offset); break; 1032 case REQ_WRITE: req->result = pwrite (req->fd, req->dataptr, req->length, req->offset); break;
836 1033
837 case REQ_READAHEAD: req->result = readahead (req->fd, req->offset, req->length); break; 1034 case REQ_READAHEAD: req->result = readahead (req->fd, req->offset, req->length); break;
838 case REQ_SENDFILE: req->result = sendfile_ (req->fd, req->fd2, req->offset, req->length); break; 1035 case REQ_SENDFILE: req->result = sendfile_ (req->fd, req->fd2, req->offset, req->length, self); break;
839 1036
840 case REQ_STAT: req->result = stat (req->dataptr, req->statdata); break; 1037 case REQ_STAT: req->result = stat (req->dataptr, req->statdata); break;
841 case REQ_LSTAT: req->result = lstat (req->dataptr, req->statdata); break; 1038 case REQ_LSTAT: req->result = lstat (req->dataptr, req->statdata); break;
842 case REQ_FSTAT: req->result = fstat (req->fd , req->statdata); break; 1039 case REQ_FSTAT: req->result = fstat (req->fd , req->statdata); break;
843 1040
846 case REQ_UNLINK: req->result = unlink (req->dataptr); break; 1043 case REQ_UNLINK: req->result = unlink (req->dataptr); break;
847 case REQ_RMDIR: req->result = rmdir (req->dataptr); break; 1044 case REQ_RMDIR: req->result = rmdir (req->dataptr); break;
848 case REQ_RENAME: req->result = rename (req->data2ptr, req->dataptr); break; 1045 case REQ_RENAME: req->result = rename (req->data2ptr, req->dataptr); break;
849 case REQ_LINK: req->result = link (req->data2ptr, req->dataptr); break; 1046 case REQ_LINK: req->result = link (req->data2ptr, req->dataptr); break;
850 case REQ_SYMLINK: req->result = symlink (req->data2ptr, req->dataptr); break; 1047 case REQ_SYMLINK: req->result = symlink (req->data2ptr, req->dataptr); break;
1048 case REQ_MKNOD: req->result = mknod (req->data2ptr, req->mode, (dev_t)req->offset); break;
851 1049
852 case REQ_FDATASYNC: req->result = fdatasync (req->fd); break; 1050 case REQ_FDATASYNC: req->result = fdatasync (req->fd); break;
853 case REQ_FSYNC: req->result = fsync (req->fd); break; 1051 case REQ_FSYNC: req->result = fsync (req->fd); break;
854 case REQ_READDIR: req->result = scandir_ (req->dataptr, &req->data2ptr); break; 1052 case REQ_READDIR: scandir_ (req, self); break;
855 1053
856 case REQ_SLEEP: 1054 case REQ_BUSY:
857 { 1055 {
858 struct timeval tv; 1056 struct timeval tv;
859 1057
860 tv.tv_sec = req->fd; 1058 tv.tv_sec = req->fd;
861 tv.tv_usec = req->fd2; 1059 tv.tv_usec = req->fd2;
863 req->result = select (0, 0, 0, 0, &tv); 1061 req->result = select (0, 0, 0, 0, &tv);
864 } 1062 }
865 1063
866 case REQ_GROUP: 1064 case REQ_GROUP:
867 case REQ_NOP: 1065 case REQ_NOP:
1066 break;
1067
868 case REQ_QUIT: 1068 case REQ_QUIT:
1069 LOCK (wrklock);
1070 worker_free (self);
1071 --started;
1072 UNLOCK (wrklock);
869 break; 1073 return 0;
870 1074
871 default: 1075 default:
872 req->result = ENOSYS; 1076 req->result = ENOSYS;
873 break; 1077 break;
874 } 1078 }
875 1079
876 req->errorno = errno; 1080 req->errorno = errno;
877 1081
878 pthread_mutex_lock (&reslock); 1082 LOCK (reslock);
879 1083
880 req->next = 0; 1084 ++npending;
881 1085
882 if (rese) 1086 if (!reqq_push (&res_queue, req))
883 {
884 rese->next = req;
885 rese = req;
886 }
887 else
888 {
889 rese = ress = req;
890
891 /* write a dummy byte to the pipe so fh becomes ready */ 1087 /* write a dummy byte to the pipe so fh becomes ready */
892 write (respipe [1], &respipe, 1); 1088 write (respipe [1], &respipe, 1);
893 }
894 1089
895 pthread_mutex_unlock (&reslock); 1090 self->req = 0;
896 } 1091 worker_clear (self);
897 while (type != REQ_QUIT);
898 1092
899 return 0; 1093 UNLOCK (reslock);
1094 }
900} 1095}
901 1096
902/*****************************************************************************/ 1097/*****************************************************************************/
903 1098
904static void atfork_prepare (void) 1099static void atfork_prepare (void)
905{ 1100{
906 pthread_mutex_lock (&reqlock); 1101 LOCK (wrklock);
907 pthread_mutex_lock (&reslock); 1102 LOCK (reqlock);
1103 LOCK (reslock);
908#if !HAVE_PREADWRITE 1104#if !HAVE_PREADWRITE
909 pthread_mutex_lock (&preadwritelock); 1105 LOCK (preadwritelock);
910#endif 1106#endif
911#if !HAVE_READDIR_R 1107#if !HAVE_READDIR_R
912 pthread_mutex_lock (&readdirlock); 1108 LOCK (readdirlock);
913#endif 1109#endif
914} 1110}
915 1111
916static void atfork_parent (void) 1112static void atfork_parent (void)
917{ 1113{
918#if !HAVE_READDIR_R 1114#if !HAVE_READDIR_R
919 pthread_mutex_unlock (&readdirlock); 1115 UNLOCK (readdirlock);
920#endif 1116#endif
921#if !HAVE_PREADWRITE 1117#if !HAVE_PREADWRITE
922 pthread_mutex_unlock (&preadwritelock); 1118 UNLOCK (preadwritelock);
923#endif 1119#endif
924 pthread_mutex_unlock (&reslock); 1120 UNLOCK (reslock);
925 pthread_mutex_unlock (&reqlock); 1121 UNLOCK (reqlock);
1122 UNLOCK (wrklock);
926} 1123}
927 1124
928static void atfork_child (void) 1125static void atfork_child (void)
929{ 1126{
930 aio_req prv; 1127 aio_req prv;
931 1128
1129 while (prv = reqq_shift (&req_queue))
1130 req_free (prv);
1131
1132 while (prv = reqq_shift (&res_queue))
1133 req_free (prv);
1134
1135 while (wrk_first.next != &wrk_first)
1136 {
1137 worker *wrk = wrk_first.next;
1138
1139 if (wrk->req)
1140 req_free (wrk->req);
1141
1142 worker_clear (wrk);
1143 worker_free (wrk);
1144 }
1145
932 started = 0; 1146 started = 0;
933 1147 nreqs = 0;
934 while (reqs)
935 {
936 prv = reqs;
937 reqs = prv->next;
938 req_free (prv);
939 }
940
941 reqs = reqe = 0;
942
943 while (ress)
944 {
945 prv = ress;
946 ress = prv->next;
947 req_free (prv);
948 }
949
950 ress = rese = 0;
951 1148
952 close (respipe [0]); 1149 close (respipe [0]);
953 close (respipe [1]); 1150 close (respipe [1]);
954 create_pipe (); 1151 create_pipe ();
955 1152
985{ 1182{
986 HV *stash = gv_stashpv ("IO::AIO", 1); 1183 HV *stash = gv_stashpv ("IO::AIO", 1);
987 newCONSTSUB (stash, "EXDEV", newSViv (EXDEV)); 1184 newCONSTSUB (stash, "EXDEV", newSViv (EXDEV));
988 newCONSTSUB (stash, "O_RDONLY", newSViv (O_RDONLY)); 1185 newCONSTSUB (stash, "O_RDONLY", newSViv (O_RDONLY));
989 newCONSTSUB (stash, "O_WRONLY", newSViv (O_WRONLY)); 1186 newCONSTSUB (stash, "O_WRONLY", newSViv (O_WRONLY));
1187 newCONSTSUB (stash, "O_CREAT", newSViv (O_CREAT));
1188 newCONSTSUB (stash, "O_TRUNC", newSViv (O_TRUNC));
990 1189
991 create_pipe (); 1190 create_pipe ();
992 pthread_atfork (atfork_prepare, atfork_parent, atfork_child); 1191 pthread_atfork (atfork_prepare, atfork_parent, atfork_child);
993}
994 1192
1193 start_thread ();
1194}
1195
995void 1196void
996min_parallel (nthreads) 1197min_parallel (int nthreads)
997 int nthreads
998 PROTOTYPE: $ 1198 PROTOTYPE: $
999 1199
1000void 1200void
1001max_parallel (nthreads) 1201max_parallel (int nthreads)
1002 int nthreads
1003 PROTOTYPE: $ 1202 PROTOTYPE: $
1004 1203
1005int 1204int
1006max_outstanding (nreqs) 1205max_outstanding (int maxreqs)
1007 int nreqs 1206 PROTOTYPE: $
1008 PROTOTYPE: $
1009 CODE: 1207 CODE:
1010 RETVAL = max_outstanding; 1208 RETVAL = max_outstanding;
1011 max_outstanding = nreqs; 1209 max_outstanding = maxreqs;
1210 OUTPUT:
1211 RETVAL
1012 1212
1013void 1213void
1014aio_open (pathname,flags,mode,callback=&PL_sv_undef) 1214aio_open (pathname,flags,mode,callback=&PL_sv_undef)
1015 SV * pathname 1215 SV * pathname
1016 int flags 1216 int flags
1231 1431
1232 REQ_SEND; 1432 REQ_SEND;
1233} 1433}
1234 1434
1235void 1435void
1436aio_mknod (pathname,mode,dev,callback=&PL_sv_undef)
1437 SV * pathname
1438 SV * callback
1439 UV mode
1440 UV dev
1441 PPCODE:
1442{
1443 dREQ;
1444
1445 req->type = REQ_MKNOD;
1446 req->data = newSVsv (pathname);
1447 req->dataptr = SvPVbyte_nolen (req->data);
1448 req->mode = (mode_t)mode;
1449 req->offset = dev;
1450
1451 REQ_SEND;
1452}
1453
1454void
1236aio_sleep (delay,callback=&PL_sv_undef) 1455aio_busy (delay,callback=&PL_sv_undef)
1237 double delay 1456 double delay
1238 SV * callback 1457 SV * callback
1239 PPCODE: 1458 PPCODE:
1240{ 1459{
1241 dREQ; 1460 dREQ;
1242 1461
1243 req->type = REQ_SLEEP; 1462 req->type = REQ_BUSY;
1244 req->fd = delay < 0. ? 0 : delay; 1463 req->fd = delay < 0. ? 0 : delay;
1245 req->fd2 = delay < 0. ? 0 : 1000. * (delay - req->fd); 1464 req->fd2 = delay < 0. ? 0 : 1000. * (delay - req->fd);
1246 1465
1247 REQ_SEND; 1466 REQ_SEND;
1248} 1467}
1271 req->type = REQ_NOP; 1490 req->type = REQ_NOP;
1272 1491
1273 REQ_SEND; 1492 REQ_SEND;
1274} 1493}
1275 1494
1276#if 0 1495int
1277 1496aioreq_pri (int pri = 0)
1278void 1497 PROTOTYPE: ;$
1279aio_pri (int pri = DEFAULT_PRI)
1280 CODE: 1498 CODE:
1499 RETVAL = next_pri - PRI_BIAS;
1500 if (items > 0)
1501 {
1281 if (pri < PRI_MIN) pri = PRI_MIN; 1502 if (pri < PRI_MIN) pri = PRI_MIN;
1282 if (pri > PRI_MAX) pri = PRI_MAX; 1503 if (pri > PRI_MAX) pri = PRI_MAX;
1283 next_pri = pri + PRI_BIAS; 1504 next_pri = pri + PRI_BIAS;
1505 }
1506 OUTPUT:
1507 RETVAL
1284 1508
1285#endif 1509void
1510aioreq_nice (int nice = 0)
1511 CODE:
1512 nice = next_pri - nice;
1513 if (nice < PRI_MIN) nice = PRI_MIN;
1514 if (nice > PRI_MAX) nice = PRI_MAX;
1515 next_pri = nice + PRI_BIAS;
1286 1516
1287void 1517void
1288flush () 1518flush ()
1289 PROTOTYPE: 1519 PROTOTYPE:
1290 CODE: 1520 CODE:
1291 while (nreqs) 1521 while (nreqs)
1292 { 1522 {
1293 poll_wait (); 1523 poll_wait ();
1294 poll_cb (); 1524 poll_cb (0);
1295 } 1525 }
1296 1526
1297void 1527void
1298poll() 1528poll()
1299 PROTOTYPE: 1529 PROTOTYPE:
1300 CODE: 1530 CODE:
1301 if (nreqs) 1531 if (nreqs)
1302 { 1532 {
1303 poll_wait (); 1533 poll_wait ();
1304 poll_cb (); 1534 poll_cb (0);
1305 } 1535 }
1306 1536
1307int 1537int
1308poll_fileno() 1538poll_fileno()
1309 PROTOTYPE: 1539 PROTOTYPE:
1314 1544
1315int 1545int
1316poll_cb(...) 1546poll_cb(...)
1317 PROTOTYPE: 1547 PROTOTYPE:
1318 CODE: 1548 CODE:
1319 RETVAL = poll_cb (); 1549 RETVAL = poll_cb (0);
1550 OUTPUT:
1551 RETVAL
1552
1553int
1554poll_some(int max = 0)
1555 PROTOTYPE: $
1556 CODE:
1557 RETVAL = poll_cb (max);
1320 OUTPUT: 1558 OUTPUT:
1321 RETVAL 1559 RETVAL
1322 1560
1323void 1561void
1324poll_wait() 1562poll_wait()
1333 CODE: 1571 CODE:
1334 RETVAL = nreqs; 1572 RETVAL = nreqs;
1335 OUTPUT: 1573 OUTPUT:
1336 RETVAL 1574 RETVAL
1337 1575
1576int
1577nready()
1578 PROTOTYPE:
1579 CODE:
1580 RETVAL = get_nready ();
1581 OUTPUT:
1582 RETVAL
1583
1584int
1585npending()
1586 PROTOTYPE:
1587 CODE:
1588 RETVAL = get_npending ();
1589 OUTPUT:
1590 RETVAL
1591
1338PROTOTYPES: DISABLE 1592PROTOTYPES: DISABLE
1339 1593
1340MODULE = IO::AIO PACKAGE = IO::AIO::REQ 1594MODULE = IO::AIO PACKAGE = IO::AIO::REQ
1341 1595
1342void 1596void
1343cancel (aio_req_ornot req) 1597cancel (aio_req_ornot req)
1344 PROTOTYPE:
1345 CODE: 1598 CODE:
1346 req_cancel (req); 1599 req_cancel (req);
1347 1600
1348void 1601void
1349cb (aio_req_ornot req, SV *callback=&PL_sv_undef) 1602cb (aio_req_ornot req, SV *callback=&PL_sv_undef)
1385 } 1638 }
1386 } 1639 }
1387} 1640}
1388 1641
1389void 1642void
1643cancel_subs (aio_req_ornot req)
1644 CODE:
1645 req_cancel_subs (req);
1646
1647void
1390result (aio_req grp, ...) 1648result (aio_req grp, ...)
1391 CODE: 1649 CODE:
1392{ 1650{
1393 int i; 1651 int i;
1652 AV *av;
1653
1654 grp->errorno = errno;
1655
1394 AV *av = newAV (); 1656 av = newAV ();
1395 1657
1396 for (i = 1; i < items; ++i ) 1658 for (i = 1; i < items; ++i )
1397 av_push (av, newSVsv (ST (i))); 1659 av_push (av, newSVsv (ST (i)));
1398 1660
1399 SvREFCNT_dec (grp->data); 1661 SvREFCNT_dec (grp->data);
1400 grp->data = (SV *)av; 1662 grp->data = (SV *)av;
1401} 1663}
1402 1664
1403void 1665void
1666errno (aio_req grp, int errorno = errno)
1667 CODE:
1668 grp->errorno = errorno;
1669
1670void
1404feed_limit (aio_req grp, int limit) 1671limit (aio_req grp, int limit)
1405 CODE: 1672 CODE:
1406 grp->fd2 = limit; 1673 grp->fd2 = limit;
1407 aio_grp_feed (grp); 1674 aio_grp_feed (grp);
1408 1675
1409void 1676void
