/cvs/IO-AIO/AIO.xs

Comparing IO-AIO/AIO.xs (file contents):
Revision 1.64 by root, Mon Oct 23 23:54:41 2006 UTC vs.
Revision 1.84 by root, Sat Oct 28 01:24:19 2006 UTC

1#if __linux 1/* solaris */
2#define _POSIX_PTHREAD_SEMANTICS 1
3
4#if __linux && !defined(_GNU_SOURCE)
2# define _GNU_SOURCE 5# define _GNU_SOURCE
3#endif 6#endif
4 7
8/* just in case */
5#define _REENTRANT 1 9#define _REENTRANT 1
6 10
7#include <errno.h> 11#include <errno.h>
8 12
9#include "EXTERN.h" 13#include "EXTERN.h"
39# else 43# else
40# error sendfile support requested but not available 44# error sendfile support requested but not available
41# endif 45# endif
42#endif 46#endif
43 47
48/* number of seconds after which idle threads exit */
49#define IDLE_TIMEOUT 10
50
44/* used for struct dirent, AIX doesn't provide it */ 51/* used for struct dirent, AIX doesn't provide it */
45#ifndef NAME_MAX 52#ifndef NAME_MAX
46# define NAME_MAX 4096 53# define NAME_MAX 4096
47#endif 54#endif
48 55
56#ifndef PTHREAD_STACK_MIN
57/* care for broken platforms, e.g. windows */
58# define PTHREAD_STACK_MIN 16384
59#endif
60
49#if __ia64 61#if __ia64
50# define STACKSIZE 65536 62# define STACKSIZE 65536
63#elif __i386 || __x86_64 /* 16k is unreasonably high :( */
64# define STACKSIZE PTHREAD_STACK_MIN
51#else 65#else
52# define STACKSIZE 8192 66# define STACKSIZE 16384
67#endif
68
69/* whether word reads are potentially non-atomic.
70 * this is conservative, likely most arches this runs
71 * on have atomic word read/writes.
72 */
73#ifndef WORDREAD_UNSAFE
74# if __i386 || __x86_64
75# define WORDREAD_UNSAFE 0
76# else
77# define WORDREAD_UNSAFE 1
53#endif 78# endif
79#endif
80
81/* buffer size for various temporary buffers */
82#define AIO_BUFSIZE 65536
83
84#define dBUF \
85 char *aio_buf; \
86 LOCK (wrklock); \
87 self->dbuf = aio_buf = malloc (AIO_BUFSIZE); \
88 UNLOCK (wrklock); \
89 if (!aio_buf) \
90 return -1;
54 91
55enum { 92enum {
56 REQ_QUIT, 93 REQ_QUIT,
57 REQ_OPEN, REQ_CLOSE, 94 REQ_OPEN, REQ_CLOSE,
58 REQ_READ, REQ_WRITE, REQ_READAHEAD, 95 REQ_READ, REQ_WRITE, REQ_READAHEAD,
59 REQ_SENDFILE, 96 REQ_SENDFILE,
60 REQ_STAT, REQ_LSTAT, REQ_FSTAT, 97 REQ_STAT, REQ_LSTAT, REQ_FSTAT,
61 REQ_FSYNC, REQ_FDATASYNC, 98 REQ_FSYNC, REQ_FDATASYNC,
62 REQ_UNLINK, REQ_RMDIR, REQ_RENAME, 99 REQ_UNLINK, REQ_RMDIR, REQ_RENAME,
63 REQ_READDIR, 100 REQ_MKNOD, REQ_READDIR,
64 REQ_LINK, REQ_SYMLINK, 101 REQ_LINK, REQ_SYMLINK,
65 REQ_GROUP, REQ_NOP, 102 REQ_GROUP, REQ_NOP,
66 REQ_SLEEP, 103 REQ_BUSY,
67}; 104};
68 105
69#define AIO_REQ_KLASS "IO::AIO::REQ" 106#define AIO_REQ_KLASS "IO::AIO::REQ"
70#define AIO_GRP_KLASS "IO::AIO::GRP" 107#define AIO_GRP_KLASS "IO::AIO::GRP"
71 108
105 PRI_MIN = -4, 142 PRI_MIN = -4,
106 PRI_MAX = 4, 143 PRI_MAX = 4,
107 144
108 DEFAULT_PRI = 0, 145 DEFAULT_PRI = 0,
109 PRI_BIAS = -PRI_MIN, 146 PRI_BIAS = -PRI_MIN,
147 NUM_PRI = PRI_MAX + PRI_BIAS + 1,
110}; 148};
111 149
112static int next_pri = DEFAULT_PRI + PRI_BIAS; 150static int next_pri = DEFAULT_PRI + PRI_BIAS;
113 151
114static int started, wanted; 152static unsigned int started, idle, wanted;
115static volatile int nreqs;
116static int max_outstanding = 1<<30;
117static int respipe [2];
118 153
119#if __linux && defined (PTHREAD_ADAPTIVE_MUTEX_INITIALIZER_NP) 154#if __linux && defined (PTHREAD_ADAPTIVE_MUTEX_INITIALIZER_NP)
120# define AIO_MUTEX_INIT PTHREAD_ADAPTIVE_MUTEX_INITIALIZER_NP 155# define AIO_MUTEX_INIT PTHREAD_ADAPTIVE_MUTEX_INITIALIZER_NP
121#else 156#else
122# define AIO_MUTEX_INIT PTHREAD_MUTEX_INITIALIZER 157# define AIO_MUTEX_INIT PTHREAD_MUTEX_INITIALIZER
123#endif 158#endif
124 159
160#define LOCK(mutex) pthread_mutex_lock (&(mutex))
161#define UNLOCK(mutex) pthread_mutex_unlock (&(mutex))
162
163/* worker threads management */
164static pthread_mutex_t wrklock = AIO_MUTEX_INIT;
165
166typedef struct worker {
167 /* locked by wrklock */
168 struct worker *prev, *next;
169
170 pthread_t tid;
171
172 /* locked by reslock, reqlock or wrklock */
173 aio_req req; /* currently processed request */
174 void *dbuf;
175 DIR *dirp;
176} worker;
177
178static worker wrk_first = { &wrk_first, &wrk_first, 0 };
179
180static void worker_clear (worker *wrk)
181{
182 if (wrk->dirp)
183 {
184 closedir (wrk->dirp);
185 wrk->dirp = 0;
186 }
187
188 if (wrk->dbuf)
189 {
190 free (wrk->dbuf);
191 wrk->dbuf = 0;
192 }
193}
194
195static void worker_free (worker *wrk)
196{
197 wrk->next->prev = wrk->prev;
198 wrk->prev->next = wrk->next;
199
200 free (wrk);
201}
202
203static volatile unsigned int nreqs, nready, npending;
204static volatile unsigned int max_idle = 4;
205static volatile unsigned int max_outstanding = 0xffffffff;
206static int respipe [2];
207
125static pthread_mutex_t reslock = AIO_MUTEX_INIT; 208static pthread_mutex_t reslock = AIO_MUTEX_INIT;
126static pthread_mutex_t reqlock = AIO_MUTEX_INIT; 209static pthread_mutex_t reqlock = AIO_MUTEX_INIT;
127static pthread_cond_t reqwait = PTHREAD_COND_INITIALIZER; 210static pthread_cond_t reqwait = PTHREAD_COND_INITIALIZER;
128 211
129static volatile aio_req reqs, reqe; /* queue start, queue end */ 212#if WORDREAD_UNSAFE
130static volatile aio_req ress, rese; /* queue start, queue end */
131 213
214static unsigned int get_nready ()
215{
216 unsigned int retval;
217
218 LOCK (reqlock);
219 retval = nready;
220 UNLOCK (reqlock);
221
222 return retval;
223}
224
225static unsigned int get_npending ()
226{
227 unsigned int retval;
228
229 LOCK (reslock);
230 retval = npending;
231 UNLOCK (reslock);
232
233 return retval;
234}
235
236#else
237
238# define get_nready() nready
239# define get_npending() npending
240
241#endif
242
243/*
244 * a somewhat faster data structure might be nice, but
245 * with 8 priorities this actually needs <20 insns
246 * per shift, the most expensive operation.
247 */
248typedef struct {
249 aio_req qs[NUM_PRI], qe[NUM_PRI]; /* qstart, qend */
250 int size;
251} reqq;
252
253static reqq req_queue;
254static reqq res_queue;
255
256int reqq_push (reqq *q, aio_req req)
257{
258 int pri = req->pri;
259 req->next = 0;
260
261 if (q->qe[pri])
262 {
263 q->qe[pri]->next = req;
264 q->qe[pri] = req;
265 }
266 else
267 q->qe[pri] = q->qs[pri] = req;
268
269 return q->size++;
270}
271
272aio_req reqq_shift (reqq *q)
273{
274 int pri;
275
276 if (!q->size)
277 return 0;
278
279 --q->size;
280
281 for (pri = NUM_PRI; pri--; )
282 {
283 aio_req req = q->qs[pri];
284
285 if (req)
286 {
287 if (!(q->qs[pri] = req->next))
288 q->qe[pri] = 0;
289
290 return req;
291 }
292 }
293
294 abort ();
295}
296
297static int poll_cb (int max);
132static void req_invoke (aio_req req); 298static void req_invoke (aio_req req);
133static void req_free (aio_req req); 299static void req_free (aio_req req);
300static void req_cancel (aio_req req);
134 301
135/* must be called at most once */ 302/* must be called at most once */
136static SV *req_sv (aio_req req, const char *klass) 303static SV *req_sv (aio_req req, const char *klass)
137{ 304{
138 if (!req->self) 305 if (!req->self)
169 ENTER; 336 ENTER;
170 SAVETMPS; 337 SAVETMPS;
171 PUSHMARK (SP); 338 PUSHMARK (SP);
172 XPUSHs (req_sv (grp, AIO_GRP_KLASS)); 339 XPUSHs (req_sv (grp, AIO_GRP_KLASS));
173 PUTBACK; 340 PUTBACK;
174 call_sv (grp->fh2, G_VOID | G_EVAL); 341 call_sv (grp->fh2, G_VOID | G_EVAL | G_KEEPERR);
175 SPAGAIN; 342 SPAGAIN;
176 FREETMPS; 343 FREETMPS;
177 LEAVE; 344 LEAVE;
178 } 345 }
179 346
200 req_invoke (grp); 367 req_invoke (grp);
201 req_free (grp); 368 req_free (grp);
202 } 369 }
203} 370}
204 371
205static void poll_wait ()
206{
207 fd_set rfd;
208
209 while (nreqs)
210 {
211 aio_req req;
212#if !(__x86 || __x86_64) /* safe without semaphore on these archs */
213 pthread_mutex_lock (&reslock);
214#endif
215 req = ress;
216#if !(__x86 || __x86_64) /* safe without semaphore on these archs */
217 pthread_mutex_unlock (&reslock);
218#endif
219
220 if (req)
221 return;
222
223 FD_ZERO(&rfd);
224 FD_SET(respipe [0], &rfd);
225
226 select (respipe [0] + 1, &rfd, 0, 0, 0);
227 }
228}
229
230static void req_invoke (aio_req req) 372static void req_invoke (aio_req req)
231{ 373{
232 dSP; 374 dSP;
233 int errorno = errno;
234 375
235 if (req->flags & FLAG_CANCELLED || !SvOK (req->callback)) 376 if (!(req->flags & FLAG_CANCELLED) && SvOK (req->callback))
236 return; 377 {
237
238 errno = req->errorno;
239
240 ENTER; 378 ENTER;
241 SAVETMPS; 379 SAVETMPS;
242 PUSHMARK (SP); 380 PUSHMARK (SP);
243 EXTEND (SP, 1); 381 EXTEND (SP, 1);
244 382
245 switch (req->type) 383 switch (req->type)
246 {
247 case REQ_READDIR:
248 { 384 {
249 SV *rv = &PL_sv_undef; 385 case REQ_READDIR:
250
251 if (req->result >= 0)
252 { 386 {
253 char *buf = req->data2ptr; 387 SV *rv = &PL_sv_undef;
254 AV *av = newAV ();
255 388
256 while (req->result) 389 if (req->result >= 0)
257 { 390 {
391 int i;
392 char *buf = req->data2ptr;
393 AV *av = newAV ();
394
395 av_extend (av, req->result - 1);
396
397 for (i = 0; i < req->result; ++i)
398 {
258 SV *sv = newSVpv (buf, 0); 399 SV *sv = newSVpv (buf, 0);
259 400
260 av_push (av, sv); 401 av_store (av, i, sv);
261 buf += SvCUR (sv) + 1; 402 buf += SvCUR (sv) + 1;
262 req->result--; 403 }
404
405 rv = sv_2mortal (newRV_noinc ((SV *)av));
263 } 406 }
264 407
265 rv = sv_2mortal (newRV_noinc ((SV *)av)); 408 PUSHs (rv);
266 } 409 }
410 break;
267 411
268 PUSHs (rv); 412 case REQ_OPEN:
413 {
414 /* convert fd to fh */
415 SV *fh;
416
417 PUSHs (sv_2mortal (newSViv (req->result)));
418 PUTBACK;
419 call_pv ("IO::AIO::_fd2fh", G_SCALAR | G_EVAL);
420 SPAGAIN;
421
422 fh = SvREFCNT_inc (POPs);
423
424 PUSHMARK (SP);
425 XPUSHs (sv_2mortal (fh));
426 }
427 break;
428
429 case REQ_GROUP:
430 req->fd = 2; /* mark group as finished */
431
432 if (req->data)
433 {
434 int i;
435 AV *av = (AV *)req->data;
436
437 EXTEND (SP, AvFILL (av) + 1);
438 for (i = 0; i <= AvFILL (av); ++i)
439 PUSHs (*av_fetch (av, i, 0));
440 }
441 break;
442
443 case REQ_NOP:
444 case REQ_BUSY:
445 break;
446
447 default:
448 PUSHs (sv_2mortal (newSViv (req->result)));
449 break;
269 } 450 }
270 break;
271 451
272 case REQ_OPEN: 452 errno = req->errorno;
273 {
274 /* convert fd to fh */
275 SV *fh;
276 453
277 PUSHs (sv_2mortal (newSViv (req->result)));
278 PUTBACK; 454 PUTBACK;
279 call_pv ("IO::AIO::_fd2fh", G_SCALAR | G_EVAL);
280 SPAGAIN;
281
282 fh = SvREFCNT_inc (POPs);
283
284 PUSHMARK (SP);
285 XPUSHs (sv_2mortal (fh));
286 }
287 break;
288
289 case REQ_GROUP:
290 req->fd = 2; /* mark group as finished */
291
292 if (req->data)
293 {
294 int i;
295 AV *av = (AV *)req->data;
296
297 EXTEND (SP, AvFILL (av) + 1);
298 for (i = 0; i <= AvFILL (av); ++i)
299 PUSHs (*av_fetch (av, i, 0));
300 }
301 break;
302
303 case REQ_NOP:
304 case REQ_SLEEP:
305 break;
306
307 default:
308 PUSHs (sv_2mortal (newSViv (req->result)));
309 break;
310 }
311
312
313 PUTBACK;
314 call_sv (req->callback, G_VOID | G_EVAL); 455 call_sv (req->callback, G_VOID | G_EVAL);
315 SPAGAIN; 456 SPAGAIN;
316 457
317 FREETMPS; 458 FREETMPS;
318 LEAVE; 459 LEAVE;
319
320 errno = errorno;
321
322 if (SvTRUE (ERRSV))
323 { 460 }
324 req_free (req);
325 croak (0);
326 }
327}
328 461
329static void req_free (aio_req req)
330{
331 if (req->grp) 462 if (req->grp)
332 { 463 {
333 aio_req grp = req->grp; 464 aio_req grp = req->grp;
334 465
335 /* unlink request */ 466 /* unlink request */
340 grp->grp_first = req->grp_next; 471 grp->grp_first = req->grp_next;
341 472
342 aio_grp_dec (grp); 473 aio_grp_dec (grp);
343 } 474 }
344 475
476 if (SvTRUE (ERRSV))
477 {
478 req_free (req);
479 croak (0);
480 }
481}
482
483static void req_free (aio_req req)
484{
345 if (req->self) 485 if (req->self)
346 { 486 {
347 sv_unmagic (req->self, PERL_MAGIC_ext); 487 sv_unmagic (req->self, PERL_MAGIC_ext);
348 SvREFCNT_dec (req->self); 488 SvREFCNT_dec (req->self);
349 } 489 }
352 SvREFCNT_dec (req->fh); 492 SvREFCNT_dec (req->fh);
353 SvREFCNT_dec (req->fh2); 493 SvREFCNT_dec (req->fh2);
354 SvREFCNT_dec (req->callback); 494 SvREFCNT_dec (req->callback);
355 Safefree (req->statdata); 495 Safefree (req->statdata);
356 496
357 if (req->type == REQ_READDIR && req->result >= 0) 497 if (req->type == REQ_READDIR)
358 free (req->data2ptr); 498 free (req->data2ptr);
359 499
360 Safefree (req); 500 Safefree (req);
361} 501}
362 502
503static void req_cancel_subs (aio_req grp)
504{
505 aio_req sub;
506
507 if (grp->type != REQ_GROUP)
508 return;
509
510 SvREFCNT_dec (grp->fh2);
511 grp->fh2 = 0;
512
513 for (sub = grp->grp_first; sub; sub = sub->grp_next)
514 req_cancel (sub);
515}
516
363static void req_cancel (aio_req req) 517static void req_cancel (aio_req req)
364{ 518{
365 req->flags |= FLAG_CANCELLED; 519 req->flags |= FLAG_CANCELLED;
366 520
367 if (req->type == REQ_GROUP) 521 req_cancel_subs (req);
368 { 522}
369 aio_req sub;
370 523
371 for (sub = req->grp_first; sub; sub = sub->grp_next) 524static void *aio_proc(void *arg);
372 req_cancel (sub); 525
526static void start_thread (void)
527{
528 sigset_t fullsigset, oldsigset;
529 pthread_attr_t attr;
530
531 worker *wrk = calloc (1, sizeof (worker));
532
533 if (!wrk)
534 croak ("unable to allocate worker thread data");
535
536 pthread_attr_init (&attr);
537 pthread_attr_setstacksize (&attr, STACKSIZE);
538 pthread_attr_setdetachstate (&attr, PTHREAD_CREATE_DETACHED);
539#ifdef PTHREAD_SCOPE_PROCESS
540 pthread_attr_setscope (&attr, PTHREAD_SCOPE_PROCESS);
541#endif
542
543 sigfillset (&fullsigset);
544
545 LOCK (wrklock);
546 sigprocmask (SIG_SETMASK, &fullsigset, &oldsigset);
547
548 if (pthread_create (&wrk->tid, &attr, aio_proc, (void *)wrk) == 0)
373 } 549 {
374} 550 wrk->prev = &wrk_first;
551 wrk->next = wrk_first.next;
552 wrk_first.next->prev = wrk;
553 wrk_first.next = wrk;
554 ++started;
555 }
556 else
557 free (wrk);
375 558
559 sigprocmask (SIG_SETMASK, &oldsigset, 0);
560 UNLOCK (wrklock);
561}
562
563static void maybe_start_thread ()
564{
565 if (started >= wanted)
566 return;
567
568 /* todo: maybe use idle here, but might be less exact */
569 if ((int)nready <= (int)started - (int)(nreqs - get_nready () - get_npending ()))
570 return;
571
572 start_thread ();
573}
574
575static void req_send (aio_req req)
576{
577 ++nreqs;
578
579 LOCK (reqlock);
580 ++nready;
581 reqq_push (&req_queue, req);
582 pthread_cond_signal (&reqwait);
583 UNLOCK (reqlock);
584
585 maybe_start_thread ();
586}
587
588static void end_thread (void)
589{
590 aio_req req;
591
592 Newz (0, req, 1, aio_cb);
593
594 req->type = REQ_QUIT;
595 req->pri = PRI_MAX + PRI_BIAS;
596
597 LOCK (reqlock);
598 reqq_push (&req_queue, req);
599 pthread_cond_signal (&reqwait);
600 UNLOCK (reqlock);
601
602 LOCK (wrklock);
603 --started;
604 UNLOCK (wrklock);
605}
606
607static void min_parallel (int nthreads)
608{
609 if (wanted < nthreads)
610 wanted = nthreads;
611}
612
613static void max_parallel (int nthreads)
614{
615 if (wanted > nthreads)
616 wanted = nthreads;
617
618 while (started > wanted)
619 end_thread ();
620}
621
622static void poll_wait ()
623{
624 fd_set rfd;
625
626 while (nreqs)
627 {
628 int size;
629 if (WORDREAD_UNSAFE) LOCK (reslock);
630 size = res_queue.size;
631 if (WORDREAD_UNSAFE) UNLOCK (reslock);
632
633 if (size)
634 return;
635
636 maybe_start_thread ();
637
638 FD_ZERO(&rfd);
639 FD_SET(respipe [0], &rfd);
640
641 select (respipe [0] + 1, &rfd, 0, 0, 0);
642 }
643}
644
376static int poll_cb () 645static int poll_cb (int max)
377{ 646{
378 dSP; 647 dSP;
379 int count = 0; 648 int count = 0;
380 int do_croak = 0; 649 int do_croak = 0;
381 aio_req req; 650 aio_req req;
382 651
383 for (;;) 652 for (;;)
384 { 653 {
385 pthread_mutex_lock (&reslock); 654 while (max <= 0 || count < max)
386 req = ress;
387
388 if (req)
389 { 655 {
390 ress = req->next; 656 maybe_start_thread ();
391 657
658 LOCK (reslock);
659 req = reqq_shift (&res_queue);
660
392 if (!ress) 661 if (req)
393 { 662 {
663 --npending;
664
665 if (!res_queue.size)
666 {
394 /* read any signals sent by the worker threads */ 667 /* read any signals sent by the worker threads */
395 char buf [32]; 668 char buf [32];
396 while (read (respipe [0], buf, 32) == 32) 669 while (read (respipe [0], buf, 32) == 32)
670 ;
397 ; 671 }
398
399 rese = 0;
400 } 672 }
673
674 UNLOCK (reslock);
675
676 if (!req)
677 break;
678
679 --nreqs;
680
681 if (req->type == REQ_GROUP && req->length)
682 {
683 req->fd = 1; /* mark request as delayed */
684 continue;
685 }
686 else
687 {
688 if (req->type == REQ_READ)
689 SvCUR_set (req->data, req->dataoffset + (req->result > 0 ? req->result : 0));
690
691 if (req->data2ptr && (req->type == REQ_READ || req->type == REQ_WRITE))
692 SvREADONLY_off (req->data);
693
694 if (req->statdata)
695 {
696 PL_laststype = req->type == REQ_LSTAT ? OP_LSTAT : OP_STAT;
697 PL_laststatval = req->result;
698 PL_statcache = *(req->statdata);
699 }
700
701 req_invoke (req);
702
703 count++;
704 }
705
706 req_free (req);
401 } 707 }
402 708
403 pthread_mutex_unlock (&reslock); 709 if (nreqs <= max_outstanding)
404
405 if (!req)
406 break; 710 break;
407 711
408 --nreqs; 712 poll_wait ();
409 713
410 if (req->type == REQ_QUIT) 714 max = 0;
411 started--;
412 else if (req->type == REQ_GROUP && req->length)
413 {
414 req->fd = 1; /* mark request as delayed */
415 continue;
416 }
417 else
418 {
419 if (req->type == REQ_READ)
420 SvCUR_set (req->data, req->dataoffset + (req->result > 0 ? req->result : 0));
421
422 if (req->data2ptr && (req->type == REQ_READ || req->type == REQ_WRITE))
423 SvREADONLY_off (req->data);
424
425 if (req->statdata)
426 {
427 PL_laststype = req->type == REQ_LSTAT ? OP_LSTAT : OP_STAT;
428 PL_laststatval = req->result;
429 PL_statcache = *(req->statdata);
430 }
431
432 req_invoke (req);
433
434 count++;
435 }
436
437 req_free (req);
438 } 715 }
439 716
440 return count; 717 return count;
441}
442
443static void *aio_proc(void *arg);
444
445static void start_thread (void)
446{
447 sigset_t fullsigset, oldsigset;
448 pthread_t tid;
449 pthread_attr_t attr;
450
451 pthread_attr_init (&attr);
452 pthread_attr_setstacksize (&attr, STACKSIZE);
453 pthread_attr_setdetachstate (&attr, PTHREAD_CREATE_DETACHED);
454
455 sigfillset (&fullsigset);
456 sigprocmask (SIG_SETMASK, &fullsigset, &oldsigset);
457
458 if (pthread_create (&tid, &attr, aio_proc, 0) == 0)
459 started++;
460
461 sigprocmask (SIG_SETMASK, &oldsigset, 0);
462}
463
464static void req_send (aio_req req)
465{
466 while (started < wanted && nreqs >= started)
467 start_thread ();
468
469 ++nreqs;
470
471 pthread_mutex_lock (&reqlock);
472
473 req->next = 0;
474
475 if (reqe)
476 {
477 reqe->next = req;
478 reqe = req;
479 }
480 else
481 reqe = reqs = req;
482
483 pthread_cond_signal (&reqwait);
484 pthread_mutex_unlock (&reqlock);
485
486 if (nreqs > max_outstanding)
487 for (;;)
488 {
489 poll_cb ();
490
491 if (nreqs <= max_outstanding)
492 break;
493
494 poll_wait ();
495 }
496}
497
498static void end_thread (void)
499{
500 aio_req req;
501 Newz (0, req, 1, aio_cb);
502 req->type = REQ_QUIT;
503
504 req_send (req);
505}
506
507static void min_parallel (int nthreads)
508{
509 if (wanted < nthreads)
510 wanted = nthreads;
511}
512
513static void max_parallel (int nthreads)
514{
515 int cur = started;
516
517 if (wanted > nthreads)
518 wanted = nthreads;
519
520 while (cur > wanted)
521 {
522 end_thread ();
523 cur--;
524 }
525
526 while (started > wanted)
527 {
528 poll_wait ();
529 poll_cb ();
530 }
531} 718}
532 719
533static void create_pipe () 720static void create_pipe ()
534{ 721{
535 if (pipe (respipe)) 722 if (pipe (respipe))
559static ssize_t pread (int fd, void *buf, size_t count, off_t offset) 746static ssize_t pread (int fd, void *buf, size_t count, off_t offset)
560{ 747{
561 ssize_t res; 748 ssize_t res;
562 off_t ooffset; 749 off_t ooffset;
563 750
564 pthread_mutex_lock (&preadwritelock); 751 LOCK (preadwritelock);
565 ooffset = lseek (fd, 0, SEEK_CUR); 752 ooffset = lseek (fd, 0, SEEK_CUR);
566 lseek (fd, offset, SEEK_SET); 753 lseek (fd, offset, SEEK_SET);
567 res = read (fd, buf, count); 754 res = read (fd, buf, count);
568 lseek (fd, ooffset, SEEK_SET); 755 lseek (fd, ooffset, SEEK_SET);
569 pthread_mutex_unlock (&preadwritelock); 756 UNLOCK (preadwritelock);
570 757
571 return res; 758 return res;
572} 759}
573 760
574static ssize_t pwrite (int fd, void *buf, size_t count, off_t offset) 761static ssize_t pwrite (int fd, void *buf, size_t count, off_t offset)
575{ 762{
576 ssize_t res; 763 ssize_t res;
577 off_t ooffset; 764 off_t ooffset;
578 765
579 pthread_mutex_lock (&preadwritelock); 766 LOCK (preadwritelock);
580 ooffset = lseek (fd, 0, SEEK_CUR); 767 ooffset = lseek (fd, 0, SEEK_CUR);
581 lseek (fd, offset, SEEK_SET); 768 lseek (fd, offset, SEEK_SET);
582 res = write (fd, buf, count); 769 res = write (fd, buf, count);
583 lseek (fd, offset, SEEK_SET); 770 lseek (fd, offset, SEEK_SET);
584 pthread_mutex_unlock (&preadwritelock); 771 UNLOCK (preadwritelock);
585 772
586 return res; 773 return res;
587} 774}
588#endif 775#endif
589 776
590#if !HAVE_FDATASYNC 777#if !HAVE_FDATASYNC
591# define fdatasync fsync 778# define fdatasync fsync
592#endif 779#endif
593 780
594#if !HAVE_READAHEAD 781#if !HAVE_READAHEAD
595# define readahead aio_readahead 782# define readahead(fd,offset,count) aio_readahead (fd, offset, count, self)
596 783
597static ssize_t readahead (int fd, off_t offset, size_t count) 784static ssize_t aio_readahead (int fd, off_t offset, size_t count, worker *self)
598{ 785{
599 char readahead_buf[4096]; 786 dBUF;
600 787
601 while (count > 0) 788 while (count > 0)
602 { 789 {
603 size_t len = count < sizeof (readahead_buf) ? count : sizeof (readahead_buf); 790 size_t len = count < AIO_BUFSIZE ? count : AIO_BUFSIZE;
604 791
605 pread (fd, readahead_buf, len, offset); 792 pread (fd, aio_buf, len, offset);
606 offset += len; 793 offset += len;
607 count -= len; 794 count -= len;
608 } 795 }
609 796
610 errno = 0; 797 errno = 0;
611} 798}
799
612#endif 800#endif
613 801
614#if !HAVE_READDIR_R 802#if !HAVE_READDIR_R
615# define readdir_r aio_readdir_r 803# define readdir_r aio_readdir_r
616 804
619static int readdir_r (DIR *dirp, struct dirent *ent, struct dirent **res) 807static int readdir_r (DIR *dirp, struct dirent *ent, struct dirent **res)
620{ 808{
621 struct dirent *e; 809 struct dirent *e;
622 int errorno; 810 int errorno;
623 811
624 pthread_mutex_lock (&readdirlock); 812 LOCK (readdirlock);
625 813
626 e = readdir (dirp); 814 e = readdir (dirp);
627 errorno = errno; 815 errorno = errno;
628 816
629 if (e) 817 if (e)
632 strcpy (ent->d_name, e->d_name); 820 strcpy (ent->d_name, e->d_name);
633 } 821 }
634 else 822 else
635 *res = 0; 823 *res = 0;
636 824
637 pthread_mutex_unlock (&readdirlock); 825 UNLOCK (readdirlock);
638 826
639 errno = errorno; 827 errno = errorno;
640 return e ? 0 : -1; 828 return e ? 0 : -1;
641} 829}
642#endif 830#endif
643 831
644/* sendfile always needs emulation */ 832/* sendfile always needs emulation */
645static ssize_t sendfile_ (int ofd, int ifd, off_t offset, size_t count) 833static ssize_t sendfile_ (int ofd, int ifd, off_t offset, size_t count, worker *self)
646{ 834{
647 ssize_t res; 835 ssize_t res;
648 836
649 if (!count) 837 if (!count)
650 return 0; 838 return 0;
661 { 849 {
662 off_t sbytes; 850 off_t sbytes;
663 res = sendfile (ifd, ofd, offset, count, 0, &sbytes, 0); 851 res = sendfile (ifd, ofd, offset, count, 0, &sbytes, 0);
664 852
665 if (res < 0 && sbytes) 853 if (res < 0 && sbytes)
666 /* maybe only on EAGAIN only: as usual, the manpage leaves you guessing */ 854 /* maybe only on EAGAIN: as usual, the manpage leaves you guessing */
667 res = sbytes; 855 res = sbytes;
668 } 856 }
669 857
670# elif __hpux 858# elif __hpux
671 res = sendfile (ofd, ifd, offset, count, 0, 0); 859 res = sendfile (ofd, ifd, offset, count, 0, 0);
699#endif 887#endif
700 ) 888 )
701 ) 889 )
702 { 890 {
703 /* emulate sendfile. this is a major pain in the ass */ 891 /* emulate sendfile. this is a major pain in the ass */
704 char buf[4096]; 892 dBUF;
893
705 res = 0; 894 res = 0;
706 895
707 while (count) 896 while (count)
708 { 897 {
709 ssize_t cnt; 898 ssize_t cnt;
710 899
711 cnt = pread (ifd, buf, count > 4096 ? 4096 : count, offset); 900 cnt = pread (ifd, aio_buf, count > AIO_BUFSIZE ? AIO_BUFSIZE : count, offset);
712 901
713 if (cnt <= 0) 902 if (cnt <= 0)
714 { 903 {
715 if (cnt && !res) res = -1; 904 if (cnt && !res) res = -1;
716 break; 905 break;
717 } 906 }
718 907
719 cnt = write (ofd, buf, cnt); 908 cnt = write (ofd, aio_buf, cnt);
720 909
721 if (cnt <= 0) 910 if (cnt <= 0)
722 { 911 {
723 if (cnt && !res) res = -1; 912 if (cnt && !res) res = -1;
724 break; 913 break;
732 921
733 return res; 922 return res;
734} 923}
735 924
736/* read a full directory */ 925/* read a full directory */
737static int scandir_ (const char *path, void **namesp) 926static void scandir_ (aio_req req, worker *self)
738{ 927{
739 DIR *dirp = opendir (path); 928 DIR *dirp;
740 union 929 union
741 { 930 {
742 struct dirent d; 931 struct dirent d;
743 char b [offsetof (struct dirent, d_name) + NAME_MAX + 1]; 932 char b [offsetof (struct dirent, d_name) + NAME_MAX + 1];
744 } u; 933 } *u;
745 struct dirent *entp; 934 struct dirent *entp;
746 char *name, *names; 935 char *name, *names;
747 int memlen = 4096; 936 int memlen = 4096;
748 int memofs = 0; 937 int memofs = 0;
749 int res = 0; 938 int res = 0;
750 int errorno; 939 int errorno;
751 940
752 if (!dirp) 941 LOCK (wrklock);
753 return -1; 942 self->dirp = dirp = opendir (req->dataptr);
754 943 self->dbuf = u = malloc (sizeof (*u));
755 names = malloc (memlen); 944 req->data2ptr = names = malloc (memlen);
945 UNLOCK (wrklock);
946
947 if (dirp && u && names)
948 for (;;)
949 {
950 errno = 0;
951 readdir_r (dirp, &u->d, &entp);
952
953 if (!entp)
954 break;
955
956 name = entp->d_name;
957
958 if (name [0] != '.' || (name [1] && (name [1] != '.' || name [2])))
959 {
960 int len = strlen (name) + 1;
961
962 res++;
963
964 while (memofs + len > memlen)
965 {
966 memlen *= 2;
967 LOCK (wrklock);
968 req->data2ptr = names = realloc (names, memlen);
969 UNLOCK (wrklock);
970
971 if (!names)
972 break;
973 }
974
975 memcpy (names + memofs, name, len);
976 memofs += len;
977 }
978 }
979
980 if (errno)
981 res = -1;
982
983 req->result = res;
984}
985
986/*****************************************************************************/
987
988static void *aio_proc (void *thr_arg)
989{
990 aio_req req;
991 struct timespec ts;
992 worker *self = (worker *)thr_arg;
993
994 /* try to distribute timeouts somewhat evenly */
995 ts.tv_nsec = (((unsigned long)self + (unsigned long)ts.tv_sec) & 1023UL)
996 * (1000000000UL / 1024UL);
756 997
757 for (;;) 998 for (;;)
758 { 999 {
759 errno = 0, readdir_r (dirp, &u.d, &entp); 1000 ts.tv_sec = time (0) + IDLE_TIMEOUT;
760 1001
761 if (!entp) 1002 LOCK (reqlock);
762 break;
763
764 name = entp->d_name;
765
766 if (name [0] != '.' || (name [1] && (name [1] != '.' || name [2])))
767 {
768 int len = strlen (name) + 1;
769
770 res++;
771
772 while (memofs + len > memlen)
773 {
774 memlen *= 2;
775 names = realloc (names, memlen);
776 if (!names)
777 break;
778 }
779
780 memcpy (names + memofs, name, len);
781 memofs += len;
782 }
783 }
784
785 errorno = errno;
786 closedir (dirp);
787
788 if (errorno)
789 {
790 free (names);
791 errno = errorno;
792 res = -1;
793 }
794
795 *namesp = (void *)names;
796 return res;
797}
798
799/*****************************************************************************/
800
801static void *aio_proc (void *thr_arg)
802{
803 aio_req req;
804 int type;
805
806 do
807 {
808 pthread_mutex_lock (&reqlock);
809 1003
810 for (;;) 1004 for (;;)
811 { 1005 {
812 req = reqs; 1006 self->req = req = reqq_shift (&req_queue);
813
814 if (reqs)
815 {
816 reqs = reqs->next;
817 if (!reqs) reqe = 0;
818 }
819 1007
820 if (req) 1008 if (req)
821 break; 1009 break;
822 1010
1011 ++idle;
1012
1013 if (pthread_cond_timedwait (&reqwait, &reqlock, &ts)
1014 == ETIMEDOUT)
1015 {
1016 if (idle > max_idle)
1017 {
1018 --idle;
1019 UNLOCK (reqlock);
1020 LOCK (wrklock);
1021 --started;
1022 UNLOCK (wrklock);
1023 goto quit;
1024 }
1025
1026 /* we are allowed to idle, so do so without any timeout */
823 pthread_cond_wait (&reqwait, &reqlock); 1027 pthread_cond_wait (&reqwait, &reqlock);
1028 ts.tv_sec = time (0) + IDLE_TIMEOUT;
1029 }
1030
1031 --idle;
824 } 1032 }
825 1033
826 pthread_mutex_unlock (&reqlock); 1034 --nready;
1035
1036 UNLOCK (reqlock);
827 1037
828 errno = 0; /* strictly unnecessary */ 1038 errno = 0; /* strictly unnecessary */
829 type = req->type; /* remember type for QUIT check */
830 1039
831 if (!(req->flags & FLAG_CANCELLED)) 1040 if (!(req->flags & FLAG_CANCELLED))
832 switch (type) 1041 switch (req->type)
833 { 1042 {
834 case REQ_READ: req->result = pread (req->fd, req->dataptr, req->length, req->offset); break; 1043 case REQ_READ: req->result = pread (req->fd, req->dataptr, req->length, req->offset); break;
835 case REQ_WRITE: req->result = pwrite (req->fd, req->dataptr, req->length, req->offset); break; 1044 case REQ_WRITE: req->result = pwrite (req->fd, req->dataptr, req->length, req->offset); break;
836 1045
837 case REQ_READAHEAD: req->result = readahead (req->fd, req->offset, req->length); break; 1046 case REQ_READAHEAD: req->result = readahead (req->fd, req->offset, req->length); break;
838 case REQ_SENDFILE: req->result = sendfile_ (req->fd, req->fd2, req->offset, req->length); break; 1047 case REQ_SENDFILE: req->result = sendfile_ (req->fd, req->fd2, req->offset, req->length, self); break;
839 1048
840 case REQ_STAT: req->result = stat (req->dataptr, req->statdata); break; 1049 case REQ_STAT: req->result = stat (req->dataptr, req->statdata); break;
841 case REQ_LSTAT: req->result = lstat (req->dataptr, req->statdata); break; 1050 case REQ_LSTAT: req->result = lstat (req->dataptr, req->statdata); break;
842 case REQ_FSTAT: req->result = fstat (req->fd , req->statdata); break; 1051 case REQ_FSTAT: req->result = fstat (req->fd , req->statdata); break;
843 1052
846 case REQ_UNLINK: req->result = unlink (req->dataptr); break; 1055 case REQ_UNLINK: req->result = unlink (req->dataptr); break;
847 case REQ_RMDIR: req->result = rmdir (req->dataptr); break; 1056 case REQ_RMDIR: req->result = rmdir (req->dataptr); break;
848 case REQ_RENAME: req->result = rename (req->data2ptr, req->dataptr); break; 1057 case REQ_RENAME: req->result = rename (req->data2ptr, req->dataptr); break;
849 case REQ_LINK: req->result = link (req->data2ptr, req->dataptr); break; 1058 case REQ_LINK: req->result = link (req->data2ptr, req->dataptr); break;
850 case REQ_SYMLINK: req->result = symlink (req->data2ptr, req->dataptr); break; 1059 case REQ_SYMLINK: req->result = symlink (req->data2ptr, req->dataptr); break;
1060 case REQ_MKNOD: req->result = mknod (req->data2ptr, req->mode, (dev_t)req->offset); break;
851 1061
852 case REQ_FDATASYNC: req->result = fdatasync (req->fd); break; 1062 case REQ_FDATASYNC: req->result = fdatasync (req->fd); break;
853 case REQ_FSYNC: req->result = fsync (req->fd); break; 1063 case REQ_FSYNC: req->result = fsync (req->fd); break;
854 case REQ_READDIR: req->result = scandir_ (req->dataptr, &req->data2ptr); break; 1064 case REQ_READDIR: scandir_ (req, self); break;
855 1065
856 case REQ_SLEEP: 1066 case REQ_BUSY:
857 { 1067 {
858 struct timeval tv; 1068 struct timeval tv;
859 1069
860 tv.tv_sec = req->fd; 1070 tv.tv_sec = req->fd;
861 tv.tv_usec = req->fd2; 1071 tv.tv_usec = req->fd2;
863 req->result = select (0, 0, 0, 0, &tv); 1073 req->result = select (0, 0, 0, 0, &tv);
864 } 1074 }
865 1075
866 case REQ_GROUP: 1076 case REQ_GROUP:
867 case REQ_NOP: 1077 case REQ_NOP:
1078 break;
1079
868 case REQ_QUIT: 1080 case REQ_QUIT:
869 break; 1081 goto quit;
870 1082
871 default: 1083 default:
872 req->result = ENOSYS; 1084 req->result = ENOSYS;
873 break; 1085 break;
874 } 1086 }
875 1087
876 req->errorno = errno; 1088 req->errorno = errno;
877 1089
878 pthread_mutex_lock (&reslock); 1090 LOCK (reslock);
879 1091
880 req->next = 0; 1092 ++npending;
881 1093
882 if (rese) 1094 if (!reqq_push (&res_queue, req))
883 {
884 rese->next = req;
885 rese = req;
886 }
887 else
888 {
889 rese = ress = req;
890
891 /* write a dummy byte to the pipe so fh becomes ready */ 1095 /* write a dummy byte to the pipe so fh becomes ready */
892 write (respipe [1], &respipe, 1); 1096 write (respipe [1], &respipe, 1);
893 }
894 1097
895 pthread_mutex_unlock (&reslock); 1098 self->req = 0;
1099 worker_clear (self);
1100
1101 UNLOCK (reslock);
896 } 1102 }
897 while (type != REQ_QUIT); 1103
1104quit:
1105 LOCK (wrklock);
1106 worker_free (self);
1107 UNLOCK (wrklock);
898 1108
899 return 0; 1109 return 0;
900} 1110}
901 1111
902/*****************************************************************************/ 1112/*****************************************************************************/
903 1113
904static void atfork_prepare (void) 1114static void atfork_prepare (void)
905{ 1115{
906 pthread_mutex_lock (&reqlock); 1116 LOCK (wrklock);
907 pthread_mutex_lock (&reslock); 1117 LOCK (reqlock);
1118 LOCK (reslock);
908#if !HAVE_PREADWRITE 1119#if !HAVE_PREADWRITE
909 pthread_mutex_lock (&preadwritelock); 1120 LOCK (preadwritelock);
910#endif 1121#endif
911#if !HAVE_READDIR_R 1122#if !HAVE_READDIR_R
912 pthread_mutex_lock (&readdirlock); 1123 LOCK (readdirlock);
913#endif 1124#endif
914} 1125}
915 1126
916static void atfork_parent (void) 1127static void atfork_parent (void)
917{ 1128{
918#if !HAVE_READDIR_R 1129#if !HAVE_READDIR_R
919 pthread_mutex_unlock (&readdirlock); 1130 UNLOCK (readdirlock);
920#endif 1131#endif
921#if !HAVE_PREADWRITE 1132#if !HAVE_PREADWRITE
922 pthread_mutex_unlock (&preadwritelock); 1133 UNLOCK (preadwritelock);
923#endif 1134#endif
924 pthread_mutex_unlock (&reslock); 1135 UNLOCK (reslock);
925 pthread_mutex_unlock (&reqlock); 1136 UNLOCK (reqlock);
1137 UNLOCK (wrklock);
926} 1138}
927 1139
928static void atfork_child (void) 1140static void atfork_child (void)
929{ 1141{
930 aio_req prv; 1142 aio_req prv;
931 1143
1144 while (prv = reqq_shift (&req_queue))
1145 req_free (prv);
1146
1147 while (prv = reqq_shift (&res_queue))
1148 req_free (prv);
1149
1150 while (wrk_first.next != &wrk_first)
1151 {
1152 worker *wrk = wrk_first.next;
1153
1154 if (wrk->req)
1155 req_free (wrk->req);
1156
1157 worker_clear (wrk);
1158 worker_free (wrk);
1159 }
1160
932 started = 0; 1161 started = 0;
933 1162 idle = 0;
934 while (reqs) 1163 nreqs = 0;
935 { 1164 nready = 0;
936 prv = reqs; 1165 npending = 0;
937 reqs = prv->next;
938 req_free (prv);
939 }
940
941 reqs = reqe = 0;
942
943 while (ress)
944 {
945 prv = ress;
946 ress = prv->next;
947 req_free (prv);
948 }
949
950 ress = rese = 0;
951 1166
952 close (respipe [0]); 1167 close (respipe [0]);
953 close (respipe [1]); 1168 close (respipe [1]);
954 create_pipe (); 1169 create_pipe ();
955 1170
982PROTOTYPES: ENABLE 1197PROTOTYPES: ENABLE
983 1198
984BOOT: 1199BOOT:
985{ 1200{
986 HV *stash = gv_stashpv ("IO::AIO", 1); 1201 HV *stash = gv_stashpv ("IO::AIO", 1);
1202
987 newCONSTSUB (stash, "EXDEV", newSViv (EXDEV)); 1203 newCONSTSUB (stash, "EXDEV", newSViv (EXDEV));
988 newCONSTSUB (stash, "O_RDONLY", newSViv (O_RDONLY)); 1204 newCONSTSUB (stash, "O_RDONLY", newSViv (O_RDONLY));
989 newCONSTSUB (stash, "O_WRONLY", newSViv (O_WRONLY)); 1205 newCONSTSUB (stash, "O_WRONLY", newSViv (O_WRONLY));
1206 newCONSTSUB (stash, "O_CREAT", newSViv (O_CREAT));
1207 newCONSTSUB (stash, "O_TRUNC", newSViv (O_TRUNC));
1208 newCONSTSUB (stash, "S_IFIFO", newSViv (S_IFIFO));
990 1209
991 create_pipe (); 1210 create_pipe ();
992 pthread_atfork (atfork_prepare, atfork_parent, atfork_child); 1211 pthread_atfork (atfork_prepare, atfork_parent, atfork_child);
993} 1212}
994 1213
995void 1214void
996min_parallel (nthreads) 1215min_parallel (int nthreads)
997 int nthreads
998 PROTOTYPE: $ 1216 PROTOTYPE: $
999 1217
1000void 1218void
1001max_parallel (nthreads) 1219max_parallel (int nthreads)
1002 int nthreads
1003 PROTOTYPE: $ 1220 PROTOTYPE: $
1004 1221
1005int 1222int
1006max_outstanding (nreqs) 1223max_outstanding (int maxreqs)
1007 int nreqs 1224 PROTOTYPE: $
1008 PROTOTYPE: $
1009 CODE: 1225 CODE:
1010 RETVAL = max_outstanding; 1226 RETVAL = max_outstanding;
1011 max_outstanding = nreqs; 1227 max_outstanding = maxreqs;
1228 OUTPUT:
1229 RETVAL
1012 1230
1013void 1231void
1014aio_open (pathname,flags,mode,callback=&PL_sv_undef) 1232aio_open (pathname,flags,mode,callback=&PL_sv_undef)
1015 SV * pathname 1233 SV * pathname
1016 int flags 1234 int flags
1231 1449
1232 REQ_SEND; 1450 REQ_SEND;
1233} 1451}
1234 1452
1235void 1453void
1454aio_mknod (pathname,mode,dev,callback=&PL_sv_undef)
1455 SV * pathname
1456 SV * callback
1457 UV mode
1458 UV dev
1459 PPCODE:
1460{
1461 dREQ;
1462
1463 req->type = REQ_MKNOD;
1464 req->data = newSVsv (pathname);
1465 req->dataptr = SvPVbyte_nolen (req->data);
1466 req->mode = (mode_t)mode;
1467 req->offset = dev;
1468
1469 REQ_SEND;
1470}
1471
1472void
1236aio_sleep (delay,callback=&PL_sv_undef) 1473aio_busy (delay,callback=&PL_sv_undef)
1237 double delay 1474 double delay
1238 SV * callback 1475 SV * callback
1239 PPCODE: 1476 PPCODE:
1240{ 1477{
1241 dREQ; 1478 dREQ;
1242 1479
1243 req->type = REQ_SLEEP; 1480 req->type = REQ_BUSY;
1244 req->fd = delay < 0. ? 0 : delay; 1481 req->fd = delay < 0. ? 0 : delay;
1245 req->fd2 = delay < 0. ? 0 : 1000. * (delay - req->fd); 1482 req->fd2 = delay < 0. ? 0 : 1000. * (delay - req->fd);
1246 1483
1247 REQ_SEND; 1484 REQ_SEND;
1248} 1485}
1271 req->type = REQ_NOP; 1508 req->type = REQ_NOP;
1272 1509
1273 REQ_SEND; 1510 REQ_SEND;
1274} 1511}
1275 1512
1276#if 0 1513int
1277 1514aioreq_pri (int pri = 0)
1278void 1515 PROTOTYPE: ;$
1279aio_pri (int pri = DEFAULT_PRI)
1280 CODE: 1516 CODE:
1517 RETVAL = next_pri - PRI_BIAS;
1518 if (items > 0)
1519 {
1281 if (pri < PRI_MIN) pri = PRI_MIN; 1520 if (pri < PRI_MIN) pri = PRI_MIN;
1282 if (pri > PRI_MAX) pri = PRI_MAX; 1521 if (pri > PRI_MAX) pri = PRI_MAX;
1283 next_pri = pri + PRI_BIAS; 1522 next_pri = pri + PRI_BIAS;
1523 }
1524 OUTPUT:
1525 RETVAL
1284 1526
1285#endif 1527void
1528aioreq_nice (int nice = 0)
1529 CODE:
1530 nice = next_pri - nice;
1531 if (nice < PRI_MIN) nice = PRI_MIN;
1532 if (nice > PRI_MAX) nice = PRI_MAX;
1533 next_pri = nice + PRI_BIAS;
1286 1534
1287void 1535void
1288flush () 1536flush ()
1289 PROTOTYPE: 1537 PROTOTYPE:
1290 CODE: 1538 CODE:
1291 while (nreqs) 1539 while (nreqs)
1292 { 1540 {
1293 poll_wait (); 1541 poll_wait ();
1294 poll_cb (); 1542 poll_cb (0);
1295 } 1543 }
1296 1544
1297void 1545void
1298poll() 1546poll()
1299 PROTOTYPE: 1547 PROTOTYPE:
1300 CODE: 1548 CODE:
1301 if (nreqs) 1549 if (nreqs)
1302 { 1550 {
1303 poll_wait (); 1551 poll_wait ();
1304 poll_cb (); 1552 poll_cb (0);
1305 } 1553 }
1306 1554
1307int 1555int
1308poll_fileno() 1556poll_fileno()
1309 PROTOTYPE: 1557 PROTOTYPE:
1314 1562
1315int 1563int
1316poll_cb(...) 1564poll_cb(...)
1317 PROTOTYPE: 1565 PROTOTYPE:
1318 CODE: 1566 CODE:
1319 RETVAL = poll_cb (); 1567 RETVAL = poll_cb (0);
1568 OUTPUT:
1569 RETVAL
1570
1571int
1572poll_some(int max = 0)
1573 PROTOTYPE: $
1574 CODE:
1575 RETVAL = poll_cb (max);
1320 OUTPUT: 1576 OUTPUT:
1321 RETVAL 1577 RETVAL
1322 1578
1323void 1579void
1324poll_wait() 1580poll_wait()
1333 CODE: 1589 CODE:
1334 RETVAL = nreqs; 1590 RETVAL = nreqs;
1335 OUTPUT: 1591 OUTPUT:
1336 RETVAL 1592 RETVAL
1337 1593
1594int
1595nready()
1596 PROTOTYPE:
1597 CODE:
1598 RETVAL = get_nready ();
1599 OUTPUT:
1600 RETVAL
1601
1602int
1603npending()
1604 PROTOTYPE:
1605 CODE:
1606 RETVAL = get_npending ();
1607 OUTPUT:
1608 RETVAL
1609
1338PROTOTYPES: DISABLE 1610PROTOTYPES: DISABLE
1339 1611
1340MODULE = IO::AIO PACKAGE = IO::AIO::REQ 1612MODULE = IO::AIO PACKAGE = IO::AIO::REQ
1341 1613
1342void 1614void
1343cancel (aio_req_ornot req) 1615cancel (aio_req_ornot req)
1344 PROTOTYPE:
1345 CODE: 1616 CODE:
1346 req_cancel (req); 1617 req_cancel (req);
1347 1618
1348void 1619void
1349cb (aio_req_ornot req, SV *callback=&PL_sv_undef) 1620cb (aio_req_ornot req, SV *callback=&PL_sv_undef)
1385 } 1656 }
1386 } 1657 }
1387} 1658}
1388 1659
1389void 1660void
1661cancel_subs (aio_req_ornot req)
1662 CODE:
1663 req_cancel_subs (req);
1664
1665void
1390result (aio_req grp, ...) 1666result (aio_req grp, ...)
1391 CODE: 1667 CODE:
1392{ 1668{
1393 int i; 1669 int i;
1670 AV *av;
1671
1672 grp->errorno = errno;
1673
1394 AV *av = newAV (); 1674 av = newAV ();
1395 1675
1396 for (i = 1; i < items; ++i ) 1676 for (i = 1; i < items; ++i )
1397 av_push (av, newSVsv (ST (i))); 1677 av_push (av, newSVsv (ST (i)));
1398 1678
1399 SvREFCNT_dec (grp->data); 1679 SvREFCNT_dec (grp->data);
1400 grp->data = (SV *)av; 1680 grp->data = (SV *)av;
1401} 1681}
1402 1682
1403void 1683void
1684errno (aio_req grp, int errorno = errno)
1685 CODE:
1686 grp->errorno = errorno;
1687
1688void
1404feed_limit (aio_req grp, int limit) 1689limit (aio_req grp, int limit)
1405 CODE: 1690 CODE:
1406 grp->fd2 = limit; 1691 grp->fd2 = limit;
1407 aio_grp_feed (grp); 1692 aio_grp_feed (grp);
1408 1693
1409void 1694void
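
The revision range above adds several new Perl-visible calls: aio_mknod, aio_busy (the renamed aio_sleep), aioreq_pri and aioreq_nice for request priorities, poll_some for bounded callback processing, the nready/npending counters, and the IO::AIO::GRP helpers cancel_subs, errno and limit. Below is a minimal usage sketch of those additions, assuming the usual IO::AIO function exports and a plain synchronous polling loop; the path and the numeric values are illustrative only, not taken from the module.

   use IO::AIO;

   # queue subsequent requests at a higher priority (aioreq_pri/aioreq_nice are new here)
   aioreq_pri 2;

   # aio_mknod is new in 1.84; S_IFIFO is exported as a constant in the BOOT section above
   aio_mknod "/tmp/example-fifo", IO::AIO::S_IFIFO | 0600, 0, sub {
      print $_[0] ? "mknod failed: $!\n" : "fifo created\n";
   };

   # REQ_SLEEP became REQ_BUSY, so the call is now aio_busy: it keeps one worker
   # thread busy for the given time without doing any I/O
   aio_busy 0.5, sub { print "busy period over\n" };

   # drain results: poll_some runs at most N callbacks per call, while the new
   # nready/npending counters expose the request and result queue sizes
   while (IO::AIO::nreqs) {
      IO::AIO::poll_wait;                  # block until the result pipe becomes readable
      my $n = IO::AIO::poll_some (10);     # invoke up to 10 completion callbacks
      printf "%d callbacks run, %d queued, %d pending\n",
             $n, IO::AIO::nready, IO::AIO::npending;
   }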
