/cvs/IO-AIO/AIO.xs

Comparing IO-AIO/AIO.xs (file contents):
Revision 1.61 by root, Mon Oct 23 22:44:21 2006 UTC vs.
Revision 1.80 by root, Fri Oct 27 19:17:23 2006 UTC

1/* solaris */
2#define _POSIX_PTHREAD_SEMANTICS 1
3
4#if __linux && !defined(_GNU_SOURCE)
5# define _GNU_SOURCE
6#endif
7
8/* just in case */
1#define _REENTRANT 1 9#define _REENTRANT 1
10
2#include <errno.h> 11#include <errno.h>
3 12
4#include "EXTERN.h" 13#include "EXTERN.h"
5#include "perl.h" 14#include "perl.h"
6#include "XSUB.h" 15#include "XSUB.h"
39/* used for struct dirent, AIX doesn't provide it */ 48/* used for struct dirent, AIX doesn't provide it */
40#ifndef NAME_MAX 49#ifndef NAME_MAX
41# define NAME_MAX 4096 50# define NAME_MAX 4096
42#endif 51#endif
43 52
53#ifndef PTHREAD_STACK_MIN
54/* care for broken platforms, e.g. windows */
55# define PTHREAD_STACK_MIN 16384
56#endif
57
44#if __ia64 58#if __ia64
45# define STACKSIZE 65536 59# define STACKSIZE 65536
60#elif __i386 || __x86_64 /* 16k is unreasonably high :( */
61# define STACKSIZE PTHREAD_STACK_MIN
46#else 62#else
47# define STACKSIZE 8192 63# define STACKSIZE 16384
64#endif
65
66/* whether word reads are potentially non-atomic.
67 * this is conservative; most arches this runs
68 * on likely have atomic word reads/writes.
69 */
70#ifndef WORDREAD_UNSAFE
71# if __i386 || __x86_64
72# define WORDREAD_UNSAFE 0
73# else
74# define WORDREAD_UNSAFE 1
48#endif 75# endif
76#endif
77
78/* buffer size for various temporary buffers */
79#define AIO_BUFSIZE 65536
80
81#define dBUF \
82 char *aio_buf; \
83 LOCK (wrklock); \
84 self->dbuf = aio_buf = malloc (AIO_BUFSIZE); \
85 UNLOCK (wrklock); \
86 if (!aio_buf) \
87 return -1;
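For context, the dBUF macro above allocates a per-request scratch buffer and records it in the worker's dbuf slot under wrklock, so a single cleanup routine (worker_clear) can release it later. A minimal standalone sketch of the same pattern, using toy names rather than the actual AIO.xs types:

/* Toy illustration of the dBUF idea: allocate a scratch buffer,
 * register it with the worker under a lock, and let one cleanup
 * routine free it. Names here are illustrative only. */
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

#define BUFSIZE 65536

typedef struct {
  void *dbuf;               /* scratch buffer owned by this worker */
} toy_worker;

static pthread_mutex_t toy_wrklock = PTHREAD_MUTEX_INITIALIZER;

/* mirrors dBUF: declare, allocate under the lock, bail out on failure */
#define dTOYBUF \
  char *buf;                              \
  pthread_mutex_lock (&toy_wrklock);      \
  self->dbuf = buf = malloc (BUFSIZE);    \
  pthread_mutex_unlock (&toy_wrklock);    \
  if (!buf)                               \
    return -1;

static void toy_worker_clear (toy_worker *self)
{
  if (self->dbuf)
    {
      free (self->dbuf);
      self->dbuf = 0;
    }
}

static int toy_request (toy_worker *self)
{
  dTOYBUF;                  /* buffer is now registered in self->dbuf */
  snprintf (buf, BUFSIZE, "scratch space lives at %p", (void *)buf);
  puts (buf);
  return 0;                 /* freeing is left to toy_worker_clear */
}

int main (void)
{
  toy_worker w = { 0 };

  if (toy_request (&w) == 0)
    toy_worker_clear (&w);  /* releases the scratch buffer */

  return 0;
}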
49 88
50enum { 89enum {
51 REQ_QUIT, 90 REQ_QUIT,
52 REQ_OPEN, REQ_CLOSE, 91 REQ_OPEN, REQ_CLOSE,
53 REQ_READ, REQ_WRITE, REQ_READAHEAD, 92 REQ_READ, REQ_WRITE, REQ_READAHEAD,
56 REQ_FSYNC, REQ_FDATASYNC, 95 REQ_FSYNC, REQ_FDATASYNC,
57 REQ_UNLINK, REQ_RMDIR, REQ_RENAME, 96 REQ_UNLINK, REQ_RMDIR, REQ_RENAME,
58 REQ_READDIR, 97 REQ_READDIR,
59 REQ_LINK, REQ_SYMLINK, 98 REQ_LINK, REQ_SYMLINK,
60 REQ_GROUP, REQ_NOP, 99 REQ_GROUP, REQ_NOP,
61 REQ_SLEEP, 100 REQ_BUSY,
62}; 101};
63 102
64#define AIO_REQ_KLASS "IO::AIO::REQ" 103#define AIO_REQ_KLASS "IO::AIO::REQ"
65#define AIO_GRP_KLASS "IO::AIO::GRP" 104#define AIO_GRP_KLASS "IO::AIO::GRP"
66 105
100 PRI_MIN = -4, 139 PRI_MIN = -4,
101 PRI_MAX = 4, 140 PRI_MAX = 4,
102 141
103 DEFAULT_PRI = 0, 142 DEFAULT_PRI = 0,
104 PRI_BIAS = -PRI_MIN, 143 PRI_BIAS = -PRI_MIN,
144 NUM_PRI = PRI_MAX + PRI_BIAS + 1,
105}; 145};
106 146
107static int next_pri = DEFAULT_PRI + PRI_BIAS; 147static int next_pri = DEFAULT_PRI + PRI_BIAS;
108 148
109static int started, wanted; 149static unsigned int started, wanted;
110static volatile int nreqs; 150
111static int max_outstanding = 1<<30; 151#if __linux && defined (PTHREAD_ADAPTIVE_MUTEX_INITIALIZER_NP)
152# define AIO_MUTEX_INIT PTHREAD_ADAPTIVE_MUTEX_INITIALIZER_NP
153#else
154# define AIO_MUTEX_INIT PTHREAD_MUTEX_INITIALIZER
155#endif
156
157#define LOCK(mutex) pthread_mutex_lock (&(mutex))
158#define UNLOCK(mutex) pthread_mutex_unlock (&(mutex))
159
160/* worker threads management */
161static pthread_mutex_t wrklock = AIO_MUTEX_INIT;
162
163typedef struct worker {
164 /* locked by wrklock */
165 struct worker *prev, *next;
166
167 pthread_t tid;
168
169 /* locked by reslock, reqlock or wrklock */
170 aio_req req; /* currently processed request */
171 void *dbuf;
172 DIR *dirp;
173} worker;
174
175static worker wrk_first = { &wrk_first, &wrk_first, 0 };
176
177static void worker_clear (worker *wrk)
178{
179 if (wrk->dirp)
180 {
181 closedir (wrk->dirp);
182 wrk->dirp = 0;
183 }
184
185 if (wrk->dbuf)
186 {
187 free (wrk->dbuf);
188 wrk->dbuf = 0;
189 }
190}
191
192static void worker_free (worker *wrk)
193{
194 wrk->next->prev = wrk->prev;
195 wrk->prev->next = wrk->next;
196
197 free (wrk);
198}
199
200static volatile unsigned int nreqs, nready, npending;
201static volatile unsigned int max_outstanding = 0xffffffff;
112static int respipe [2]; 202static int respipe [2];
113 203
114static pthread_mutex_t reslock = PTHREAD_MUTEX_INITIALIZER; 204static pthread_mutex_t reslock = AIO_MUTEX_INIT;
115static pthread_mutex_t reqlock = PTHREAD_MUTEX_INITIALIZER; 205static pthread_mutex_t reqlock = AIO_MUTEX_INIT;
116static pthread_cond_t reqwait = PTHREAD_COND_INITIALIZER; 206static pthread_cond_t reqwait = PTHREAD_COND_INITIALIZER;
117 207
118static volatile aio_req reqs, reqe; /* queue start, queue end */ 208#if WORDREAD_UNSAFE
119static volatile aio_req ress, rese; /* queue start, queue end */
120 209
210static unsigned int get_nready ()
211{
212 unsigned int retval;
213
214 LOCK (reqlock);
215 retval = nready;
216 UNLOCK (reqlock);
217
218 return retval;
219}
220
221static unsigned int get_npending ()
222{
223 unsigned int retval;
224
225 LOCK (reslock);
226 retval = npending;
227 UNLOCK (reslock);
228
229 return retval;
230}
231
232#else
233
234# define get_nready() nready
235# define get_npending() npending
236
237#endif
238
239/*
240 * a somewhat faster data structure might be nice, but
241 * with 8 priorities this actually needs <20 insns
242 * per shift, the most expensive operation.
243 */
244typedef struct {
245 aio_req qs[NUM_PRI], qe[NUM_PRI]; /* qstart, qend */
246 int size;
247} reqq;
248
249static reqq req_queue;
250static reqq res_queue;
251
252int reqq_push (reqq *q, aio_req req)
253{
254 int pri = req->pri;
255 req->next = 0;
256
257 if (q->qe[pri])
258 {
259 q->qe[pri]->next = req;
260 q->qe[pri] = req;
261 }
262 else
263 q->qe[pri] = q->qs[pri] = req;
264
265 return q->size++;
266}
267
268aio_req reqq_shift (reqq *q)
269{
270 int pri;
271
272 if (!q->size)
273 return 0;
274
275 --q->size;
276
277 for (pri = NUM_PRI; pri--; )
278 {
279 aio_req req = q->qs[pri];
280
281 if (req)
282 {
283 if (!(q->qs[pri] = req->next))
284 q->qe[pri] = 0;
285
286 return req;
287 }
288 }
289
290 abort ();
291}
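The reqq above keeps one FIFO list per priority, and reqq_shift scans from the highest priority bucket downwards, so more urgent requests run first while requests of equal priority keep submission order. A self-contained toy version of the same queue (simplified node type and illustrative names only, not the module's actual structures):

#include <stdio.h>
#include <stdlib.h>

#define NUM_PRI 9            /* same shape as PRI_MAX + PRI_BIAS + 1 */

typedef struct node {
  int pri;                   /* 0 .. NUM_PRI-1, higher is more urgent */
  int id;
  struct node *next;
} node;

typedef struct {
  node *qs[NUM_PRI], *qe[NUM_PRI];   /* per-priority head and tail */
  int size;
} toyq;

static int toyq_push (toyq *q, node *n)
{
  int pri = n->pri;
  n->next = 0;

  if (q->qe[pri])
    q->qe[pri] = q->qe[pri]->next = n;
  else
    q->qe[pri] = q->qs[pri] = n;

  return q->size++;
}

static node *toyq_shift (toyq *q)
{
  int pri;

  if (!q->size)
    return 0;

  --q->size;

  /* scan from the highest priority bucket downwards */
  for (pri = NUM_PRI; pri--; )
    if (q->qs[pri])
      {
        node *n = q->qs[pri];

        if (!(q->qs[pri] = n->next))
          q->qe[pri] = 0;

        return n;
      }

  abort ();                  /* size and buckets out of sync */
}

int main (void)
{
  toyq q = { { 0 } };
  node reqs[] = { { 4, 1 }, { 0, 2 }, { 8, 3 }, { 4, 4 } };
  size_t i;
  node *n;

  for (i = 0; i < sizeof (reqs) / sizeof (reqs[0]); ++i)
    toyq_push (&q, &reqs[i]);

  /* prints 3, 1, 4, 2: highest priority first, FIFO within a priority */
  while ((n = toyq_shift (&q)))
    printf ("request %d (pri %d)\n", n->id, n->pri);

  return 0;
}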
292
293static int poll_cb (int max);
121static void req_invoke (aio_req req); 294static void req_invoke (aio_req req);
122static void req_free (aio_req req); 295static void req_free (aio_req req);
296static void req_cancel (aio_req req);
123 297
124/* must be called at most once */ 298/* must be called at most once */
125static SV *req_sv (aio_req req, const char *klass) 299static SV *req_sv (aio_req req, const char *klass)
126{ 300{
127 if (!req->self) 301 if (!req->self)
158 ENTER; 332 ENTER;
159 SAVETMPS; 333 SAVETMPS;
160 PUSHMARK (SP); 334 PUSHMARK (SP);
161 XPUSHs (req_sv (grp, AIO_GRP_KLASS)); 335 XPUSHs (req_sv (grp, AIO_GRP_KLASS));
162 PUTBACK; 336 PUTBACK;
163 call_sv (grp->fh2, G_VOID | G_EVAL); 337 call_sv (grp->fh2, G_VOID | G_EVAL | G_KEEPERR);
164 SPAGAIN; 338 SPAGAIN;
165 FREETMPS; 339 FREETMPS;
166 LEAVE; 340 LEAVE;
167 } 341 }
168 342
189 req_invoke (grp); 363 req_invoke (grp);
190 req_free (grp); 364 req_free (grp);
191 } 365 }
192} 366}
193 367
194static void poll_wait ()
195{
196 while (nreqs)
197 {
198 aio_req req;
199 pthread_mutex_lock (&reslock);
200 req = ress;
201 pthread_mutex_unlock (&reslock);
202
203 if (req)
204 return;
205
206 fd_set rfd;
207 FD_ZERO(&rfd);
208 FD_SET(respipe [0], &rfd);
209
210 select (respipe [0] + 1, &rfd, 0, 0, 0);
211 }
212}
213
214static void req_invoke (aio_req req) 368static void req_invoke (aio_req req)
215{ 369{
216 dSP; 370 dSP;
217 int errorno = errno;
218 371
219 if (req->flags & FLAG_CANCELLED || !SvOK (req->callback)) 372 if (!(req->flags & FLAG_CANCELLED) && SvOK (req->callback))
220 return; 373 {
221
222 errno = req->errorno;
223
224 ENTER; 374 ENTER;
225 SAVETMPS; 375 SAVETMPS;
226 PUSHMARK (SP); 376 PUSHMARK (SP);
227 EXTEND (SP, 1); 377 EXTEND (SP, 1);
228 378
229 switch (req->type) 379 switch (req->type)
230 {
231 case REQ_READDIR:
232 { 380 {
233 SV *rv = &PL_sv_undef; 381 case REQ_READDIR:
234
235 if (req->result >= 0)
236 { 382 {
237 char *buf = req->data2ptr; 383 SV *rv = &PL_sv_undef;
238 AV *av = newAV ();
239 384
240 while (req->result) 385 if (req->result >= 0)
241 { 386 {
387 int i;
388 char *buf = req->data2ptr;
389 AV *av = newAV ();
390
391 av_extend (av, req->result - 1);
392
393 for (i = 0; i < req->result; ++i)
394 {
242 SV *sv = newSVpv (buf, 0); 395 SV *sv = newSVpv (buf, 0);
243 396
244 av_push (av, sv); 397 av_store (av, i, sv);
245 buf += SvCUR (sv) + 1; 398 buf += SvCUR (sv) + 1;
246 req->result--; 399 }
400
401 rv = sv_2mortal (newRV_noinc ((SV *)av));
247 } 402 }
248 403
249 rv = sv_2mortal (newRV_noinc ((SV *)av)); 404 PUSHs (rv);
250 } 405 }
406 break;
251 407
252 PUSHs (rv); 408 case REQ_OPEN:
409 {
410 /* convert fd to fh */
411 SV *fh;
412
413 PUSHs (sv_2mortal (newSViv (req->result)));
414 PUTBACK;
415 call_pv ("IO::AIO::_fd2fh", G_SCALAR | G_EVAL);
416 SPAGAIN;
417
418 fh = SvREFCNT_inc (POPs);
419
420 PUSHMARK (SP);
421 XPUSHs (sv_2mortal (fh));
422 }
423 break;
424
425 case REQ_GROUP:
426 req->fd = 2; /* mark group as finished */
427
428 if (req->data)
429 {
430 int i;
431 AV *av = (AV *)req->data;
432
433 EXTEND (SP, AvFILL (av) + 1);
434 for (i = 0; i <= AvFILL (av); ++i)
435 PUSHs (*av_fetch (av, i, 0));
436 }
437 break;
438
439 case REQ_NOP:
440 case REQ_BUSY:
441 break;
442
443 default:
444 PUSHs (sv_2mortal (newSViv (req->result)));
445 break;
253 } 446 }
254 break;
255 447
256 case REQ_OPEN: 448 errno = req->errorno;
257 {
258 /* convert fd to fh */
259 SV *fh;
260 449
261 PUSHs (sv_2mortal (newSViv (req->result)));
262 PUTBACK; 450 PUTBACK;
263 call_pv ("IO::AIO::_fd2fh", G_SCALAR | G_EVAL);
264 SPAGAIN;
265
266 fh = SvREFCNT_inc (POPs);
267
268 PUSHMARK (SP);
269 XPUSHs (sv_2mortal (fh));
270 }
271 break;
272
273 case REQ_GROUP:
274 req->fd = 2; /* mark group as finished */
275
276 if (req->data)
277 {
278 int i;
279 AV *av = (AV *)req->data;
280
281 EXTEND (SP, AvFILL (av) + 1);
282 for (i = 0; i <= AvFILL (av); ++i)
283 PUSHs (*av_fetch (av, i, 0));
284 }
285 break;
286
287 case REQ_NOP:
288 case REQ_SLEEP:
289 break;
290
291 default:
292 PUSHs (sv_2mortal (newSViv (req->result)));
293 break;
294 }
295
296
297 PUTBACK;
298 call_sv (req->callback, G_VOID | G_EVAL); 451 call_sv (req->callback, G_VOID | G_EVAL);
299 SPAGAIN; 452 SPAGAIN;
300 453
301 FREETMPS; 454 FREETMPS;
302 LEAVE; 455 LEAVE;
303
304 errno = errorno;
305
306 if (SvTRUE (ERRSV))
307 { 456 }
308 req_free (req);
309 croak (0);
310 }
311}
312 457
313static void req_free (aio_req req)
314{
315 if (req->grp) 458 if (req->grp)
316 { 459 {
317 aio_req grp = req->grp; 460 aio_req grp = req->grp;
318 461
319 /* unlink request */ 462 /* unlink request */
324 grp->grp_first = req->grp_next; 467 grp->grp_first = req->grp_next;
325 468
326 aio_grp_dec (grp); 469 aio_grp_dec (grp);
327 } 470 }
328 471
472 if (SvTRUE (ERRSV))
473 {
474 req_free (req);
475 croak (0);
476 }
477}
478
479static void req_free (aio_req req)
480{
329 if (req->self) 481 if (req->self)
330 { 482 {
331 sv_unmagic (req->self, PERL_MAGIC_ext); 483 sv_unmagic (req->self, PERL_MAGIC_ext);
332 SvREFCNT_dec (req->self); 484 SvREFCNT_dec (req->self);
333 } 485 }
336 SvREFCNT_dec (req->fh); 488 SvREFCNT_dec (req->fh);
337 SvREFCNT_dec (req->fh2); 489 SvREFCNT_dec (req->fh2);
338 SvREFCNT_dec (req->callback); 490 SvREFCNT_dec (req->callback);
339 Safefree (req->statdata); 491 Safefree (req->statdata);
340 492
341 if (req->type == REQ_READDIR && req->result >= 0) 493 if (req->type == REQ_READDIR)
342 free (req->data2ptr); 494 free (req->data2ptr);
343 495
344 Safefree (req); 496 Safefree (req);
345} 497}
346 498
499static void req_cancel_subs (aio_req grp)
500{
501 aio_req sub;
502
503 if (grp->type != REQ_GROUP)
504 return;
505
506 SvREFCNT_dec (grp->fh2);
507 grp->fh2 = 0;
508
509 for (sub = grp->grp_first; sub; sub = sub->grp_next)
510 req_cancel (sub);
511}
512
347static void req_cancel (aio_req req) 513static void req_cancel (aio_req req)
348{ 514{
349 req->flags |= FLAG_CANCELLED; 515 req->flags |= FLAG_CANCELLED;
350 516
351 if (req->type == REQ_GROUP) 517 req_cancel_subs (req);
352 { 518}
353 aio_req sub;
354 519
355 for (sub = req->grp_first; sub; sub = sub->grp_next) 520static void *aio_proc(void *arg);
356 req_cancel (sub); 521
522static void start_thread (void)
523{
524 sigset_t fullsigset, oldsigset;
525 pthread_attr_t attr;
526
527 worker *wrk = calloc (1, sizeof (worker));
528
529 if (!wrk)
530 croak ("unable to allocate worker thread data");
531
532 pthread_attr_init (&attr);
533 pthread_attr_setstacksize (&attr, STACKSIZE);
534 pthread_attr_setdetachstate (&attr, PTHREAD_CREATE_DETACHED);
535#ifdef PTHREAD_SCOPE_PROCESS
536 pthread_attr_setscope (&attr, PTHREAD_SCOPE_PROCESS);
537#endif
538
539 sigfillset (&fullsigset);
540
541 LOCK (wrklock);
542 sigprocmask (SIG_SETMASK, &fullsigset, &oldsigset);
543
544 if (pthread_create (&wrk->tid, &attr, aio_proc, (void *)wrk) == 0)
357 } 545 {
358} 546 wrk->prev = &wrk_first;
547 wrk->next = wrk_first.next;
548 wrk_first.next->prev = wrk;
549 wrk_first.next = wrk;
550 ++started;
551 }
552 else
553 free (wrk);
359 554
555 sigprocmask (SIG_SETMASK, &oldsigset, 0);
556 UNLOCK (wrklock);
557}
558
559static void maybe_start_thread ()
560{
561#if 0
562 static struct timeval last;
563 struct timeval diff, now;
564#endif
565
566 if (started >= wanted)
567 return;
568
569 if (nready <= nreqs - get_nready () - get_npending ())
570 return;
571
572#if 0
573 gettimeofday (&now, 0);
574
575 diff.tv_sec = now.tv_sec - last.tv_sec;
576 diff.tv_usec = now.tv_usec - last.tv_usec;
577
578 if (diff.tv_usec < 0)
579 {
580 --diff.tv_sec;
581 diff.tv_usec += 1000000;
582 }
583
584 if (!diff.tv_sec && diff.tv_usec < 10000)
585 return;
586
587 last = now;
588#endif
589
590 start_thread ();
591}
592
593static void req_send (aio_req req)
594{
595 ++nreqs;
596
597 LOCK (reqlock);
598 ++nready;
599 reqq_push (&req_queue, req);
600 pthread_cond_signal (&reqwait);
601 UNLOCK (reqlock);
602
603 maybe_start_thread ();
604}
605
606static void end_thread (void)
607{
608 aio_req req;
609
610 Newz (0, req, 1, aio_cb);
611
612 req->type = REQ_QUIT;
613 req->pri = PRI_MAX + PRI_BIAS;
614
615 req_send (req);
616
617 LOCK (wrklock);
618 --started;
619 UNLOCK (wrklock);
620}
621
622static void min_parallel (int nthreads)
623{
624 if (wanted < nthreads)
625 wanted = nthreads;
626}
627
628static void max_parallel (int nthreads)
629{
630 if (wanted > nthreads)
631 wanted = nthreads;
632
633 while (started > wanted)
634 end_thread ();
635}
636
637static void poll_wait ()
638{
639 fd_set rfd;
640
641 while (nreqs)
642 {
643 int size;
644 if (WORDREAD_UNSAFE) LOCK (reslock);
645 size = res_queue.size;
646 if (WORDREAD_UNSAFE) UNLOCK (reslock);
647
648 if (size)
649 return;
650
651 maybe_start_thread ();
652
653 FD_ZERO(&rfd);
654 FD_SET(respipe [0], &rfd);
655
656 select (respipe [0] + 1, &rfd, 0, 0, 0);
657 }
658}
659
360static int poll_cb () 660static int poll_cb (int max)
361{ 661{
362 dSP; 662 dSP;
363 int count = 0; 663 int count = 0;
364 int do_croak = 0; 664 int do_croak = 0;
365 aio_req req; 665 aio_req req;
366 666
367 for (;;) 667 for (;;)
368 { 668 {
369 pthread_mutex_lock (&reslock); 669 while (max <= 0 || count < max)
370 req = ress;
371
372 if (req)
373 { 670 {
374 ress = req->next; 671 maybe_start_thread ();
375 672
673 LOCK (reslock);
674 req = reqq_shift (&res_queue);
675
376 if (!ress) 676 if (req)
377 { 677 {
678 --npending;
679
680 if (!res_queue.size)
681 {
378 /* read any signals sent by the worker threads */ 682 /* read any signals sent by the worker threads */
379 char buf [32]; 683 char buf [32];
380 while (read (respipe [0], buf, 32) == 32) 684 while (read (respipe [0], buf, 32) == 32)
685 ;
381 ; 686 }
382
383 rese = 0;
384 } 687 }
688
689 UNLOCK (reslock);
690
691 if (!req)
692 break;
693
694 --nreqs;
695
696 if (req->type == REQ_GROUP && req->length)
697 {
698 req->fd = 1; /* mark request as delayed */
699 continue;
700 }
701 else
702 {
703 if (req->type == REQ_READ)
704 SvCUR_set (req->data, req->dataoffset + (req->result > 0 ? req->result : 0));
705
706 if (req->data2ptr && (req->type == REQ_READ || req->type == REQ_WRITE))
707 SvREADONLY_off (req->data);
708
709 if (req->statdata)
710 {
711 PL_laststype = req->type == REQ_LSTAT ? OP_LSTAT : OP_STAT;
712 PL_laststatval = req->result;
713 PL_statcache = *(req->statdata);
714 }
715
716 req_invoke (req);
717
718 count++;
719 }
720
721 req_free (req);
385 } 722 }
386 723
387 pthread_mutex_unlock (&reslock); 724 if (nreqs <= max_outstanding)
388
389 if (!req)
390 break; 725 break;
391 726
392 --nreqs; 727 poll_wait ();
393 728
394 if (req->type == REQ_QUIT) 729 max = 0;
395 started--;
396 else if (req->type == REQ_GROUP && req->length)
397 {
398 req->fd = 1; /* mark request as delayed */
399 continue;
400 }
401 else
402 {
403 if (req->type == REQ_READ)
404 SvCUR_set (req->data, req->dataoffset + (req->result > 0 ? req->result : 0));
405
406 if (req->data2ptr && (req->type == REQ_READ || req->type == REQ_WRITE))
407 SvREADONLY_off (req->data);
408
409 if (req->statdata)
410 {
411 PL_laststype = req->type == REQ_LSTAT ? OP_LSTAT : OP_STAT;
412 PL_laststatval = req->result;
413 PL_statcache = *(req->statdata);
414 }
415
416 req_invoke (req);
417
418 count++;
419 }
420
421 req_free (req);
422 } 730 }
423 731
424 return count; 732 return count;
425}
426
427static void *aio_proc(void *arg);
428
429static void start_thread (void)
430{
431 sigset_t fullsigset, oldsigset;
432 pthread_t tid;
433 pthread_attr_t attr;
434
435 pthread_attr_init (&attr);
436 pthread_attr_setstacksize (&attr, STACKSIZE);
437 pthread_attr_setdetachstate (&attr, PTHREAD_CREATE_DETACHED);
438
439 sigfillset (&fullsigset);
440 sigprocmask (SIG_SETMASK, &fullsigset, &oldsigset);
441
442 if (pthread_create (&tid, &attr, aio_proc, 0) == 0)
443 started++;
444
445 sigprocmask (SIG_SETMASK, &oldsigset, 0);
446}
447
448static void req_send (aio_req req)
449{
450 while (started < wanted && nreqs >= started)
451 start_thread ();
452
453 ++nreqs;
454
455 pthread_mutex_lock (&reqlock);
456
457 req->next = 0;
458
459 if (reqe)
460 {
461 reqe->next = req;
462 reqe = req;
463 }
464 else
465 reqe = reqs = req;
466
467 pthread_cond_signal (&reqwait);
468 pthread_mutex_unlock (&reqlock);
469
470 if (nreqs > max_outstanding)
471 for (;;)
472 {
473 poll_cb ();
474
475 if (nreqs <= max_outstanding)
476 break;
477
478 poll_wait ();
479 }
480}
481
482static void end_thread (void)
483{
484 aio_req req;
485 Newz (0, req, 1, aio_cb);
486 req->type = REQ_QUIT;
487
488 req_send (req);
489}
490
491static void min_parallel (int nthreads)
492{
493 if (wanted < nthreads)
494 wanted = nthreads;
495}
496
497static void max_parallel (int nthreads)
498{
499 int cur = started;
500
501 if (wanted > nthreads)
502 wanted = nthreads;
503
504 while (cur > wanted)
505 {
506 end_thread ();
507 cur--;
508 }
509
510 while (started > wanted)
511 {
512 poll_wait ();
513 poll_cb ();
514 }
515} 733}
516 734
517static void create_pipe () 735static void create_pipe ()
518{ 736{
519 if (pipe (respipe)) 737 if (pipe (respipe))
543static ssize_t pread (int fd, void *buf, size_t count, off_t offset) 761static ssize_t pread (int fd, void *buf, size_t count, off_t offset)
544{ 762{
545 ssize_t res; 763 ssize_t res;
546 off_t ooffset; 764 off_t ooffset;
547 765
548 pthread_mutex_lock (&preadwritelock); 766 LOCK (preadwritelock);
549 ooffset = lseek (fd, 0, SEEK_CUR); 767 ooffset = lseek (fd, 0, SEEK_CUR);
550 lseek (fd, offset, SEEK_SET); 768 lseek (fd, offset, SEEK_SET);
551 res = read (fd, buf, count); 769 res = read (fd, buf, count);
552 lseek (fd, ooffset, SEEK_SET); 770 lseek (fd, ooffset, SEEK_SET);
553 pthread_mutex_unlock (&preadwritelock); 771 UNLOCK (preadwritelock);
554 772
555 return res; 773 return res;
556} 774}
557 775
558static ssize_t pwrite (int fd, void *buf, size_t count, off_t offset) 776static ssize_t pwrite (int fd, void *buf, size_t count, off_t offset)
559{ 777{
560 ssize_t res; 778 ssize_t res;
561 off_t ooffset; 779 off_t ooffset;
562 780
563 pthread_mutex_lock (&preadwritelock); 781 LOCK (preadwritelock);
564 ooffset = lseek (fd, 0, SEEK_CUR); 782 ooffset = lseek (fd, 0, SEEK_CUR);
565 lseek (fd, offset, SEEK_SET); 783 lseek (fd, offset, SEEK_SET);
566 res = write (fd, buf, count); 784 res = write (fd, buf, count);
567 lseek (fd, offset, SEEK_SET); 785 lseek (fd, offset, SEEK_SET);
568 pthread_mutex_unlock (&preadwritelock); 786 UNLOCK (preadwritelock);
569 787
570 return res; 788 return res;
571} 789}
572#endif 790#endif
573 791
574#if !HAVE_FDATASYNC 792#if !HAVE_FDATASYNC
575# define fdatasync fsync 793# define fdatasync fsync
576#endif 794#endif
577 795
578#if !HAVE_READAHEAD 796#if !HAVE_READAHEAD
579# define readahead aio_readahead 797# define readahead(fd,offset,count) aio_readahead (fd, offset, count, self)
580 798
581static ssize_t readahead (int fd, off_t offset, size_t count) 799static ssize_t aio_readahead (int fd, off_t offset, size_t count, worker *self)
582{ 800{
583 char readahead_buf[4096]; 801 dBUF;
584 802
585 while (count > 0) 803 while (count > 0)
586 { 804 {
587 size_t len = count < sizeof (readahead_buf) ? count : sizeof (readahead_buf); 805 size_t len = count < AIO_BUFSIZE ? count : AIO_BUFSIZE;
588 806
589 pread (fd, readahead_buf, len, offset); 807 pread (fd, aio_buf, len, offset);
590 offset += len; 808 offset += len;
591 count -= len; 809 count -= len;
592 } 810 }
593 811
594 errno = 0; 812 errno = 0;
595} 813}
814
596#endif 815#endif
597 816
598#if !HAVE_READDIR_R 817#if !HAVE_READDIR_R
599# define readdir_r aio_readdir_r 818# define readdir_r aio_readdir_r
600 819
603static int readdir_r (DIR *dirp, struct dirent *ent, struct dirent **res) 822static int readdir_r (DIR *dirp, struct dirent *ent, struct dirent **res)
604{ 823{
605 struct dirent *e; 824 struct dirent *e;
606 int errorno; 825 int errorno;
607 826
608 pthread_mutex_lock (&readdirlock); 827 LOCK (readdirlock);
609 828
610 e = readdir (dirp); 829 e = readdir (dirp);
611 errorno = errno; 830 errorno = errno;
612 831
613 if (e) 832 if (e)
616 strcpy (ent->d_name, e->d_name); 835 strcpy (ent->d_name, e->d_name);
617 } 836 }
618 else 837 else
619 *res = 0; 838 *res = 0;
620 839
621 pthread_mutex_unlock (&readdirlock); 840 UNLOCK (readdirlock);
622 841
623 errno = errorno; 842 errno = errorno;
624 return e ? 0 : -1; 843 return e ? 0 : -1;
625} 844}
626#endif 845#endif
627 846
628/* sendfile always needs emulation */ 847/* sendfile always needs emulation */
629static ssize_t sendfile_ (int ofd, int ifd, off_t offset, size_t count) 848static ssize_t sendfile_ (int ofd, int ifd, off_t offset, size_t count, worker *self)
630{ 849{
631 ssize_t res; 850 ssize_t res;
632 851
633 if (!count) 852 if (!count)
634 return 0; 853 return 0;
645 { 864 {
646 off_t sbytes; 865 off_t sbytes;
647 res = sendfile (ifd, ofd, offset, count, 0, &sbytes, 0); 866 res = sendfile (ifd, ofd, offset, count, 0, &sbytes, 0);
648 867
649 if (res < 0 && sbytes) 868 if (res < 0 && sbytes)
650 /* maybe only on EAGAIN only: as usual, the manpage leaves you guessing */ 869 /* maybe only on EAGAIN: as usual, the manpage leaves you guessing */
651 res = sbytes; 870 res = sbytes;
652 } 871 }
653 872
654# elif __hpux 873# elif __hpux
655 res = sendfile (ofd, ifd, offset, count, 0, 0); 874 res = sendfile (ofd, ifd, offset, count, 0, 0);
683#endif 902#endif
684 ) 903 )
685 ) 904 )
686 { 905 {
687 /* emulate sendfile. this is a major pain in the ass */ 906 /* emulate sendfile. this is a major pain in the ass */
688 char buf[4096]; 907 dBUF;
908
689 res = 0; 909 res = 0;
690 910
691 while (count) 911 while (count)
692 { 912 {
693 ssize_t cnt; 913 ssize_t cnt;
694 914
695 cnt = pread (ifd, buf, count > 4096 ? 4096 : count, offset); 915 cnt = pread (ifd, aio_buf, count > AIO_BUFSIZE ? AIO_BUFSIZE : count, offset);
696 916
697 if (cnt <= 0) 917 if (cnt <= 0)
698 { 918 {
699 if (cnt && !res) res = -1; 919 if (cnt && !res) res = -1;
700 break; 920 break;
701 } 921 }
702 922
703 cnt = write (ofd, buf, cnt); 923 cnt = write (ofd, aio_buf, cnt);
704 924
705 if (cnt <= 0) 925 if (cnt <= 0)
706 { 926 {
707 if (cnt && !res) res = -1; 927 if (cnt && !res) res = -1;
708 break; 928 break;
716 936
717 return res; 937 return res;
718} 938}
719 939
720/* read a full directory */ 940/* read a full directory */
721static int scandir_ (const char *path, void **namesp) 941static void scandir_ (aio_req req, worker *self)
722{ 942{
723 DIR *dirp = opendir (path); 943 DIR *dirp;
724 union 944 union
725 { 945 {
726 struct dirent d; 946 struct dirent d;
727 char b [offsetof (struct dirent, d_name) + NAME_MAX + 1]; 947 char b [offsetof (struct dirent, d_name) + NAME_MAX + 1];
728 } u; 948 } *u;
729 struct dirent *entp; 949 struct dirent *entp;
730 char *name, *names; 950 char *name, *names;
731 int memlen = 4096; 951 int memlen = 4096;
732 int memofs = 0; 952 int memofs = 0;
733 int res = 0; 953 int res = 0;
734 int errorno; 954 int errorno;
735 955
736 if (!dirp) 956 LOCK (wrklock);
737 return -1; 957 self->dirp = dirp = opendir (req->dataptr);
738 958 self->dbuf = u = malloc (sizeof (*u));
739 names = malloc (memlen); 959 req->data2ptr = names = malloc (memlen);
960 UNLOCK (wrklock);
961
962 if (dirp && u && names)
963 for (;;)
964 {
965 errno = 0;
966 readdir_r (dirp, &u->d, &entp);
967
968 if (!entp)
969 break;
970
971 name = entp->d_name;
972
973 if (name [0] != '.' || (name [1] && (name [1] != '.' || name [2])))
974 {
975 int len = strlen (name) + 1;
976
977 res++;
978
979 while (memofs + len > memlen)
980 {
981 memlen *= 2;
982 LOCK (wrklock);
983 req->data2ptr = names = realloc (names, memlen);
984 UNLOCK (wrklock);
985
986 if (!names)
987 break;
988 }
989
990 memcpy (names + memofs, name, len);
991 memofs += len;
992 }
993 }
994
995 if (errno)
996 res = -1;
997
998 req->result = res;
999}
1000
1001/*****************************************************************************/
1002
1003static void *aio_proc (void *thr_arg)
1004{
1005 aio_req req;
1006 worker *self = (worker *)thr_arg;
740 1007
741 for (;;) 1008 for (;;)
742 { 1009 {
743 errno = 0, readdir_r (dirp, &u.d, &entp); 1010 LOCK (reqlock);
744
745 if (!entp)
746 break;
747
748 name = entp->d_name;
749
750 if (name [0] != '.' || (name [1] && (name [1] != '.' || name [2])))
751 {
752 int len = strlen (name) + 1;
753
754 res++;
755
756 while (memofs + len > memlen)
757 {
758 memlen *= 2;
759 names = realloc (names, memlen);
760 if (!names)
761 break;
762 }
763
764 memcpy (names + memofs, name, len);
765 memofs += len;
766 }
767 }
768
769 errorno = errno;
770 closedir (dirp);
771
772 if (errorno)
773 {
774 free (names);
775 errno = errorno;
776 res = -1;
777 }
778
779 *namesp = (void *)names;
780 return res;
781}
782
783/*****************************************************************************/
784
785static void *aio_proc (void *thr_arg)
786{
787 aio_req req;
788 int type;
789
790 do
791 {
792 pthread_mutex_lock (&reqlock);
793 1011
794 for (;;) 1012 for (;;)
795 { 1013 {
796 req = reqs; 1014 self->req = req = reqq_shift (&req_queue);
797
798 if (reqs)
799 {
800 reqs = reqs->next;
801 if (!reqs) reqe = 0;
802 }
803 1015
804 if (req) 1016 if (req)
805 break; 1017 break;
806 1018
807 pthread_cond_wait (&reqwait, &reqlock); 1019 pthread_cond_wait (&reqwait, &reqlock);
808 } 1020 }
809 1021
810 pthread_mutex_unlock (&reqlock); 1022 --nready;
1023
1024 UNLOCK (reqlock);
811 1025
812 errno = 0; /* strictly unnecessary */ 1026 errno = 0; /* strictly unnecessary */
813 type = req->type; /* remember type for QUIT check */
814 1027
815 if (!(req->flags & FLAG_CANCELLED)) 1028 if (!(req->flags & FLAG_CANCELLED))
816 switch (type) 1029 switch (req->type)
817 { 1030 {
818 case REQ_READ: req->result = pread (req->fd, req->dataptr, req->length, req->offset); break; 1031 case REQ_READ: req->result = pread (req->fd, req->dataptr, req->length, req->offset); break;
819 case REQ_WRITE: req->result = pwrite (req->fd, req->dataptr, req->length, req->offset); break; 1032 case REQ_WRITE: req->result = pwrite (req->fd, req->dataptr, req->length, req->offset); break;
820 1033
821 case REQ_READAHEAD: req->result = readahead (req->fd, req->offset, req->length); break; 1034 case REQ_READAHEAD: req->result = readahead (req->fd, req->offset, req->length); break;
822 case REQ_SENDFILE: req->result = sendfile_ (req->fd, req->fd2, req->offset, req->length); break; 1035 case REQ_SENDFILE: req->result = sendfile_ (req->fd, req->fd2, req->offset, req->length, self); break;
823 1036
824 case REQ_STAT: req->result = stat (req->dataptr, req->statdata); break; 1037 case REQ_STAT: req->result = stat (req->dataptr, req->statdata); break;
825 case REQ_LSTAT: req->result = lstat (req->dataptr, req->statdata); break; 1038 case REQ_LSTAT: req->result = lstat (req->dataptr, req->statdata); break;
826 case REQ_FSTAT: req->result = fstat (req->fd , req->statdata); break; 1039 case REQ_FSTAT: req->result = fstat (req->fd , req->statdata); break;
827 1040
833 case REQ_LINK: req->result = link (req->data2ptr, req->dataptr); break; 1046 case REQ_LINK: req->result = link (req->data2ptr, req->dataptr); break;
834 case REQ_SYMLINK: req->result = symlink (req->data2ptr, req->dataptr); break; 1047 case REQ_SYMLINK: req->result = symlink (req->data2ptr, req->dataptr); break;
835 1048
836 case REQ_FDATASYNC: req->result = fdatasync (req->fd); break; 1049 case REQ_FDATASYNC: req->result = fdatasync (req->fd); break;
837 case REQ_FSYNC: req->result = fsync (req->fd); break; 1050 case REQ_FSYNC: req->result = fsync (req->fd); break;
838 case REQ_READDIR: req->result = scandir_ (req->dataptr, &req->data2ptr); break; 1051 case REQ_READDIR: scandir_ (req, self); break;
839 1052
840 case REQ_SLEEP: 1053 case REQ_BUSY:
841 { 1054 {
842 struct timeval tv; 1055 struct timeval tv;
843 1056
844 tv.tv_sec = req->fd; 1057 tv.tv_sec = req->fd;
845 tv.tv_usec = req->fd2; 1058 tv.tv_usec = req->fd2;
847 req->result = select (0, 0, 0, 0, &tv); 1060 req->result = select (0, 0, 0, 0, &tv);
848 } 1061 }
849 1062
850 case REQ_GROUP: 1063 case REQ_GROUP:
851 case REQ_NOP: 1064 case REQ_NOP:
1065 break;
1066
852 case REQ_QUIT: 1067 case REQ_QUIT:
1068 LOCK (wrklock);
1069 worker_free (self);
1070 --started;
1071 UNLOCK (wrklock);
853 break; 1072 return 0;
854 1073
855 default: 1074 default:
856 req->result = ENOSYS; 1075 req->result = ENOSYS;
857 break; 1076 break;
858 } 1077 }
859 1078
860 req->errorno = errno; 1079 req->errorno = errno;
861 1080
862 pthread_mutex_lock (&reslock); 1081 LOCK (reslock);
863 1082
864 req->next = 0; 1083 ++npending;
865 1084
866 if (rese) 1085 if (!reqq_push (&res_queue, req))
867 {
868 rese->next = req;
869 rese = req;
870 }
871 else
872 {
873 rese = ress = req;
874
875 /* write a dummy byte to the pipe so fh becomes ready */ 1086 /* write a dummy byte to the pipe so fh becomes ready */
876 write (respipe [1], &respipe, 1); 1087 write (respipe [1], &respipe, 1);
877 }
878 1088
879 pthread_mutex_unlock (&reslock); 1089 self->req = 0;
880 } 1090 worker_clear (self);
881 while (type != REQ_QUIT);
882 1091
883 return 0; 1092 UNLOCK (reslock);
1093 }
884} 1094}
885 1095
886/*****************************************************************************/ 1096/*****************************************************************************/
887 1097
888static void atfork_prepare (void) 1098static void atfork_prepare (void)
889{ 1099{
890 pthread_mutex_lock (&reqlock); 1100 LOCK (wrklock);
891 pthread_mutex_lock (&reslock); 1101 LOCK (reqlock);
1102 LOCK (reslock);
892#if !HAVE_PREADWRITE 1103#if !HAVE_PREADWRITE
893 pthread_mutex_lock (&preadwritelock); 1104 LOCK (preadwritelock);
894#endif 1105#endif
895#if !HAVE_READDIR_R 1106#if !HAVE_READDIR_R
896 pthread_mutex_lock (&readdirlock); 1107 LOCK (readdirlock);
897#endif 1108#endif
898} 1109}
899 1110
900static void atfork_parent (void) 1111static void atfork_parent (void)
901{ 1112{
902#if !HAVE_READDIR_R 1113#if !HAVE_READDIR_R
903 pthread_mutex_unlock (&readdirlock); 1114 UNLOCK (readdirlock);
904#endif 1115#endif
905#if !HAVE_PREADWRITE 1116#if !HAVE_PREADWRITE
906 pthread_mutex_unlock (&preadwritelock); 1117 UNLOCK (preadwritelock);
907#endif 1118#endif
908 pthread_mutex_unlock (&reslock); 1119 UNLOCK (reslock);
909 pthread_mutex_unlock (&reqlock); 1120 UNLOCK (reqlock);
1121 UNLOCK (wrklock);
910} 1122}
911 1123
912static void atfork_child (void) 1124static void atfork_child (void)
913{ 1125{
914 aio_req prv; 1126 aio_req prv;
915 1127
1128 while (prv = reqq_shift (&req_queue))
1129 req_free (prv);
1130
1131 while (prv = reqq_shift (&res_queue))
1132 req_free (prv);
1133
1134 while (wrk_first.next != &wrk_first)
1135 {
1136 worker *wrk = wrk_first.next;
1137
1138 if (wrk->req)
1139 req_free (wrk->req);
1140
1141 worker_clear (wrk);
1142 worker_free (wrk);
1143 }
1144
916 started = 0; 1145 started = 0;
917 1146 nreqs = 0;
918 while (reqs)
919 {
920 prv = reqs;
921 reqs = prv->next;
922 req_free (prv);
923 }
924
925 reqs = reqe = 0;
926
927 while (ress)
928 {
929 prv = ress;
930 ress = prv->next;
931 req_free (prv);
932 }
933
934 ress = rese = 0;
935 1147
936 close (respipe [0]); 1148 close (respipe [0]);
937 close (respipe [1]); 1149 close (respipe [1]);
938 create_pipe (); 1150 create_pipe ();
939 1151
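The three atfork handlers follow the usual pthread_atfork discipline: lock every mutex in the prepare hook, unlock them again in the parent hook, and rebuild queue and thread state from scratch in the child, since worker threads do not survive a fork. A minimal standalone sketch of that discipline, with toy state standing in for the request queues:

#include <pthread.h>
#include <stdio.h>
#include <sys/types.h>
#include <sys/wait.h>
#include <unistd.h>

/* toy shared state standing in for the request/result queues */
static pthread_mutex_t toy_lock = PTHREAD_MUTEX_INITIALIZER;
static int toy_pending = 3;

static void prepare (void)   /* runs in the parent, before fork */
{
  pthread_mutex_lock (&toy_lock);
}

static void parent (void)    /* runs in the parent, after fork */
{
  pthread_mutex_unlock (&toy_lock);
}

static void child (void)     /* runs in the child, after fork */
{
  /* the child starts with no worker threads; drop inherited state */
  toy_pending = 0;
  pthread_mutex_unlock (&toy_lock);
}

int main (void)
{
  pid_t pid;

  pthread_atfork (prepare, parent, child);

  pid = fork ();

  if (pid == 0)
    {
      printf ("child : pending = %d\n", toy_pending);   /* 0 */
      _exit (0);
    }

  waitpid (pid, 0, 0);
  printf ("parent: pending = %d\n", toy_pending);       /* 3 */

  return 0;
}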
972 newCONSTSUB (stash, "O_RDONLY", newSViv (O_RDONLY)); 1184 newCONSTSUB (stash, "O_RDONLY", newSViv (O_RDONLY));
973 newCONSTSUB (stash, "O_WRONLY", newSViv (O_WRONLY)); 1185 newCONSTSUB (stash, "O_WRONLY", newSViv (O_WRONLY));
974 1186
975 create_pipe (); 1187 create_pipe ();
976 pthread_atfork (atfork_prepare, atfork_parent, atfork_child); 1188 pthread_atfork (atfork_prepare, atfork_parent, atfork_child);
977}
978 1189
1190 start_thread ();
1191}
1192
979void 1193void
980min_parallel (nthreads) 1194min_parallel (int nthreads)
981 int nthreads
982 PROTOTYPE: $ 1195 PROTOTYPE: $
983 1196
984void 1197void
985max_parallel (nthreads) 1198max_parallel (int nthreads)
986 int nthreads
987 PROTOTYPE: $ 1199 PROTOTYPE: $
988 1200
989int 1201int
990max_outstanding (nreqs) 1202max_outstanding (int maxreqs)
991 int nreqs 1203 PROTOTYPE: $
992 PROTOTYPE: $
993 CODE: 1204 CODE:
994 RETVAL = max_outstanding; 1205 RETVAL = max_outstanding;
995 max_outstanding = nreqs; 1206 max_outstanding = maxreqs;
1207 OUTPUT:
1208 RETVAL
996 1209
997void 1210void
998aio_open (pathname,flags,mode,callback=&PL_sv_undef) 1211aio_open (pathname,flags,mode,callback=&PL_sv_undef)
999 SV * pathname 1212 SV * pathname
1000 int flags 1213 int flags
1215 1428
1216 REQ_SEND; 1429 REQ_SEND;
1217} 1430}
1218 1431
1219void 1432void
1220aio_sleep (delay,callback=&PL_sv_undef) 1433aio_busy (delay,callback=&PL_sv_undef)
1221 double delay 1434 double delay
1222 SV * callback 1435 SV * callback
1223 PPCODE: 1436 PPCODE:
1224{ 1437{
1225 dREQ; 1438 dREQ;
1226 1439
1227 req->type = REQ_SLEEP; 1440 req->type = REQ_BUSY;
1228 req->fd = delay < 0. ? 0 : delay; 1441 req->fd = delay < 0. ? 0 : delay;
1229 req->fd2 = delay < 0. ? 0 : 1000. * (delay - req->fd); 1442 req->fd2 = delay < 0. ? 0 : 1000. * (delay - req->fd);
1230 1443
1231 REQ_SEND; 1444 REQ_SEND;
1232} 1445}
1255 req->type = REQ_NOP; 1468 req->type = REQ_NOP;
1256 1469
1257 REQ_SEND; 1470 REQ_SEND;
1258} 1471}
1259 1472
1260#if 0 1473int
1261 1474aioreq_pri (int pri = 0)
1262void 1475 PROTOTYPE: ;$
1263aio_pri (int pri = DEFAULT_PRI)
1264 CODE: 1476 CODE:
1477 RETVAL = next_pri - PRI_BIAS;
1478 if (items > 0)
1479 {
1265 if (pri < PRI_MIN) pri = PRI_MIN; 1480 if (pri < PRI_MIN) pri = PRI_MIN;
1266 if (pri > PRI_MAX) pri = PRI_MAX; 1481 if (pri > PRI_MAX) pri = PRI_MAX;
1267 next_pri = pri + PRI_BIAS; 1482 next_pri = pri + PRI_BIAS;
1483 }
1484 OUTPUT:
1485 RETVAL
1268 1486
1269#endif 1487void
1488aioreq_nice (int nice = 0)
1489 CODE:
1490 nice = next_pri - nice;
1491 if (nice < PRI_MIN) nice = PRI_MIN;
1492 if (nice > PRI_MAX) nice = PRI_MAX;
1493 next_pri = nice + PRI_BIAS;
1270 1494
1271void 1495void
1272flush () 1496flush ()
1273 PROTOTYPE: 1497 PROTOTYPE:
1274 CODE: 1498 CODE:
1275 while (nreqs) 1499 while (nreqs)
1276 { 1500 {
1277 poll_wait (); 1501 poll_wait ();
1278 poll_cb (); 1502 poll_cb (0);
1279 } 1503 }
1280 1504
1281void 1505void
1282poll() 1506poll()
1283 PROTOTYPE: 1507 PROTOTYPE:
1284 CODE: 1508 CODE:
1285 if (nreqs) 1509 if (nreqs)
1286 { 1510 {
1287 poll_wait (); 1511 poll_wait ();
1288 poll_cb (); 1512 poll_cb (0);
1289 } 1513 }
1290 1514
1291int 1515int
1292poll_fileno() 1516poll_fileno()
1293 PROTOTYPE: 1517 PROTOTYPE:
1298 1522
1299int 1523int
1300poll_cb(...) 1524poll_cb(...)
1301 PROTOTYPE: 1525 PROTOTYPE:
1302 CODE: 1526 CODE:
1303 RETVAL = poll_cb (); 1527 RETVAL = poll_cb (0);
1528 OUTPUT:
1529 RETVAL
1530
1531int
1532poll_some(int max = 0)
1533 PROTOTYPE: $
1534 CODE:
1535 RETVAL = poll_cb (max);
1304 OUTPUT: 1536 OUTPUT:
1305 RETVAL 1537 RETVAL
1306 1538
1307void 1539void
1308poll_wait() 1540poll_wait()
1317 CODE: 1549 CODE:
1318 RETVAL = nreqs; 1550 RETVAL = nreqs;
1319 OUTPUT: 1551 OUTPUT:
1320 RETVAL 1552 RETVAL
1321 1553
1554int
1555nready()
1556 PROTOTYPE:
1557 CODE:
1558 RETVAL = get_nready ();
1559 OUTPUT:
1560 RETVAL
1561
1562int
1563npending()
1564 PROTOTYPE:
1565 CODE:
1566 RETVAL = get_npending ();
1567 OUTPUT:
1568 RETVAL
1569
1322PROTOTYPES: DISABLE 1570PROTOTYPES: DISABLE
1323 1571
1324MODULE = IO::AIO PACKAGE = IO::AIO::REQ 1572MODULE = IO::AIO PACKAGE = IO::AIO::REQ
1325 1573
1326void 1574void
1327cancel (aio_req_ornot req) 1575cancel (aio_req_ornot req)
1328 PROTOTYPE:
1329 CODE: 1576 CODE:
1330 req_cancel (req); 1577 req_cancel (req);
1331 1578
1332void 1579void
1333cb (aio_req_ornot req, SV *callback=&PL_sv_undef) 1580cb (aio_req_ornot req, SV *callback=&PL_sv_undef)
1369 } 1616 }
1370 } 1617 }
1371} 1618}
1372 1619
1373void 1620void
1621cancel_subs (aio_req_ornot req)
1622 CODE:
1623 req_cancel_subs (req);
1624
1625void
1374result (aio_req grp, ...) 1626result (aio_req grp, ...)
1375 CODE: 1627 CODE:
1376{ 1628{
1377 int i; 1629 int i;
1630 AV *av;
1631
1632 grp->errorno = errno;
1633
1378 AV *av = newAV (); 1634 av = newAV ();
1379 1635
1380 for (i = 1; i < items; ++i ) 1636 for (i = 1; i < items; ++i )
1381 av_push (av, newSVsv (ST (i))); 1637 av_push (av, newSVsv (ST (i)));
1382 1638
1383 SvREFCNT_dec (grp->data); 1639 SvREFCNT_dec (grp->data);
1384 grp->data = (SV *)av; 1640 grp->data = (SV *)av;
1385} 1641}
1386 1642
1387void 1643void
1644errno (aio_req grp, int errorno = errno)
1645 CODE:
1646 grp->errorno = errorno;
1647
1648void
1388feed_limit (aio_req grp, int limit) 1649limit (aio_req grp, int limit)
1389 CODE: 1650 CODE:
1390 grp->fd2 = limit; 1651 grp->fd2 = limit;
1391 aio_grp_feed (grp); 1652 aio_grp_feed (grp);
1392 1653
1393void 1654void
