/cvs/IO-AIO/AIO.xs

Comparing IO-AIO/AIO.xs (file contents):
Revision 1.54 by root, Mon Oct 23 00:34:36 2006 UTC vs.
Revision 1.78 by root, Thu Oct 26 14:35:34 2006 UTC

1/* solaris */
2#define _POSIX_PTHREAD_SEMANTICS 1
3
4#if __linux && !defined(_GNU_SOURCE)
5# define _GNU_SOURCE
6#endif
7
8/* just in case */
1#define _REENTRANT 1 9#define _REENTRANT 1
10
2#include <errno.h> 11#include <errno.h>
3 12
4#include "EXTERN.h" 13#include "EXTERN.h"
5#include "perl.h" 14#include "perl.h"
6#include "XSUB.h" 15#include "XSUB.h"
39/* used for struct dirent, AIX doesn't provide it */ 48/* used for struct dirent, AIX doesn't provide it */
40#ifndef NAME_MAX 49#ifndef NAME_MAX
41# define NAME_MAX 4096 50# define NAME_MAX 4096
42#endif 51#endif
43 52
53#ifndef PTHREAD_STACK_MIN
54/* care for broken platforms, e.g. windows */
55# define PTHREAD_STACK_MIN 16384
56#endif
57
44#if __ia64 58#if __ia64
45# define STACKSIZE 65536 59# define STACKSIZE 65536
60#elif __i386 || __x86_64 /* 16k is unreasonably high :( */
61# define STACKSIZE PTHREAD_STACK_MIN
46#else 62#else
47# define STACKSIZE 8192 63# define STACKSIZE 16384
48#endif 64#endif
65
66/* buffer size for various temporary buffers */
67#define AIO_BUFSIZE 65536
68
69#define dBUF \
70 char *aio_buf; \
71 LOCK (wrklock); \
72 self->dbuf = aio_buf = malloc (AIO_BUFSIZE); \
73 UNLOCK (wrklock); \
74 if (!aio_buf) \
75 return -1;
49 76
50enum { 77enum {
51 REQ_QUIT, 78 REQ_QUIT,
52 REQ_OPEN, REQ_CLOSE, 79 REQ_OPEN, REQ_CLOSE,
53 REQ_READ, REQ_WRITE, REQ_READAHEAD, 80 REQ_READ, REQ_WRITE, REQ_READAHEAD,
56 REQ_FSYNC, REQ_FDATASYNC, 83 REQ_FSYNC, REQ_FDATASYNC,
57 REQ_UNLINK, REQ_RMDIR, REQ_RENAME, 84 REQ_UNLINK, REQ_RMDIR, REQ_RENAME,
58 REQ_READDIR, 85 REQ_READDIR,
59 REQ_LINK, REQ_SYMLINK, 86 REQ_LINK, REQ_SYMLINK,
60 REQ_GROUP, REQ_NOP, 87 REQ_GROUP, REQ_NOP,
61 REQ_SLEEP, 88 REQ_BUSY,
62}; 89};
63 90
64#define AIO_REQ_KLASS "IO::AIO::REQ" 91#define AIO_REQ_KLASS "IO::AIO::REQ"
65#define AIO_GRP_KLASS "IO::AIO::GRP" 92#define AIO_GRP_KLASS "IO::AIO::GRP"
66 93
67typedef struct aio_cb 94typedef struct aio_cb
68{ 95{
69 struct aio_cb *volatile next; 96 struct aio_cb *volatile next;
70
71 struct aio_cb *grp, *grp_prev, *grp_next, *grp_first;
72
73 SV *self; /* the perl counterpart of this request, if any */
74 97
75 SV *data, *callback; 98 SV *data, *callback;
76 SV *fh, *fh2; 99 SV *fh, *fh2;
77 void *dataptr, *data2ptr; 100 void *dataptr, *data2ptr;
78 Stat_t *statdata; 101 Stat_t *statdata;
79 off_t offset; 102 off_t offset;
80 size_t length; 103 size_t length;
81 ssize_t result; 104 ssize_t result;
82 105
106 STRLEN dataoffset;
83 int type; 107 int type;
84 int fd, fd2; 108 int fd, fd2;
85 int errorno; 109 int errorno;
86 STRLEN dataoffset;
87 mode_t mode; /* open */ 110 mode_t mode; /* open */
111
88 unsigned char cancelled; 112 unsigned char flags;
113 unsigned char pri;
114
115 SV *self; /* the perl counterpart of this request, if any */
116 struct aio_cb *grp, *grp_prev, *grp_next, *grp_first;
89} aio_cb; 117} aio_cb;
118
119enum {
120 FLAG_CANCELLED = 0x01,
121};
90 122
91typedef aio_cb *aio_req; 123typedef aio_cb *aio_req;
92typedef aio_cb *aio_req_ornot; 124typedef aio_cb *aio_req_ornot;
93 125
126enum {
127 PRI_MIN = -4,
128 PRI_MAX = 4,
129
130 DEFAULT_PRI = 0,
131 PRI_BIAS = -PRI_MIN,
132 NUM_PRI = PRI_MAX + PRI_BIAS + 1,
133};
134
135static int next_pri = DEFAULT_PRI + PRI_BIAS;
136
94static int started, wanted; 137static unsigned int started, wanted;
95static volatile int nreqs; 138static volatile unsigned int nreqs;
96static int max_outstanding = 1<<30; 139static volatile unsigned int max_outstanding = 0xffffffff;
97static int respipe [2]; 140static int respipe [2];
98 141
142#if __linux && defined (PTHREAD_ADAPTIVE_MUTEX_INITIALIZER_NP)
143# define AIO_MUTEX_INIT PTHREAD_ADAPTIVE_MUTEX_INITIALIZER_NP
144#else
145# define AIO_MUTEX_INIT PTHREAD_MUTEX_INITIALIZER
146#endif
147
148#define LOCK(mutex) pthread_mutex_lock (&(mutex))
149#define UNLOCK(mutex) pthread_mutex_unlock (&(mutex))
150
151/* worker threads management */
152static pthread_mutex_t wrklock = AIO_MUTEX_INIT;
153
154typedef struct worker {
155 /* locked by wrklock */
156 struct worker *prev, *next;
157
158 pthread_t tid;
159
160 /* locked by reslock, reqlock or wrklock */
161 aio_req req; /* currently processed request */
162 void *dbuf;
163 DIR *dirp;
164} worker;
165
166static worker wrk_first = { &wrk_first, &wrk_first, 0 };
167
168static void worker_clear (worker *wrk)
169{
170 if (wrk->dirp)
171 {
172 closedir (wrk->dirp);
173 wrk->dirp = 0;
174 }
175
176 if (wrk->dbuf)
177 {
178 free (wrk->dbuf);
179 wrk->dbuf = 0;
180 }
181}
182
183static void worker_free (worker *wrk)
184{
185 wrk->next->prev = wrk->prev;
186 wrk->prev->next = wrk->next;
187
188 free (wrk);
189}
190
99static pthread_mutex_t reslock = PTHREAD_MUTEX_INITIALIZER; 191static pthread_mutex_t reslock = AIO_MUTEX_INIT;
100static pthread_mutex_t reqlock = PTHREAD_MUTEX_INITIALIZER; 192static pthread_mutex_t reqlock = AIO_MUTEX_INIT;
101static pthread_cond_t reqwait = PTHREAD_COND_INITIALIZER; 193static pthread_cond_t reqwait = PTHREAD_COND_INITIALIZER;
102 194
103static volatile aio_req reqs, reqe; /* queue start, queue end */ 195/*
104static volatile aio_req ress, rese; /* queue start, queue end */ 196 * a somewhat faster data structure might be nice, but
197 * with 8 priorities this actually needs <20 insns
198 * per shift, the most expensive operation.
199 */
200typedef struct {
201 aio_req qs[NUM_PRI], qe[NUM_PRI]; /* qstart, qend */
202 int size;
203} reqq;
105 204
205static reqq req_queue;
206static reqq res_queue;
207
208int reqq_push (reqq *q, aio_req req)
209{
210 int pri = req->pri;
211 req->next = 0;
212
213 if (q->qe[pri])
214 {
215 q->qe[pri]->next = req;
216 q->qe[pri] = req;
217 }
218 else
219 q->qe[pri] = q->qs[pri] = req;
220
221 return q->size++;
222}
223
224aio_req reqq_shift (reqq *q)
225{
226 int pri;
227
228 if (!q->size)
229 return 0;
230
231 --q->size;
232
233 for (pri = NUM_PRI; pri--; )
234 {
235 aio_req req = q->qs[pri];
236
237 if (req)
238 {
239 if (!(q->qs[pri] = req->next))
240 q->qe[pri] = 0;
241
242 return req;
243 }
244 }
245
246 abort ();
247}
248
249static int poll_cb (int max);
106static void req_invoke (aio_req req); 250static void req_invoke (aio_req req);
107static void req_free (aio_req req); 251static void req_free (aio_req req);
252static void req_cancel (aio_req req);
108 253
109/* must be called at most once */ 254/* must be called at most once */
110static SV *req_sv (aio_req req, const char *klass) 255static SV *req_sv (aio_req req, const char *klass)
111{ 256{
112 if (!req->self) 257 if (!req->self)
130 return mg ? (aio_req)mg->mg_ptr : 0; 275 return mg ? (aio_req)mg->mg_ptr : 0;
131} 276}
132 277
133static void aio_grp_feed (aio_req grp) 278static void aio_grp_feed (aio_req grp)
134{ 279{
135 while (grp->length < grp->fd2) 280 while (grp->length < grp->fd2 && !(grp->flags & FLAG_CANCELLED))
136 { 281 {
137 int old_len = grp->length; 282 int old_len = grp->length;
138 283
139 if (grp->fh2 && SvOK (grp->fh2)) 284 if (grp->fh2 && SvOK (grp->fh2))
140 { 285 {
143 ENTER; 288 ENTER;
144 SAVETMPS; 289 SAVETMPS;
145 PUSHMARK (SP); 290 PUSHMARK (SP);
146 XPUSHs (req_sv (grp, AIO_GRP_KLASS)); 291 XPUSHs (req_sv (grp, AIO_GRP_KLASS));
147 PUTBACK; 292 PUTBACK;
148 call_sv (grp->fh2, G_VOID | G_EVAL); 293 call_sv (grp->fh2, G_VOID | G_EVAL | G_KEEPERR);
149 SPAGAIN; 294 SPAGAIN;
150 FREETMPS; 295 FREETMPS;
151 LEAVE; 296 LEAVE;
152 } 297 }
153 298
176 } 321 }
177} 322}
178 323
179static void poll_wait () 324static void poll_wait ()
180{ 325{
181 if (nreqs && !ress)
182 {
183 fd_set rfd; 326 fd_set rfd;
327
328 while (nreqs)
329 {
330 int size;
331#if !(__i386 || __x86_64) /* safe without semaphore on these archs */
332 LOCK (reslock);
333#endif
334 size = res_queue.size;
335#if !(__i386 || __x86_64) /* safe without semaphore on these archs */
336 UNLOCK (reslock);
337#endif
338
339 if (size)
340 return;
341
184 FD_ZERO(&rfd); 342 FD_ZERO(&rfd);
185 FD_SET(respipe [0], &rfd); 343 FD_SET(respipe [0], &rfd);
186 344
187 select (respipe [0] + 1, &rfd, 0, 0, 0); 345 select (respipe [0] + 1, &rfd, 0, 0, 0);
188 } 346 }
189} 347}
190 348
191static void req_invoke (aio_req req) 349static void req_invoke (aio_req req)
192{ 350{
193 dSP; 351 dSP;
194 int errorno = errno;
195 352
196 if (req->cancelled || !SvOK (req->callback)) 353 if (!(req->flags & FLAG_CANCELLED) && SvOK (req->callback))
197 return; 354 {
198
199 errno = req->errorno; 355 errno = req->errorno;
200 356
201 ENTER; 357 ENTER;
202 SAVETMPS; 358 SAVETMPS;
203 PUSHMARK (SP); 359 PUSHMARK (SP);
204 EXTEND (SP, 1); 360 EXTEND (SP, 1);
205 361
206 switch (req->type) 362 switch (req->type)
207 {
208 case REQ_READDIR:
209 { 363 {
210 SV *rv = &PL_sv_undef; 364 case REQ_READDIR:
211
212 if (req->result >= 0)
213 { 365 {
214 char *buf = req->data2ptr; 366 SV *rv = &PL_sv_undef;
215 AV *av = newAV ();
216 367
217 while (req->result) 368 if (req->result >= 0)
218 { 369 {
370 int i;
371 char *buf = req->data2ptr;
372 AV *av = newAV ();
373
374 av_extend (av, req->result - 1);
375
376 for (i = 0; i < req->result; ++i)
377 {
219 SV *sv = newSVpv (buf, 0); 378 SV *sv = newSVpv (buf, 0);
220 379
221 av_push (av, sv); 380 av_store (av, i, sv);
222 buf += SvCUR (sv) + 1; 381 buf += SvCUR (sv) + 1;
223 req->result--; 382 }
383
384 rv = sv_2mortal (newRV_noinc ((SV *)av));
224 } 385 }
225 386
226 rv = sv_2mortal (newRV_noinc ((SV *)av)); 387 PUSHs (rv);
227 } 388 }
389 break;
228 390
229 PUSHs (rv); 391 case REQ_OPEN:
392 {
393 /* convert fd to fh */
394 SV *fh;
395
396 PUSHs (sv_2mortal (newSViv (req->result)));
397 PUTBACK;
398 call_pv ("IO::AIO::_fd2fh", G_SCALAR | G_EVAL);
399 SPAGAIN;
400
401 fh = SvREFCNT_inc (POPs);
402
403 PUSHMARK (SP);
404 XPUSHs (sv_2mortal (fh));
405 }
406 break;
407
408 case REQ_GROUP:
409 req->fd = 2; /* mark group as finished */
410
411 if (req->data)
412 {
413 int i;
414 AV *av = (AV *)req->data;
415
416 EXTEND (SP, AvFILL (av) + 1);
417 for (i = 0; i <= AvFILL (av); ++i)
418 PUSHs (*av_fetch (av, i, 0));
419 }
420 break;
421
422 case REQ_NOP:
423 case REQ_BUSY:
424 break;
425
426 default:
427 PUSHs (sv_2mortal (newSViv (req->result)));
428 break;
230 } 429 }
231 break;
232 430
233 case REQ_OPEN:
234 {
235 /* convert fd to fh */
236 SV *fh;
237 431
238 PUSHs (sv_2mortal (newSViv (req->result)));
239 PUTBACK; 432 PUTBACK;
240 call_pv ("IO::AIO::_fd2fh", G_SCALAR | G_EVAL);
241 SPAGAIN;
242
243 fh = SvREFCNT_inc (POPs);
244
245 PUSHMARK (SP);
246 XPUSHs (sv_2mortal (fh));
247 }
248 break;
249
250 case REQ_GROUP:
251 req->fd = 2; /* mark group as finished */
252
253 if (req->data)
254 {
255 int i;
256 AV *av = (AV *)req->data;
257
258 EXTEND (SP, AvFILL (av) + 1);
259 for (i = 0; i <= AvFILL (av); ++i)
260 PUSHs (*av_fetch (av, i, 0));
261 }
262 break;
263
264 case REQ_NOP:
265 case REQ_SLEEP:
266 break;
267
268 default:
269 PUSHs (sv_2mortal (newSViv (req->result)));
270 break;
271 }
272
273
274 PUTBACK;
275 call_sv (req->callback, G_VOID | G_EVAL); 433 call_sv (req->callback, G_VOID | G_EVAL);
276 SPAGAIN; 434 SPAGAIN;
277 435
278 FREETMPS; 436 FREETMPS;
279 LEAVE; 437 LEAVE;
280
281 errno = errorno;
282
283 if (SvTRUE (ERRSV))
284 { 438 }
285 req_free (req);
286 croak (0);
287 }
288}
289 439
290static void req_free (aio_req req)
291{
292 if (req->grp) 440 if (req->grp)
293 { 441 {
294 aio_req grp = req->grp; 442 aio_req grp = req->grp;
295 443
296 /* unlink request */ 444 /* unlink request */
301 grp->grp_first = req->grp_next; 449 grp->grp_first = req->grp_next;
302 450
303 aio_grp_dec (grp); 451 aio_grp_dec (grp);
304 } 452 }
305 453
454 if (SvTRUE (ERRSV))
455 {
456 req_free (req);
457 croak (0);
458 }
459}
460
461static void req_free (aio_req req)
462{
306 if (req->self) 463 if (req->self)
307 { 464 {
308 sv_unmagic (req->self, PERL_MAGIC_ext); 465 sv_unmagic (req->self, PERL_MAGIC_ext);
309 SvREFCNT_dec (req->self); 466 SvREFCNT_dec (req->self);
310 } 467 }
313 SvREFCNT_dec (req->fh); 470 SvREFCNT_dec (req->fh);
314 SvREFCNT_dec (req->fh2); 471 SvREFCNT_dec (req->fh2);
315 SvREFCNT_dec (req->callback); 472 SvREFCNT_dec (req->callback);
316 Safefree (req->statdata); 473 Safefree (req->statdata);
317 474
318 if (req->type == REQ_READDIR && req->result >= 0) 475 if (req->type == REQ_READDIR)
319 free (req->data2ptr); 476 free (req->data2ptr);
320 477
321 Safefree (req); 478 Safefree (req);
322} 479}
323 480
481static void req_cancel_subs (aio_req grp)
482{
483 aio_req sub;
484
485 if (grp->type != REQ_GROUP)
486 return;
487
488 SvREFCNT_dec (grp->fh2);
489 grp->fh2 = 0;
490
491 for (sub = grp->grp_first; sub; sub = sub->grp_next)
492 req_cancel (sub);
493}
494
324static void req_cancel (aio_req req) 495static void req_cancel (aio_req req)
325{ 496{
326 req->cancelled = 1; 497 req->flags |= FLAG_CANCELLED;
327 498
328 if (req->type == REQ_GROUP) 499 req_cancel_subs (req);
329 {
330 aio_req sub;
331
332 for (sub = req->grp_first; sub; sub = sub->grp_next)
333 req_cancel (sub);
334 }
335} 500}
336 501
337static int poll_cb () 502static int poll_cb (int max)
338{ 503{
339 dSP; 504 dSP;
340 int count = 0; 505 int count = 0;
341 int do_croak = 0; 506 int do_croak = 0;
342 aio_req req; 507 aio_req req;
343 508
344 for (;;) 509 for (;;)
345 { 510 {
346 pthread_mutex_lock (&reslock); 511 while (max <= 0 || count < max)
347 req = ress;
348
349 if (req)
350 { 512 {
351 ress = req->next; 513 LOCK (reslock);
514 req = reqq_shift (&res_queue);
352 515
353 if (!ress) 516 if (req)
354 { 517 {
518 if (!res_queue.size)
519 {
355 /* read any signals sent by the worker threads */ 520 /* read any signals sent by the worker threads */
356 char buf [32]; 521 char buf [32];
357 while (read (respipe [0], buf, 32) == 32) 522 while (read (respipe [0], buf, 32) == 32)
523 ;
358 ; 524 }
359
360 rese = 0;
361 } 525 }
526
527 UNLOCK (reslock);
528
529 if (!req)
530 break;
531
532 --nreqs;
533
534 if (req->type == REQ_QUIT)
535 --started;
536 else if (req->type == REQ_GROUP && req->length)
537 {
538 req->fd = 1; /* mark request as delayed */
539 continue;
540 }
541 else
542 {
543 if (req->type == REQ_READ)
544 SvCUR_set (req->data, req->dataoffset + (req->result > 0 ? req->result : 0));
545
546 if (req->data2ptr && (req->type == REQ_READ || req->type == REQ_WRITE))
547 SvREADONLY_off (req->data);
548
549 if (req->statdata)
550 {
551 PL_laststype = req->type == REQ_LSTAT ? OP_LSTAT : OP_STAT;
552 PL_laststatval = req->result;
553 PL_statcache = *(req->statdata);
554 }
555
556 req_invoke (req);
557
558 count++;
559 }
560
561 req_free (req);
362 } 562 }
363 563
364 pthread_mutex_unlock (&reslock); 564 if (nreqs <= max_outstanding)
365
366 if (!req)
367 break; 565 break;
368 566
369 --nreqs; 567 poll_wait ();
370 568
371 if (req->type == REQ_QUIT) 569 max = 0;
372 started--;
373 else if (req->type == REQ_GROUP && req->length)
374 {
375 req->fd = 1; /* mark request as delayed */
376 continue;
377 }
378 else
379 {
380 if (req->type == REQ_READ)
381 SvCUR_set (req->data, req->dataoffset + (req->result > 0 ? req->result : 0));
382
383 if (req->data2ptr && (req->type == REQ_READ || req->type == REQ_WRITE))
384 SvREADONLY_off (req->data);
385
386 if (req->statdata)
387 {
388 PL_laststype = req->type == REQ_LSTAT ? OP_LSTAT : OP_STAT;
389 PL_laststatval = req->result;
390 PL_statcache = *(req->statdata);
391 }
392
393 req_invoke (req);
394
395 count++;
396 }
397
398 req_free (req);
399 } 570 }
400 571
401 return count; 572 return count;
402} 573}
403 574
404static void *aio_proc(void *arg); 575static void *aio_proc(void *arg);
405 576
406static void start_thread (void) 577static void start_thread (void)
407{ 578{
408 sigset_t fullsigset, oldsigset; 579 sigset_t fullsigset, oldsigset;
409 pthread_t tid;
410 pthread_attr_t attr; 580 pthread_attr_t attr;
581
582 worker *wrk = calloc (1, sizeof (worker));
583
584 if (!wrk)
585 croak ("unable to allocate worker thread data");
411 586
412 pthread_attr_init (&attr); 587 pthread_attr_init (&attr);
413 pthread_attr_setstacksize (&attr, STACKSIZE); 588 pthread_attr_setstacksize (&attr, STACKSIZE);
414 pthread_attr_setdetachstate (&attr, PTHREAD_CREATE_DETACHED); 589 pthread_attr_setdetachstate (&attr, PTHREAD_CREATE_DETACHED);
415 590
416 sigfillset (&fullsigset); 591 sigfillset (&fullsigset);
592
593 LOCK (wrklock);
417 sigprocmask (SIG_SETMASK, &fullsigset, &oldsigset); 594 sigprocmask (SIG_SETMASK, &fullsigset, &oldsigset);
418 595
419 if (pthread_create (&tid, &attr, aio_proc, 0) == 0) 596 if (pthread_create (&wrk->tid, &attr, aio_proc, (void *)wrk) == 0)
597 {
598 wrk->prev = &wrk_first;
599 wrk->next = wrk_first.next;
600 wrk_first.next->prev = wrk;
601 wrk_first.next = wrk;
420 started++; 602 ++started;
603 }
604 else
605 free (wrk);
421 606
422 sigprocmask (SIG_SETMASK, &oldsigset, 0); 607 sigprocmask (SIG_SETMASK, &oldsigset, 0);
608 UNLOCK (wrklock);
423} 609}
424 610
425static void req_send (aio_req req) 611static void req_send (aio_req req)
426{ 612{
427 while (started < wanted && nreqs >= started) 613 while (started < wanted && nreqs >= started)
428 start_thread (); 614 start_thread ();
429 615
430 ++nreqs; 616 ++nreqs;
431 617
432 pthread_mutex_lock (&reqlock); 618 LOCK (reqlock);
433 619 reqq_push (&req_queue, req);
434 req->next = 0;
435
436 if (reqe)
437 {
438 reqe->next = req;
439 reqe = req;
440 }
441 else
442 reqe = reqs = req;
443
444 pthread_cond_signal (&reqwait); 620 pthread_cond_signal (&reqwait);
445 pthread_mutex_unlock (&reqlock); 621 UNLOCK (reqlock);
446
447 if (nreqs > max_outstanding)
448 for (;;)
449 {
450 poll_cb ();
451
452 if (nreqs <= max_outstanding)
453 break;
454
455 poll_wait ();
456 }
457} 622}
458 623
459static void end_thread (void) 624static void end_thread (void)
460{ 625{
461 aio_req req; 626 aio_req req;
627
462 Newz (0, req, 1, aio_cb); 628 Newz (0, req, 1, aio_cb);
629
463 req->type = REQ_QUIT; 630 req->type = REQ_QUIT;
631 req->pri = PRI_MAX + PRI_BIAS;
464 632
465 req_send (req); 633 req_send (req);
466} 634}
467 635
468static void min_parallel (int nthreads) 636static void min_parallel (int nthreads)
485 } 653 }
486 654
487 while (started > wanted) 655 while (started > wanted)
488 { 656 {
489 poll_wait (); 657 poll_wait ();
490 poll_cb (); 658 poll_cb (0);
491 } 659 }
492} 660}
493 661
494static void create_pipe () 662static void create_pipe ()
495{ 663{
520static ssize_t pread (int fd, void *buf, size_t count, off_t offset) 688static ssize_t pread (int fd, void *buf, size_t count, off_t offset)
521{ 689{
522 ssize_t res; 690 ssize_t res;
523 off_t ooffset; 691 off_t ooffset;
524 692
525 pthread_mutex_lock (&preadwritelock); 693 LOCK (preadwritelock);
526 ooffset = lseek (fd, 0, SEEK_CUR); 694 ooffset = lseek (fd, 0, SEEK_CUR);
527 lseek (fd, offset, SEEK_SET); 695 lseek (fd, offset, SEEK_SET);
528 res = read (fd, buf, count); 696 res = read (fd, buf, count);
529 lseek (fd, ooffset, SEEK_SET); 697 lseek (fd, ooffset, SEEK_SET);
530 pthread_mutex_unlock (&preadwritelock); 698 UNLOCK (preadwritelock);
531 699
532 return res; 700 return res;
533} 701}
534 702
535static ssize_t pwrite (int fd, void *buf, size_t count, off_t offset) 703static ssize_t pwrite (int fd, void *buf, size_t count, off_t offset)
536{ 704{
537 ssize_t res; 705 ssize_t res;
538 off_t ooffset; 706 off_t ooffset;
539 707
540 pthread_mutex_lock (&preadwritelock); 708 LOCK (preadwritelock);
541 ooffset = lseek (fd, 0, SEEK_CUR); 709 ooffset = lseek (fd, 0, SEEK_CUR);
542 lseek (fd, offset, SEEK_SET); 710 lseek (fd, offset, SEEK_SET);
543 res = write (fd, buf, count); 711 res = write (fd, buf, count);
544 lseek (fd, offset, SEEK_SET); 712 lseek (fd, offset, SEEK_SET);
545 pthread_mutex_unlock (&preadwritelock); 713 UNLOCK (preadwritelock);
546 714
547 return res; 715 return res;
548} 716}
549#endif 717#endif
550 718
551#if !HAVE_FDATASYNC 719#if !HAVE_FDATASYNC
552# define fdatasync fsync 720# define fdatasync fsync
553#endif 721#endif
554 722
555#if !HAVE_READAHEAD 723#if !HAVE_READAHEAD
556# define readahead aio_readahead 724# define readahead(fd,offset,count) aio_readahead (fd, offset, count, self)
557 725
558static ssize_t readahead (int fd, off_t offset, size_t count) 726static ssize_t aio_readahead (int fd, off_t offset, size_t count, worker *self)
559{ 727{
560 char readahead_buf[4096]; 728 dBUF;
561 729
562 while (count > 0) 730 while (count > 0)
563 { 731 {
564 size_t len = count < sizeof (readahead_buf) ? count : sizeof (readahead_buf); 732 size_t len = count < AIO_BUFSIZE ? count : AIO_BUFSIZE;
565 733
566 pread (fd, readahead_buf, len, offset); 734 pread (fd, aio_buf, len, offset);
567 offset += len; 735 offset += len;
568 count -= len; 736 count -= len;
569 } 737 }
570 738
571 errno = 0; 739 errno = 0;
572} 740}
741
573#endif 742#endif
574 743
575#if !HAVE_READDIR_R 744#if !HAVE_READDIR_R
576# define readdir_r aio_readdir_r 745# define readdir_r aio_readdir_r
577 746
580static int readdir_r (DIR *dirp, struct dirent *ent, struct dirent **res) 749static int readdir_r (DIR *dirp, struct dirent *ent, struct dirent **res)
581{ 750{
582 struct dirent *e; 751 struct dirent *e;
583 int errorno; 752 int errorno;
584 753
585 pthread_mutex_lock (&readdirlock); 754 LOCK (readdirlock);
586 755
587 e = readdir (dirp); 756 e = readdir (dirp);
588 errorno = errno; 757 errorno = errno;
589 758
590 if (e) 759 if (e)
593 strcpy (ent->d_name, e->d_name); 762 strcpy (ent->d_name, e->d_name);
594 } 763 }
595 else 764 else
596 *res = 0; 765 *res = 0;
597 766
598 pthread_mutex_unlock (&readdirlock); 767 UNLOCK (readdirlock);
599 768
600 errno = errorno; 769 errno = errorno;
601 return e ? 0 : -1; 770 return e ? 0 : -1;
602} 771}
603#endif 772#endif
604 773
605/* sendfile always needs emulation */ 774/* sendfile always needs emulation */
606static ssize_t sendfile_ (int ofd, int ifd, off_t offset, size_t count) 775static ssize_t sendfile_ (int ofd, int ifd, off_t offset, size_t count, worker *self)
607{ 776{
608 ssize_t res; 777 ssize_t res;
609 778
610 if (!count) 779 if (!count)
611 return 0; 780 return 0;
622 { 791 {
623 off_t sbytes; 792 off_t sbytes;
624 res = sendfile (ifd, ofd, offset, count, 0, &sbytes, 0); 793 res = sendfile (ifd, ofd, offset, count, 0, &sbytes, 0);
625 794
626 if (res < 0 && sbytes) 795 if (res < 0 && sbytes)
627 /* maybe only on EAGAIN only: as usual, the manpage leaves you guessing */ 796 /* maybe only on EAGAIN: as usual, the manpage leaves you guessing */
628 res = sbytes; 797 res = sbytes;
629 } 798 }
630 799
631# elif __hpux 800# elif __hpux
632 res = sendfile (ofd, ifd, offset, count, 0, 0); 801 res = sendfile (ofd, ifd, offset, count, 0, 0);
660#endif 829#endif
661 ) 830 )
662 ) 831 )
663 { 832 {
664 /* emulate sendfile. this is a major pain in the ass */ 833 /* emulate sendfile. this is a major pain in the ass */
665 char buf[4096]; 834 dBUF;
835
666 res = 0; 836 res = 0;
667 837
668 while (count) 838 while (count)
669 { 839 {
670 ssize_t cnt; 840 ssize_t cnt;
671 841
672 cnt = pread (ifd, buf, count > 4096 ? 4096 : count, offset); 842 cnt = pread (ifd, aio_buf, count > AIO_BUFSIZE ? AIO_BUFSIZE : count, offset);
673 843
674 if (cnt <= 0) 844 if (cnt <= 0)
675 { 845 {
676 if (cnt && !res) res = -1; 846 if (cnt && !res) res = -1;
677 break; 847 break;
678 } 848 }
679 849
680 cnt = write (ofd, buf, cnt); 850 cnt = write (ofd, aio_buf, cnt);
681 851
682 if (cnt <= 0) 852 if (cnt <= 0)
683 { 853 {
684 if (cnt && !res) res = -1; 854 if (cnt && !res) res = -1;
685 break; 855 break;
693 863
694 return res; 864 return res;
695} 865}
696 866
697/* read a full directory */ 867/* read a full directory */
698static int scandir_ (const char *path, void **namesp) 868static void scandir_ (aio_req req, worker *self)
699{ 869{
700 DIR *dirp = opendir (path); 870 DIR *dirp;
701 union 871 union
702 { 872 {
703 struct dirent d; 873 struct dirent d;
704 char b [offsetof (struct dirent, d_name) + NAME_MAX + 1]; 874 char b [offsetof (struct dirent, d_name) + NAME_MAX + 1];
705 } u; 875 } *u;
706 struct dirent *entp; 876 struct dirent *entp;
707 char *name, *names; 877 char *name, *names;
708 int memlen = 4096; 878 int memlen = 4096;
709 int memofs = 0; 879 int memofs = 0;
710 int res = 0; 880 int res = 0;
711 int errorno; 881 int errorno;
712 882
713 if (!dirp) 883 LOCK (wrklock);
714 return -1; 884 self->dirp = dirp = opendir (req->dataptr);
715 885 self->dbuf = u = malloc (sizeof (*u));
716 names = malloc (memlen); 886 req->data2ptr = names = malloc (memlen);
887 UNLOCK (wrklock);
717 888
889 if (dirp && u && names)
718 for (;;) 890 for (;;)
719 { 891 {
892 errno = 0;
720 errno = 0, readdir_r (dirp, &u.d, &entp); 893 readdir_r (dirp, &u->d, &entp);
721 894
722 if (!entp) 895 if (!entp)
723 break; 896 break;
724 897
725 name = entp->d_name; 898 name = entp->d_name;
726 899
727 if (name [0] != '.' || (name [1] && (name [1] != '.' || name [2]))) 900 if (name [0] != '.' || (name [1] && (name [1] != '.' || name [2])))
728 { 901 {
729 int len = strlen (name) + 1; 902 int len = strlen (name) + 1;
730 903
731 res++; 904 res++;
732 905
733 while (memofs + len > memlen) 906 while (memofs + len > memlen)
734 { 907 {
735 memlen *= 2; 908 memlen *= 2;
909 LOCK (wrklock);
736 names = realloc (names, memlen); 910 req->data2ptr = names = realloc (names, memlen);
911 UNLOCK (wrklock);
912
737 if (!names) 913 if (!names)
738 break; 914 break;
739 } 915 }
740 916
741 memcpy (names + memofs, name, len); 917 memcpy (names + memofs, name, len);
742 memofs += len; 918 memofs += len;
743 } 919 }
744 } 920 }
745 921
746 errorno = errno;
747 closedir (dirp);
748
749 if (errorno) 922 if (errno)
750 {
751 free (names);
752 errno = errorno;
753 res = -1; 923 res = -1;
754 } 924
755 925 req->result = res;
756 *namesp = (void *)names;
757 return res;
758} 926}
759 927
760/*****************************************************************************/ 928/*****************************************************************************/
761 929
762static void *aio_proc (void *thr_arg) 930static void *aio_proc (void *thr_arg)
763{ 931{
764 aio_req req; 932 aio_req req;
765 int type; 933 int type;
934 worker *self = (worker *)thr_arg;
766 935
767 do 936 do
768 { 937 {
769 pthread_mutex_lock (&reqlock); 938 LOCK (reqlock);
770 939
771 for (;;) 940 for (;;)
772 { 941 {
773 req = reqs; 942 self->req = req = reqq_shift (&req_queue);
774
775 if (reqs)
776 {
777 reqs = reqs->next;
778 if (!reqs) reqe = 0;
779 }
780 943
781 if (req) 944 if (req)
782 break; 945 break;
783 946
784 pthread_cond_wait (&reqwait, &reqlock); 947 pthread_cond_wait (&reqwait, &reqlock);
785 } 948 }
786 949
787 pthread_mutex_unlock (&reqlock); 950 UNLOCK (reqlock);
788 951
789 errno = 0; /* strictly unnecessary */ 952 errno = 0; /* strictly unnecessary */
790
791 if (!req->cancelled)
792 switch (type = req->type) /* remember type for QUIT check */ 953 type = req->type; /* remember type for QUIT check */
954
955 if (!(req->flags & FLAG_CANCELLED))
956 switch (type)
793 { 957 {
794 case REQ_READ: req->result = pread (req->fd, req->dataptr, req->length, req->offset); break; 958 case REQ_READ: req->result = pread (req->fd, req->dataptr, req->length, req->offset); break;
795 case REQ_WRITE: req->result = pwrite (req->fd, req->dataptr, req->length, req->offset); break; 959 case REQ_WRITE: req->result = pwrite (req->fd, req->dataptr, req->length, req->offset); break;
796 960
797 case REQ_READAHEAD: req->result = readahead (req->fd, req->offset, req->length); break; 961 case REQ_READAHEAD: req->result = readahead (req->fd, req->offset, req->length); break;
798 case REQ_SENDFILE: req->result = sendfile_ (req->fd, req->fd2, req->offset, req->length); break; 962 case REQ_SENDFILE: req->result = sendfile_ (req->fd, req->fd2, req->offset, req->length, self); break;
799 963
800 case REQ_STAT: req->result = stat (req->dataptr, req->statdata); break; 964 case REQ_STAT: req->result = stat (req->dataptr, req->statdata); break;
801 case REQ_LSTAT: req->result = lstat (req->dataptr, req->statdata); break; 965 case REQ_LSTAT: req->result = lstat (req->dataptr, req->statdata); break;
802 case REQ_FSTAT: req->result = fstat (req->fd , req->statdata); break; 966 case REQ_FSTAT: req->result = fstat (req->fd , req->statdata); break;
803 967
809 case REQ_LINK: req->result = link (req->data2ptr, req->dataptr); break; 973 case REQ_LINK: req->result = link (req->data2ptr, req->dataptr); break;
810 case REQ_SYMLINK: req->result = symlink (req->data2ptr, req->dataptr); break; 974 case REQ_SYMLINK: req->result = symlink (req->data2ptr, req->dataptr); break;
811 975
812 case REQ_FDATASYNC: req->result = fdatasync (req->fd); break; 976 case REQ_FDATASYNC: req->result = fdatasync (req->fd); break;
813 case REQ_FSYNC: req->result = fsync (req->fd); break; 977 case REQ_FSYNC: req->result = fsync (req->fd); break;
814 case REQ_READDIR: req->result = scandir_ (req->dataptr, &req->data2ptr); break; 978 case REQ_READDIR: scandir_ (req, self); break;
815 979
816 case REQ_SLEEP: 980 case REQ_BUSY:
817 { 981 {
818 struct timeval tv; 982 struct timeval tv;
819 983
820 tv.tv_sec = req->fd; 984 tv.tv_sec = req->fd;
821 tv.tv_usec = req->fd2; 985 tv.tv_usec = req->fd2;
822 986
823 req->result = select (0, 0, 0, 0, &tv); 987 req->result = select (0, 0, 0, 0, &tv);
824 } 988 }
825 989
826 case REQ_GROUP: /*TODO: should not be handled here */ 990 case REQ_GROUP:
827 case REQ_NOP: /*TODO: should not be handled here */ 991 case REQ_NOP:
828 case REQ_QUIT: 992 case REQ_QUIT:
829 break; 993 break;
830 994
831 default: 995 default:
832 req->result = ENOSYS; 996 req->result = ENOSYS;
833 break; 997 break;
834 } 998 }
835 999
836 req->errorno = errno; 1000 req->errorno = errno;
837 1001
838 pthread_mutex_lock (&reslock); 1002 LOCK (reslock);
839 1003
840 req->next = 0; 1004 if (!reqq_push (&res_queue, req))
841
842 if (rese)
843 {
844 rese->next = req;
845 rese = req;
846 }
847 else
848 {
849 rese = ress = req;
850
851 /* write a dummy byte to the pipe so fh becomes ready */ 1005 /* write a dummy byte to the pipe so fh becomes ready */
852 write (respipe [1], &respipe, 1); 1006 write (respipe [1], &respipe, 1);
853 }
854 1007
855 pthread_mutex_unlock (&reslock); 1008 self->req = 0;
1009 worker_clear (self);
1010
1011 UNLOCK (reslock);
856 } 1012 }
857 while (type != REQ_QUIT); 1013 while (type != REQ_QUIT);
858 1014
1015 LOCK (wrklock);
1016 worker_free (self);
1017 UNLOCK (wrklock);
1018
859 return 0; 1019 return 0;
860} 1020}
861 1021
862/*****************************************************************************/ 1022/*****************************************************************************/
863 1023
864static void atfork_prepare (void) 1024static void atfork_prepare (void)
865{ 1025{
866 pthread_mutex_lock (&reqlock); 1026 LOCK (wrklock);
867 pthread_mutex_lock (&reslock); 1027 LOCK (reqlock);
1028 LOCK (reslock);
868#if !HAVE_PREADWRITE 1029#if !HAVE_PREADWRITE
869 pthread_mutex_lock (&preadwritelock); 1030 LOCK (preadwritelock);
870#endif 1031#endif
871#if !HAVE_READDIR_R 1032#if !HAVE_READDIR_R
872 pthread_mutex_lock (&readdirlock); 1033 LOCK (readdirlock);
873#endif 1034#endif
874} 1035}
875 1036
876static void atfork_parent (void) 1037static void atfork_parent (void)
877{ 1038{
878#if !HAVE_READDIR_R 1039#if !HAVE_READDIR_R
879 pthread_mutex_unlock (&readdirlock); 1040 UNLOCK (readdirlock);
880#endif 1041#endif
881#if !HAVE_PREADWRITE 1042#if !HAVE_PREADWRITE
882 pthread_mutex_unlock (&preadwritelock); 1043 UNLOCK (preadwritelock);
883#endif 1044#endif
884 pthread_mutex_unlock (&reslock); 1045 UNLOCK (reslock);
885 pthread_mutex_unlock (&reqlock); 1046 UNLOCK (reqlock);
1047 UNLOCK (wrklock);
886} 1048}
887 1049
888static void atfork_child (void) 1050static void atfork_child (void)
889{ 1051{
890 aio_req prv; 1052 aio_req prv;
891 1053
1054 while (prv = reqq_shift (&req_queue))
1055 req_free (prv);
1056
1057 while (prv = reqq_shift (&res_queue))
1058 req_free (prv);
1059
1060 while (wrk_first.next != &wrk_first)
1061 {
1062 worker *wrk = wrk_first.next;
1063
1064 if (wrk->req)
1065 req_free (wrk->req);
1066
1067 worker_clear (wrk);
1068 worker_free (wrk);
1069 }
1070
892 started = 0; 1071 started = 0;
893 1072 nreqs = 0;
894 while (reqs)
895 {
896 prv = reqs;
897 reqs = prv->next;
898 req_free (prv);
899 }
900
901 reqs = reqe = 0;
902
903 while (ress)
904 {
905 prv = ress;
906 ress = prv->next;
907 req_free (prv);
908 }
909
910 ress = rese = 0;
911 1073
912 close (respipe [0]); 1074 close (respipe [0]);
913 close (respipe [1]); 1075 close (respipe [1]);
914 create_pipe (); 1076 create_pipe ();
915 1077
916 atfork_parent (); 1078 atfork_parent ();
917} 1079}
918 1080
919#define dREQ \ 1081#define dREQ \
920 aio_req req; \ 1082 aio_req req; \
1083 int req_pri = next_pri; \
1084 next_pri = DEFAULT_PRI + PRI_BIAS; \
921 \ 1085 \
922 if (SvOK (callback) && !SvROK (callback)) \ 1086 if (SvOK (callback) && !SvROK (callback)) \
923 croak ("callback must be undef or of reference type"); \ 1087 croak ("callback must be undef or of reference type"); \
924 \ 1088 \
925 Newz (0, req, 1, aio_cb); \ 1089 Newz (0, req, 1, aio_cb); \
926 if (!req) \ 1090 if (!req) \
927 croak ("out of memory during aio_req allocation"); \ 1091 croak ("out of memory during aio_req allocation"); \
928 \ 1092 \
929 req->callback = newSVsv (callback) 1093 req->callback = newSVsv (callback); \
1094 req->pri = req_pri
930 1095
931#define REQ_SEND \ 1096#define REQ_SEND \
932 req_send (req); \ 1097 req_send (req); \
933 \ 1098 \
934 if (GIMME_V != G_VOID) \ 1099 if (GIMME_V != G_VOID) \
948 create_pipe (); 1113 create_pipe ();
949 pthread_atfork (atfork_prepare, atfork_parent, atfork_child); 1114 pthread_atfork (atfork_prepare, atfork_parent, atfork_child);
950} 1115}
951 1116
952void 1117void
953min_parallel (nthreads) 1118min_parallel (int nthreads)
954 int nthreads
955 PROTOTYPE: $ 1119 PROTOTYPE: $
956 1120
957void 1121void
958max_parallel (nthreads) 1122max_parallel (int nthreads)
959 int nthreads
960 PROTOTYPE: $ 1123 PROTOTYPE: $
961 1124
962int 1125int
963max_outstanding (nreqs) 1126max_outstanding (int maxreqs)
964 int nreqs 1127 PROTOTYPE: $
965 PROTOTYPE: $
966 CODE: 1128 CODE:
967 RETVAL = max_outstanding; 1129 RETVAL = max_outstanding;
968 max_outstanding = nreqs; 1130 max_outstanding = maxreqs;
1131 OUTPUT:
1132 RETVAL
969 1133
970void 1134void
971aio_open (pathname,flags,mode,callback=&PL_sv_undef) 1135aio_open (pathname,flags,mode,callback=&PL_sv_undef)
972 SV * pathname 1136 SV * pathname
973 int flags 1137 int flags
1188 1352
1189 REQ_SEND; 1353 REQ_SEND;
1190} 1354}
1191 1355
1192void 1356void
1193aio_sleep (delay,callback=&PL_sv_undef) 1357aio_busy (delay,callback=&PL_sv_undef)
1194 double delay 1358 double delay
1195 SV * callback 1359 SV * callback
1196 PPCODE: 1360 PPCODE:
1197{ 1361{
1198 dREQ; 1362 dREQ;
1199 1363
1200 req->type = REQ_SLEEP; 1364 req->type = REQ_BUSY;
1201 req->fd = delay < 0. ? 0 : delay; 1365 req->fd = delay < 0. ? 0 : delay;
1202 req->fd2 = delay < 0. ? 0 : 1000. * (delay - req->fd); 1366 req->fd2 = delay < 0. ? 0 : 1000. * (delay - req->fd);
1203 1367
1204 REQ_SEND; 1368 REQ_SEND;
1205} 1369}
1209 SV * callback 1373 SV * callback
1210 PROTOTYPE: ;$ 1374 PROTOTYPE: ;$
1211 PPCODE: 1375 PPCODE:
1212{ 1376{
1213 dREQ; 1377 dREQ;
1378
1214 req->type = REQ_GROUP; 1379 req->type = REQ_GROUP;
1215 req_send (req); 1380 req_send (req);
1381
1216 XPUSHs (req_sv (req, AIO_GRP_KLASS)); 1382 XPUSHs (req_sv (req, AIO_GRP_KLASS));
1217} 1383}
1218 1384
1219void 1385void
1220aio_nop (callback=&PL_sv_undef) 1386aio_nop (callback=&PL_sv_undef)
1225 1391
1226 req->type = REQ_NOP; 1392 req->type = REQ_NOP;
1227 1393
1228 REQ_SEND; 1394 REQ_SEND;
1229} 1395}
1396
1397void
1398aioreq_pri (int pri = DEFAULT_PRI)
1399 CODE:
1400 if (pri < PRI_MIN) pri = PRI_MIN;
1401 if (pri > PRI_MAX) pri = PRI_MAX;
1402 next_pri = pri + PRI_BIAS;
1403
1404void
1405aioreq_nice (int nice = 0)
1406 CODE:
1407 nice = next_pri - nice;
1408 if (nice < PRI_MIN) nice = PRI_MIN;
1409 if (nice > PRI_MAX) nice = PRI_MAX;
1410 next_pri = nice + PRI_BIAS;
1230 1411
1231void 1412void
1232flush () 1413flush ()
1233 PROTOTYPE: 1414 PROTOTYPE:
1234 CODE: 1415 CODE:
1235 while (nreqs) 1416 while (nreqs)
1236 { 1417 {
1237 poll_wait (); 1418 poll_wait ();
1238 poll_cb (); 1419 poll_cb (0);
1239 } 1420 }
1240 1421
1241void 1422void
1242poll() 1423poll()
1243 PROTOTYPE: 1424 PROTOTYPE:
1244 CODE: 1425 CODE:
1245 if (nreqs) 1426 if (nreqs)
1246 { 1427 {
1247 poll_wait (); 1428 poll_wait ();
1248 poll_cb (); 1429 poll_cb (0);
1249 } 1430 }
1250 1431
1251int 1432int
1252poll_fileno() 1433poll_fileno()
1253 PROTOTYPE: 1434 PROTOTYPE:
1258 1439
1259int 1440int
1260poll_cb(...) 1441poll_cb(...)
1261 PROTOTYPE: 1442 PROTOTYPE:
1262 CODE: 1443 CODE:
1263 RETVAL = poll_cb (); 1444 RETVAL = poll_cb (0);
1445 OUTPUT:
1446 RETVAL
1447
1448int
1449poll_some(int max = 0)
1450 PROTOTYPE: $
1451 CODE:
1452 RETVAL = poll_cb (max);
1264 OUTPUT: 1453 OUTPUT:
1265 RETVAL 1454 RETVAL
1266 1455
1267void 1456void
1268poll_wait() 1457poll_wait()
1283 1472
1284MODULE = IO::AIO PACKAGE = IO::AIO::REQ 1473MODULE = IO::AIO PACKAGE = IO::AIO::REQ
1285 1474
1286void 1475void
1287cancel (aio_req_ornot req) 1476cancel (aio_req_ornot req)
1288 PROTOTYPE:
1289 CODE: 1477 CODE:
1290 req_cancel (req); 1478 req_cancel (req);
1479
1480void
1481cb (aio_req_ornot req, SV *callback=&PL_sv_undef)
1482 CODE:
1483 SvREFCNT_dec (req->callback);
1484 req->callback = newSVsv (callback);
1291 1485
1292MODULE = IO::AIO PACKAGE = IO::AIO::GRP 1486MODULE = IO::AIO PACKAGE = IO::AIO::GRP
1293 1487
1294void 1488void
1295add (aio_req grp, ...) 1489add (aio_req grp, ...)
1323 } 1517 }
1324 } 1518 }
1325} 1519}
1326 1520
1327void 1521void
1522cancel_subs (aio_req_ornot req)
1523 CODE:
1524 req_cancel_subs (req);
1525
1526void
1328result (aio_req grp, ...) 1527result (aio_req grp, ...)
1329 CODE: 1528 CODE:
1330{ 1529{
1331 int i; 1530 int i;
1332 AV *av = newAV (); 1531 AV *av = newAV ();
1337 SvREFCNT_dec (grp->data); 1536 SvREFCNT_dec (grp->data);
1338 grp->data = (SV *)av; 1537 grp->data = (SV *)av;
1339} 1538}
1340 1539
1341void 1540void
1342lock (aio_req grp)
1343 CODE:
1344 ++grp->length;
1345
1346void
1347unlock (aio_req grp)
1348 CODE:
1349 aio_grp_dec (grp);
1350
1351void
1352feeder_limit (aio_req grp, int limit) 1541limit (aio_req grp, int limit)
1353 CODE: 1542 CODE:
1354 grp->fd2 = limit; 1543 grp->fd2 = limit;
1355 aio_grp_feed (grp); 1544 aio_grp_feed (grp);
1356 1545
1357void 1546void
1358set_feeder (aio_req grp, SV *callback=&PL_sv_undef) 1547feed (aio_req grp, SV *callback=&PL_sv_undef)
1359 CODE: 1548 CODE:
1360{ 1549{
1361 SvREFCNT_dec (grp->fh2); 1550 SvREFCNT_dec (grp->fh2);
1362 grp->fh2 = newSVsv (callback); 1551 grp->fh2 = newSVsv (callback);
1363 1552
