/cvs/IO-AIO/AIO.xs

Comparing IO-AIO/AIO.xs (file contents):
Revision 1.43 by root, Sat Oct 21 23:06:04 2006 UTC vs.
Revision 1.75 by root, Thu Oct 26 06:44:48 2006 UTC

1/* solaris */
2#define _POSIX_PTHREAD_SEMANTICS 1
3
4#if __linux
5# define _GNU_SOURCE
6#endif
7
1#define _REENTRANT 1 8#define _REENTRANT 1
9
2#include <errno.h> 10#include <errno.h>
3 11
4#include "EXTERN.h" 12#include "EXTERN.h"
5#include "perl.h" 13#include "perl.h"
6#include "XSUB.h" 14#include "XSUB.h"
9 17
10#include <pthread.h> 18#include <pthread.h>
11 19
12#include <stddef.h> 20#include <stddef.h>
13#include <errno.h> 21#include <errno.h>
22#include <sys/time.h>
23#include <sys/select.h>
14#include <sys/types.h> 24#include <sys/types.h>
15#include <sys/stat.h> 25#include <sys/stat.h>
16#include <limits.h> 26#include <limits.h>
17#include <unistd.h> 27#include <unistd.h>
18#include <fcntl.h> 28#include <fcntl.h>
37/* used for struct dirent, AIX doesn't provide it */ 47/* used for struct dirent, AIX doesn't provide it */
38#ifndef NAME_MAX 48#ifndef NAME_MAX
39# define NAME_MAX 4096 49# define NAME_MAX 4096
40#endif 50#endif
41 51
52#ifndef PTHREAD_STACK_MIN
53/* care for broken platforms, e.g. windows */
54# define PTHREAD_STACK_MIN 16384
55#endif
56
42#if __ia64 57#if __ia64
43# define STACKSIZE 65536 58# define STACKSIZE 65536
59#elif __i386 || __x86_64 /* 16k is unreasonably high :( */
60# define STACKSIZE PTHREAD_STACK_MIN
44#else 61#else
45# define STACKSIZE 8192 62# define STACKSIZE 16384
46#endif 63#endif
64
65/* buffer size for various temporary buffers */
66#define AIO_BUFSIZE 65536
67
68#define dBUF \
69 char *aio_buf; \
70 LOCK (wrklock); \
71 self->dbuf = aio_buf = malloc (AIO_BUFSIZE); \
72 UNLOCK (wrklock); \
73 if (!aio_buf) \
74 return -1;
47 75
48enum { 76enum {
49 REQ_QUIT, 77 REQ_QUIT,
50 REQ_OPEN, REQ_CLOSE, 78 REQ_OPEN, REQ_CLOSE,
51 REQ_READ, REQ_WRITE, REQ_READAHEAD, 79 REQ_READ, REQ_WRITE, REQ_READAHEAD,
53 REQ_STAT, REQ_LSTAT, REQ_FSTAT, 81 REQ_STAT, REQ_LSTAT, REQ_FSTAT,
54 REQ_FSYNC, REQ_FDATASYNC, 82 REQ_FSYNC, REQ_FDATASYNC,
55 REQ_UNLINK, REQ_RMDIR, REQ_RENAME, 83 REQ_UNLINK, REQ_RMDIR, REQ_RENAME,
56 REQ_READDIR, 84 REQ_READDIR,
57 REQ_LINK, REQ_SYMLINK, 85 REQ_LINK, REQ_SYMLINK,
86 REQ_GROUP, REQ_NOP,
87 REQ_BUSY,
58}; 88};
59 89
60#define AIO_CB_KLASS "IO::AIO::CB" 90#define AIO_REQ_KLASS "IO::AIO::REQ"
91#define AIO_GRP_KLASS "IO::AIO::GRP"
61 92
62typedef struct aio_cb 93typedef struct aio_cb
63{ 94{
64 struct aio_cb *grp_prev, *grp_next;
65 struct aio_grp *grp;
66
67 struct aio_cb *volatile next; 95 struct aio_cb *volatile next;
68
69 SV *self; /* the perl counterpart of this request, if any */
70 96
71 SV *data, *callback; 97 SV *data, *callback;
72 SV *fh, *fh2; 98 SV *fh, *fh2;
73 void *dataptr, *data2ptr; 99 void *dataptr, *data2ptr;
74 Stat_t *statdata; 100 Stat_t *statdata;
75 off_t offset; 101 off_t offset;
76 size_t length; 102 size_t length;
77 ssize_t result; 103 ssize_t result;
78 104
105 STRLEN dataoffset;
79 int type; 106 int type;
80 int fd, fd2; 107 int fd, fd2;
81 int errorno; 108 int errorno;
82 STRLEN dataoffset;
83 mode_t mode; /* open */ 109 mode_t mode; /* open */
110
84 unsigned char cancelled; 111 unsigned char flags;
112 unsigned char pri;
113
114 SV *self; /* the perl counterpart of this request, if any */
115 struct aio_cb *grp, *grp_prev, *grp_next, *grp_first;
85} aio_cb; 116} aio_cb;
117
118enum {
119 FLAG_CANCELLED = 0x01,
120};
86 121
87typedef aio_cb *aio_req; 122typedef aio_cb *aio_req;
88typedef aio_cb *aio_req_ornot; 123typedef aio_cb *aio_req_ornot;
89 124
125enum {
126 PRI_MIN = -4,
127 PRI_MAX = 4,
128
129 DEFAULT_PRI = 0,
130 PRI_BIAS = -PRI_MIN,
131 NUM_PRI = PRI_MAX + PRI_BIAS + 1,
132};
133
134static int next_pri = DEFAULT_PRI + PRI_BIAS;
135
90static int started, wanted; 136static int started, wanted;
91static volatile int nreqs; 137static volatile int nreqs;
92static int max_outstanding = 1<<30;
93static int respipe [2]; 138static int respipe [2];
94 139
140#if __linux && defined (PTHREAD_ADAPTIVE_MUTEX_INITIALIZER_NP)
141# define AIO_MUTEX_INIT PTHREAD_ADAPTIVE_MUTEX_INITIALIZER_NP
142#else
143# define AIO_MUTEX_INIT PTHREAD_MUTEX_INITIALIZER
144#endif
145
146#define LOCK(mutex) pthread_mutex_lock (&(mutex))
147#define UNLOCK(mutex) pthread_mutex_unlock (&(mutex))
148
149/* worker thread management */
150static pthread_mutex_t wrklock = AIO_MUTEX_INIT;
151
152typedef struct worker {
153 /* locked by wrklock */
154 struct worker *prev, *next;
155
156 pthread_t tid;
157
158 /* locked by reslock, reqlock or wrklock */
159 aio_req req; /* currently processed request */
160 void *dbuf;
161 DIR *dirp;
162} worker;
163
164static worker wrk_first = { &wrk_first, &wrk_first, 0 };
165
166static void worker_clear (worker *wrk)
167{
168 if (wrk->dirp)
169 {
170 closedir (wrk->dirp);
171 wrk->dirp = 0;
172 }
173
174 if (wrk->dbuf)
175 {
176 free (wrk->dbuf);
177 wrk->dbuf = 0;
178 }
179}
180
181static void worker_free (worker *wrk)
182{
183 wrk->next->prev = wrk->prev;
184 wrk->prev->next = wrk->next;
185
186 free (wrk);
187}
188
95static pthread_mutex_t reslock = PTHREAD_MUTEX_INITIALIZER; 189static pthread_mutex_t reslock = AIO_MUTEX_INIT;
96static pthread_mutex_t reqlock = PTHREAD_MUTEX_INITIALIZER; 190static pthread_mutex_t reqlock = AIO_MUTEX_INIT;
97static pthread_cond_t reqwait = PTHREAD_COND_INITIALIZER; 191static pthread_cond_t reqwait = PTHREAD_COND_INITIALIZER;
98 192
99static volatile aio_req reqs, reqe; /* queue start, queue end */ 193/*
100static volatile aio_req ress, rese; /* queue start, queue end */ 194 * a somewhat faster data structure might be nice, but
101 195 * with 8 priorities this actually needs <20 insns
196 * per shift, the most expensive operation.
197 */
102typedef struct aio_grp 198typedef struct {
103{ 199 aio_req qs[NUM_PRI], qe[NUM_PRI]; /* qstart, qend */
104 struct aio_cb *first, *last; 200 int size;
105 SV *callback; 201} reqq;
106 int busycount;
107} aio_grp;
108 202
109static void aio_grp_begin (aio_grp *grp) 203static reqq req_queue;
110{ 204static reqq res_queue;
111 ++grp->busycount;
112}
113 205
114static void aio_grp_end (aio_grp *grp) 206int reqq_push (reqq *q, aio_req req)
115{ 207{
116 --grp->busycount; 208 int pri = req->pri;
209 req->next = 0;
117 210
118 if (grp->busycount) 211 if (q->qe[pri])
212 {
213 q->qe[pri]->next = req;
214 q->qe[pri] = req;
215 }
216 else
217 q->qe[pri] = q->qs[pri] = req;
218
219 return q->size++;
220}
221
222aio_req reqq_shift (reqq *q)
223{
224 int pri;
225
226 if (!q->size)
119 return; 227 return 0;
120 228
121 SvREFCNT_dec (grp->callback); 229 --q->size;
122 grp->callback = 0;
123}
124 230
125static aio_grp *aio_grp_new () 231 for (pri = NUM_PRI; pri--; )
126{ 232 {
127 aio_grp *grp; 233 aio_req req = q->qs[pri];
128 234
129 Newz (0, grp, 1, aio_grp); 235 if (req)
130 aio_grp_begin (grp); 236 {
237 if (!(q->qs[pri] = req->next))
238 q->qe[pri] = 0;
131 239
132 return grp; 240 return req;
241 }
242 }
243
244 abort ();
133} 245}
246
247static int poll_cb ();
248static void req_invoke (aio_req req);
249static void req_free (aio_req req);
250static void req_cancel (aio_req req);
134 251
135/* must be called at most once */ 252/* must be called at most once */
136static SV *req_sv (aio_req req) 253static SV *req_sv (aio_req req, const char *klass)
137{ 254{
255 if (!req->self)
256 {
138 req->self = (SV *)newHV (); 257 req->self = (SV *)newHV ();
139 sv_magic (req->self, 0, PERL_MAGIC_ext, (char *)req, 0); 258 sv_magic (req->self, 0, PERL_MAGIC_ext, (char *)req, 0);
259 }
140 260
141 return sv_bless (newRV_noinc (req->self), gv_stashpv (AIO_CB_KLASS, 1)); 261 return sv_2mortal (sv_bless (newRV_inc (req->self), gv_stashpv (klass, 1)));
142} 262}
143 263
144static aio_req SvAIO_REQ (SV *sv) 264static aio_req SvAIO_REQ (SV *sv)
145{ 265{
266 MAGIC *mg;
267
146 if (!sv_derived_from (sv, AIO_CB_KLASS) || !SvROK (sv)) 268 if (!sv_derived_from (sv, AIO_REQ_KLASS) || !SvROK (sv))
147 croak ("object of class " AIO_CB_KLASS " expected"); 269 croak ("object of class " AIO_REQ_KLASS " expected");
148 270
149 MAGIC *mg = mg_find (SvRV (sv), PERL_MAGIC_ext); 271 mg = mg_find (SvRV (sv), PERL_MAGIC_ext);
150 272
151 return mg ? (aio_req)mg->mg_ptr : 0; 273 return mg ? (aio_req)mg->mg_ptr : 0;
274}
275
276static void aio_grp_feed (aio_req grp)
277{
278 while (grp->length < grp->fd2 && !(grp->flags & FLAG_CANCELLED))
279 {
280 int old_len = grp->length;
281
282 if (grp->fh2 && SvOK (grp->fh2))
283 {
284 dSP;
285
286 ENTER;
287 SAVETMPS;
288 PUSHMARK (SP);
289 XPUSHs (req_sv (grp, AIO_GRP_KLASS));
290 PUTBACK;
291 call_sv (grp->fh2, G_VOID | G_EVAL | G_KEEPERR);
292 SPAGAIN;
293 FREETMPS;
294 LEAVE;
295 }
296
297 /* stop if no progress has been made */
298 if (old_len == grp->length)
299 {
300 SvREFCNT_dec (grp->fh2);
301 grp->fh2 = 0;
302 break;
303 }
304 }
305}
306
307static void aio_grp_dec (aio_req grp)
308{
309 --grp->length;
310
311 /* call feeder, if applicable */
312 aio_grp_feed (grp);
313
314 /* finish, if done */
315 if (!grp->length && grp->fd)
316 {
317 req_invoke (grp);
318 req_free (grp);
319 }
320}
321
322static void poll_wait ()
323{
324 fd_set rfd;
325
326 while (nreqs)
327 {
328 int size;
329#if !(__i386 || __x86_64) /* safe without semaphore on these archs */
330 LOCK (reslock);
331#endif
332 size = res_queue.size;
333#if !(__i386 || __x86_64) /* safe without semaphore on these archs */
334 UNLOCK (reslock);
335#endif
336
337 if (size)
338 return;
339
340 FD_ZERO(&rfd);
341 FD_SET(respipe [0], &rfd);
342
343 select (respipe [0] + 1, &rfd, 0, 0, 0);
344 }
345}
346
347static void req_invoke (aio_req req)
348{
349 dSP;
350
351 if (!(req->flags & FLAG_CANCELLED) && SvOK (req->callback))
352 {
353 errno = req->errorno;
354
355 ENTER;
356 SAVETMPS;
357 PUSHMARK (SP);
358 EXTEND (SP, 1);
359
360 switch (req->type)
361 {
362 case REQ_READDIR:
363 {
364 SV *rv = &PL_sv_undef;
365
366 if (req->result >= 0)
367 {
368 int i;
369 char *buf = req->data2ptr;
370 AV *av = newAV ();
371
372 av_extend (av, req->result - 1);
373
374 for (i = 0; i < req->result; ++i)
375 {
376 SV *sv = newSVpv (buf, 0);
377
378 av_store (av, i, sv);
379 buf += SvCUR (sv) + 1;
380 }
381
382 rv = sv_2mortal (newRV_noinc ((SV *)av));
383 }
384
385 PUSHs (rv);
386 }
387 break;
388
389 case REQ_OPEN:
390 {
391 /* convert fd to fh */
392 SV *fh;
393
394 PUSHs (sv_2mortal (newSViv (req->result)));
395 PUTBACK;
396 call_pv ("IO::AIO::_fd2fh", G_SCALAR | G_EVAL);
397 SPAGAIN;
398
399 fh = SvREFCNT_inc (POPs);
400
401 PUSHMARK (SP);
402 XPUSHs (sv_2mortal (fh));
403 }
404 break;
405
406 case REQ_GROUP:
407 req->fd = 2; /* mark group as finished */
408
409 if (req->data)
410 {
411 int i;
412 AV *av = (AV *)req->data;
413
414 EXTEND (SP, AvFILL (av) + 1);
415 for (i = 0; i <= AvFILL (av); ++i)
416 PUSHs (*av_fetch (av, i, 0));
417 }
418 break;
419
420 case REQ_NOP:
421 case REQ_BUSY:
422 break;
423
424 default:
425 PUSHs (sv_2mortal (newSViv (req->result)));
426 break;
427 }
428
429
430 PUTBACK;
431 call_sv (req->callback, G_VOID | G_EVAL);
432 SPAGAIN;
433
434 FREETMPS;
435 LEAVE;
436 }
437
438 if (req->grp)
439 {
440 aio_req grp = req->grp;
441
442 /* unlink request */
443 if (req->grp_next) req->grp_next->grp_prev = req->grp_prev;
444 if (req->grp_prev) req->grp_prev->grp_next = req->grp_next;
445
446 if (grp->grp_first == req)
447 grp->grp_first = req->grp_next;
448
449 aio_grp_dec (grp);
450 }
451
452 if (SvTRUE (ERRSV))
453 {
454 req_free (req);
455 croak (0);
456 }
152} 457}
153 458
154static void req_free (aio_req req) 459static void req_free (aio_req req)
155{ 460{
156 if (req->self) 461 if (req->self)
157 { 462 {
158 sv_unmagic (req->self, PERL_MAGIC_ext); 463 sv_unmagic (req->self, PERL_MAGIC_ext);
159 SvREFCNT_dec (req->self); 464 SvREFCNT_dec (req->self);
160 } 465 }
161 466
162 if (req->data)
163 SvREFCNT_dec (req->data); 467 SvREFCNT_dec (req->data);
164
165 if (req->fh)
166 SvREFCNT_dec (req->fh); 468 SvREFCNT_dec (req->fh);
167
168 if (req->fh2)
169 SvREFCNT_dec (req->fh2); 469 SvREFCNT_dec (req->fh2);
170
171 if (req->statdata)
172 Safefree (req->statdata);
173
174 if (req->callback)
175 SvREFCNT_dec (req->callback); 470 SvREFCNT_dec (req->callback);
471 Safefree (req->statdata);
176 472
177 if (req->type == REQ_READDIR && req->result >= 0) 473 if (req->type == REQ_READDIR)
178 free (req->data2ptr); 474 free (req->data2ptr);
179 475
180 Safefree (req); 476 Safefree (req);
181} 477}
182 478
183static void 479static void req_cancel_subs (aio_req grp)
184poll_wait ()
185{ 480{
186 if (nreqs && !ress) 481 aio_req sub;
187 {
188 fd_set rfd;
189 FD_ZERO(&rfd);
190 FD_SET(respipe [0], &rfd);
191 482
192 select (respipe [0] + 1, &rfd, 0, 0, 0); 483 if (grp->type != REQ_GROUP)
193 } 484 return;
194}
195 485
196static int 486 SvREFCNT_dec (grp->fh2);
197poll_cb () 487 grp->fh2 = 0;
488
489 for (sub = grp->grp_first; sub; sub = sub->grp_next)
490 req_cancel (sub);
491}
492
493static void req_cancel (aio_req req)
494{
495 req->flags |= FLAG_CANCELLED;
496
497 req_cancel_subs (req);
498}
499
500static int poll_cb ()
198{ 501{
199 dSP; 502 dSP;
200 int count = 0; 503 int count = 0;
201 int do_croak = 0; 504 int do_croak = 0;
202 aio_req req; 505 aio_req req;
203 506
204 for (;;) 507 for (;;)
205 { 508 {
206 pthread_mutex_lock (&reslock); 509 LOCK (reslock);
207 req = ress; 510 req = reqq_shift (&res_queue);
208 511
209 if (req) 512 if (req)
210 { 513 {
211 ress = req->next;
212
213 if (!ress) 514 if (!res_queue.size)
214 { 515 {
215 /* read any signals sent by the worker threads */ 516 /* read any signals sent by the worker threads */
216 char buf [32]; 517 char buf [32];
217 while (read (respipe [0], buf, 32) == 32) 518 while (read (respipe [0], buf, 32) == 32)
218 ; 519 ;
219
220 rese = 0;
221 } 520 }
222 } 521 }
223 522
224 pthread_mutex_unlock (&reslock); 523 UNLOCK (reslock);
225 524
226 if (!req) 525 if (!req)
227 break; 526 break;
228 527
229 nreqs--; 528 --nreqs;
230 529
231 if (req->type == REQ_QUIT) 530 if (req->type == REQ_QUIT)
232 started--; 531 started--;
532 else if (req->type == REQ_GROUP && req->length)
533 {
534 req->fd = 1; /* mark request as delayed */
535 continue;
536 }
233 else 537 else
234 { 538 {
235 int errorno = errno;
236 errno = req->errorno;
237
238 if (req->type == REQ_READ) 539 if (req->type == REQ_READ)
239 SvCUR_set (req->data, req->dataoffset + (req->result > 0 ? req->result : 0)); 540 SvCUR_set (req->data, req->dataoffset + (req->result > 0 ? req->result : 0));
240 541
241 if (req->data2ptr && (req->type == REQ_READ || req->type == REQ_WRITE)) 542 if (req->data2ptr && (req->type == REQ_READ || req->type == REQ_WRITE))
242 SvREADONLY_off (req->data); 543 SvREADONLY_off (req->data);
246 PL_laststype = req->type == REQ_LSTAT ? OP_LSTAT : OP_STAT; 547 PL_laststype = req->type == REQ_LSTAT ? OP_LSTAT : OP_STAT;
247 PL_laststatval = req->result; 548 PL_laststatval = req->result;
248 PL_statcache = *(req->statdata); 549 PL_statcache = *(req->statdata);
249 } 550 }
250 551
251 ENTER; 552 req_invoke (req);
252 PUSHMARK (SP);
253 553
254 if (req->type == REQ_READDIR)
255 {
256 SV *rv = &PL_sv_undef;
257
258 if (req->result >= 0)
259 {
260 char *buf = req->data2ptr;
261 AV *av = newAV ();
262
263 while (req->result)
264 {
265 SV *sv = newSVpv (buf, 0);
266
267 av_push (av, sv);
268 buf += SvCUR (sv) + 1;
269 req->result--;
270 }
271
272 rv = sv_2mortal (newRV_noinc ((SV *)av));
273 }
274
275 XPUSHs (rv);
276 }
277 else
278 {
279 XPUSHs (sv_2mortal (newSViv (req->result)));
280
281 if (req->type == REQ_OPEN)
282 {
283 /* convert fd to fh */
284 SV *fh;
285
286 PUTBACK;
287 call_pv ("IO::AIO::_fd2fh", G_SCALAR | G_EVAL);
288 SPAGAIN;
289
290 fh = SvREFCNT_inc (POPs);
291
292 PUSHMARK (SP);
293 XPUSHs (sv_2mortal (fh));
294 }
295 }
296
297 if (SvOK (req->callback) && !req->cancelled)
298 {
299 PUTBACK;
300 call_sv (req->callback, G_VOID | G_EVAL);
301 SPAGAIN;
302
303 if (SvTRUE (ERRSV))
304 {
305 req_free (req);
306 croak (0);
307 }
308 }
309
310 LEAVE;
311
312 errno = errorno;
313 count++; 554 count++;
314 } 555 }
315 556
316 req_free (req); 557 req_free (req);
317 } 558 }
319 return count; 560 return count;
320} 561}
321 562
322static void *aio_proc(void *arg); 563static void *aio_proc(void *arg);
323 564
324static void
325start_thread (void) 565static void start_thread (void)
326{ 566{
327 sigset_t fullsigset, oldsigset; 567 sigset_t fullsigset, oldsigset;
328 pthread_t tid;
329 pthread_attr_t attr; 568 pthread_attr_t attr;
569
570 worker *wrk = calloc (1, sizeof (worker));
571
572 if (!wrk)
573 croak ("unable to allocate worker thread data");
330 574
331 pthread_attr_init (&attr); 575 pthread_attr_init (&attr);
332 pthread_attr_setstacksize (&attr, STACKSIZE); 576 pthread_attr_setstacksize (&attr, STACKSIZE);
333 pthread_attr_setdetachstate (&attr, PTHREAD_CREATE_DETACHED); 577 pthread_attr_setdetachstate (&attr, PTHREAD_CREATE_DETACHED);
334 578
335 sigfillset (&fullsigset); 579 sigfillset (&fullsigset);
580
581 LOCK (wrklock);
336 sigprocmask (SIG_SETMASK, &fullsigset, &oldsigset); 582 sigprocmask (SIG_SETMASK, &fullsigset, &oldsigset);
337 583
338 if (pthread_create (&tid, &attr, aio_proc, 0) == 0) 584 if (pthread_create (&wrk->tid, &attr, aio_proc, (void *)wrk) == 0)
585 {
586 wrk->prev = &wrk_first;
587 wrk->next = wrk_first.next;
588 wrk_first.next->prev = wrk;
589 wrk_first.next = wrk;
339 started++; 590 started++;
591 }
592 else
593 free (wrk);
340 594
341 sigprocmask (SIG_SETMASK, &oldsigset, 0); 595 sigprocmask (SIG_SETMASK, &oldsigset, 0);
596 UNLOCK (wrklock);
342} 597}
343 598
344static void
345req_send (aio_req req) 599static void req_send (aio_req req)
346{ 600{
347 while (started < wanted && nreqs >= started) 601 while (started < wanted && nreqs >= started)
348 start_thread (); 602 start_thread ();
349 603
350 nreqs++; 604 ++nreqs;
351 605
352 pthread_mutex_lock (&reqlock); 606 LOCK (reqlock);
353 607 reqq_push (&req_queue, req);
354 req->next = 0;
355
356 if (reqe)
357 {
358 reqe->next = req;
359 reqe = req;
360 }
361 else
362 reqe = reqs = req;
363
364 pthread_cond_signal (&reqwait); 608 pthread_cond_signal (&reqwait);
365 pthread_mutex_unlock (&reqlock); 609 UNLOCK (reqlock);
366
367 if (nreqs > max_outstanding)
368 for (;;)
369 {
370 poll_cb ();
371
372 if (nreqs <= max_outstanding)
373 break;
374
375 poll_wait ();
376 }
377} 610}
378 611
379static void 612static void end_thread (void)
380end_thread (void)
381{ 613{
382 aio_req req; 614 aio_req req;
615
383 Newz (0, req, 1, aio_cb); 616 Newz (0, req, 1, aio_cb);
617
384 req->type = REQ_QUIT; 618 req->type = REQ_QUIT;
619 req->pri = PRI_MAX + PRI_BIAS;
385 620
386 req_send (req); 621 req_send (req);
387} 622}
388 623
389static void min_parallel (int nthreads) 624static void min_parallel (int nthreads)
436 * normal read/write by using a mutex. slows down execution a lot, 671 * normal read/write by using a mutex. slows down execution a lot,
437 * but that's your problem, not mine. 672 * but that's your problem, not mine.
438 */ 673 */
439static pthread_mutex_t preadwritelock = PTHREAD_MUTEX_INITIALIZER; 674static pthread_mutex_t preadwritelock = PTHREAD_MUTEX_INITIALIZER;
440 675
441static ssize_t 676static ssize_t pread (int fd, void *buf, size_t count, off_t offset)
442pread (int fd, void *buf, size_t count, off_t offset)
443{ 677{
444 ssize_t res; 678 ssize_t res;
445 off_t ooffset; 679 off_t ooffset;
446 680
447 pthread_mutex_lock (&preadwritelock); 681 LOCK (preadwritelock);
448 ooffset = lseek (fd, 0, SEEK_CUR); 682 ooffset = lseek (fd, 0, SEEK_CUR);
449 lseek (fd, offset, SEEK_SET); 683 lseek (fd, offset, SEEK_SET);
450 res = read (fd, buf, count); 684 res = read (fd, buf, count);
451 lseek (fd, ooffset, SEEK_SET); 685 lseek (fd, ooffset, SEEK_SET);
452 pthread_mutex_unlock (&preadwritelock); 686 UNLOCK (preadwritelock);
453 687
454 return res; 688 return res;
455} 689}
456 690
457static ssize_t
458pwrite (int fd, void *buf, size_t count, off_t offset) 691static ssize_t pwrite (int fd, void *buf, size_t count, off_t offset)
459{ 692{
460 ssize_t res; 693 ssize_t res;
461 off_t ooffset; 694 off_t ooffset;
462 695
463 pthread_mutex_lock (&preadwritelock); 696 LOCK (preadwritelock);
464 ooffset = lseek (fd, 0, SEEK_CUR); 697 ooffset = lseek (fd, 0, SEEK_CUR);
465 lseek (fd, offset, SEEK_SET); 698 lseek (fd, offset, SEEK_SET);
466 res = write (fd, buf, count); 699 res = write (fd, buf, count);
467 lseek (fd, offset, SEEK_SET); 700 lseek (fd, offset, SEEK_SET);
468 pthread_mutex_unlock (&preadwritelock); 701 UNLOCK (preadwritelock);
469 702
470 return res; 703 return res;
471} 704}
472#endif 705#endif
473 706
474#if !HAVE_FDATASYNC 707#if !HAVE_FDATASYNC
475# define fdatasync fsync 708# define fdatasync fsync
476#endif 709#endif
477 710
478#if !HAVE_READAHEAD 711#if !HAVE_READAHEAD
479# define readahead aio_readahead 712# define readahead(fd,offset,count) aio_readahead (fd, offset, count, self)
480 713
481static ssize_t 714static ssize_t aio_readahead (int fd, off_t offset, size_t count, worker *self)
482readahead (int fd, off_t offset, size_t count)
483{ 715{
484 char readahead_buf[4096]; 716 dBUF;
485 717
486 while (count > 0) 718 while (count > 0)
487 { 719 {
488 size_t len = count < sizeof (readahead_buf) ? count : sizeof (readahead_buf); 720 size_t len = count < AIO_BUFSIZE ? count : AIO_BUFSIZE;
489 721
490 pread (fd, readahead_buf, len, offset); 722 pread (fd, aio_buf, len, offset);
491 offset += len; 723 offset += len;
492 count -= len; 724 count -= len;
493 } 725 }
494 726
495 errno = 0; 727 errno = 0;
496} 728}
729
497#endif 730#endif
498 731
499#if !HAVE_READDIR_R 732#if !HAVE_READDIR_R
500# define readdir_r aio_readdir_r 733# define readdir_r aio_readdir_r
501 734
502static pthread_mutex_t readdirlock = PTHREAD_MUTEX_INITIALIZER; 735static pthread_mutex_t readdirlock = PTHREAD_MUTEX_INITIALIZER;
503 736
504static int
505readdir_r (DIR *dirp, struct dirent *ent, struct dirent **res) 737static int readdir_r (DIR *dirp, struct dirent *ent, struct dirent **res)
506{ 738{
507 struct dirent *e; 739 struct dirent *e;
508 int errorno; 740 int errorno;
509 741
510 pthread_mutex_lock (&readdirlock); 742 LOCK (readdirlock);
511 743
512 e = readdir (dirp); 744 e = readdir (dirp);
513 errorno = errno; 745 errorno = errno;
514 746
515 if (e) 747 if (e)
518 strcpy (ent->d_name, e->d_name); 750 strcpy (ent->d_name, e->d_name);
519 } 751 }
520 else 752 else
521 *res = 0; 753 *res = 0;
522 754
523 pthread_mutex_unlock (&readdirlock); 755 UNLOCK (readdirlock);
524 756
525 errno = errorno; 757 errno = errorno;
526 return e ? 0 : -1; 758 return e ? 0 : -1;
527} 759}
528#endif 760#endif
529 761
530/* sendfile always needs emulation */ 762/* sendfile always needs emulation */
531static ssize_t
532sendfile_ (int ofd, int ifd, off_t offset, size_t count) 763static ssize_t sendfile_ (int ofd, int ifd, off_t offset, size_t count, worker *self)
533{ 764{
534 ssize_t res; 765 ssize_t res;
535 766
536 if (!count) 767 if (!count)
537 return 0; 768 return 0;
586#endif 817#endif
587 ) 818 )
588 ) 819 )
589 { 820 {
590 /* emulate sendfile. this is a major pain in the ass */ 821 /* emulate sendfile. this is a major pain in the ass */
591 char buf[4096]; 822 dBUF;
823
592 res = 0; 824 res = 0;
593 825
594 while (count) 826 while (count)
595 { 827 {
596 ssize_t cnt; 828 ssize_t cnt;
597 829
598 cnt = pread (ifd, buf, count > 4096 ? 4096 : count, offset); 830 cnt = pread (ifd, aio_buf, count > AIO_BUFSIZE ? AIO_BUFSIZE : count, offset);
599 831
600 if (cnt <= 0) 832 if (cnt <= 0)
601 { 833 {
602 if (cnt && !res) res = -1; 834 if (cnt && !res) res = -1;
603 break; 835 break;
604 } 836 }
605 837
606 cnt = write (ofd, buf, cnt); 838 cnt = write (ofd, aio_buf, cnt);
607 839
608 if (cnt <= 0) 840 if (cnt <= 0)
609 { 841 {
610 if (cnt && !res) res = -1; 842 if (cnt && !res) res = -1;
611 break; 843 break;
619 851
620 return res; 852 return res;
621} 853}
622 854
623/* read a full directory */ 855/* read a full directory */
624static int 856static void scandir_ (aio_req req, worker *self)
625scandir_ (const char *path, void **namesp)
626{ 857{
627 DIR *dirp = opendir (path); 858 DIR *dirp;
628 union 859 union
629 { 860 {
630 struct dirent d; 861 struct dirent d;
631 char b [offsetof (struct dirent, d_name) + NAME_MAX + 1]; 862 char b [offsetof (struct dirent, d_name) + NAME_MAX + 1];
632 } u; 863 } *u;
633 struct dirent *entp; 864 struct dirent *entp;
634 char *name, *names; 865 char *name, *names;
635 int memlen = 4096; 866 int memlen = 4096;
636 int memofs = 0; 867 int memofs = 0;
637 int res = 0; 868 int res = 0;
638 int errorno; 869 int errorno;
639 870
640 if (!dirp) 871 LOCK (wrklock);
641 return -1; 872 self->dirp = dirp = opendir (req->dataptr);
873 self->dbuf = u = malloc (sizeof (*u));
874 UNLOCK (wrklock);
642 875
643 names = malloc (memlen); 876 req->data2ptr = names = malloc (memlen);
644 877
878 if (dirp && u && names)
645 for (;;) 879 for (;;)
646 { 880 {
881 errno = 0;
647 errno = 0, readdir_r (dirp, &u.d, &entp); 882 readdir_r (dirp, &u->d, &entp);
648 883
649 if (!entp) 884 if (!entp)
650 break; 885 break;
651 886
652 name = entp->d_name; 887 name = entp->d_name;
653 888
654 if (name [0] != '.' || (name [1] && (name [1] != '.' || name [2]))) 889 if (name [0] != '.' || (name [1] && (name [1] != '.' || name [2])))
655 { 890 {
656 int len = strlen (name) + 1; 891 int len = strlen (name) + 1;
657 892
658 res++; 893 res++;
659 894
660 while (memofs + len > memlen) 895 while (memofs + len > memlen)
661 { 896 {
662 memlen *= 2; 897 memlen *= 2;
898 LOCK (wrklock);
663 names = realloc (names, memlen); 899 req->data2ptr = names = realloc (names, memlen);
900 UNLOCK (wrklock);
901
664 if (!names) 902 if (!names)
665 break; 903 break;
666 } 904 }
667 905
668 memcpy (names + memofs, name, len); 906 memcpy (names + memofs, name, len);
669 memofs += len; 907 memofs += len;
670 } 908 }
671 } 909 }
672 910
673 errorno = errno;
674 closedir (dirp);
675
676 if (errorno) 911 if (errno)
677 {
678 free (names);
679 errno = errorno;
680 res = -1; 912 res = -1;
681 } 913
682 914 req->result = res;
683 *namesp = (void *)names;
684 return res;
685} 915}
686 916
687/*****************************************************************************/ 917/*****************************************************************************/
688 918
689static void *
690aio_proc (void *thr_arg) 919static void *aio_proc (void *thr_arg)
691{ 920{
692 aio_req req; 921 aio_req req;
693 int type; 922 int type;
923 worker *self = (worker *)thr_arg;
694 924
695 do 925 do
696 { 926 {
697 pthread_mutex_lock (&reqlock); 927 LOCK (reqlock);
698 928
699 for (;;) 929 for (;;)
700 { 930 {
701 req = reqs; 931 self->req = req = reqq_shift (&req_queue);
702
703 if (reqs)
704 {
705 reqs = reqs->next;
706 if (!reqs) reqe = 0;
707 }
708 932
709 if (req) 933 if (req)
710 break; 934 break;
711 935
712 pthread_cond_wait (&reqwait, &reqlock); 936 pthread_cond_wait (&reqwait, &reqlock);
713 } 937 }
714 938
715 pthread_mutex_unlock (&reqlock); 939 UNLOCK (reqlock);
716 940
717 errno = 0; /* strictly unnecessary */ 941 errno = 0; /* strictly unnecessary */
942 type = req->type; /* remember type for QUIT check */
718 943
719 if (!req->cancelled) 944 if (!(req->flags & FLAG_CANCELLED))
720 switch (req->type) 945 switch (type)
721 { 946 {
722 case REQ_READ: req->result = pread (req->fd, req->dataptr, req->length, req->offset); break; 947 case REQ_READ: req->result = pread (req->fd, req->dataptr, req->length, req->offset); break;
723 case REQ_WRITE: req->result = pwrite (req->fd, req->dataptr, req->length, req->offset); break; 948 case REQ_WRITE: req->result = pwrite (req->fd, req->dataptr, req->length, req->offset); break;
724 949
725 case REQ_READAHEAD: req->result = readahead (req->fd, req->offset, req->length); break; 950 case REQ_READAHEAD: req->result = readahead (req->fd, req->offset, req->length); break;
726 case REQ_SENDFILE: req->result = sendfile_ (req->fd, req->fd2, req->offset, req->length); break; 951 case REQ_SENDFILE: req->result = sendfile_ (req->fd, req->fd2, req->offset, req->length, self); break;
727 952
728 case REQ_STAT: req->result = stat (req->dataptr, req->statdata); break; 953 case REQ_STAT: req->result = stat (req->dataptr, req->statdata); break;
729 case REQ_LSTAT: req->result = lstat (req->dataptr, req->statdata); break; 954 case REQ_LSTAT: req->result = lstat (req->dataptr, req->statdata); break;
730 case REQ_FSTAT: req->result = fstat (req->fd , req->statdata); break; 955 case REQ_FSTAT: req->result = fstat (req->fd , req->statdata); break;
731 956
737 case REQ_LINK: req->result = link (req->data2ptr, req->dataptr); break; 962 case REQ_LINK: req->result = link (req->data2ptr, req->dataptr); break;
738 case REQ_SYMLINK: req->result = symlink (req->data2ptr, req->dataptr); break; 963 case REQ_SYMLINK: req->result = symlink (req->data2ptr, req->dataptr); break;
739 964
740 case REQ_FDATASYNC: req->result = fdatasync (req->fd); break; 965 case REQ_FDATASYNC: req->result = fdatasync (req->fd); break;
741 case REQ_FSYNC: req->result = fsync (req->fd); break; 966 case REQ_FSYNC: req->result = fsync (req->fd); break;
742 case REQ_READDIR: req->result = scandir_ (req->dataptr, &req->data2ptr); break; 967 case REQ_READDIR: scandir_ (req, self); break;
743 968
969 case REQ_BUSY:
970 {
971 struct timeval tv;
972
973 tv.tv_sec = req->fd;
974 tv.tv_usec = req->fd2;
975
976 req->result = select (0, 0, 0, 0, &tv);
977 }
978
979 case REQ_GROUP:
980 case REQ_NOP:
744 case REQ_QUIT: 981 case REQ_QUIT:
745 break; 982 break;
746 983
747 default: 984 default:
748 req->result = ENOSYS; 985 req->result = ENOSYS;
749 break; 986 break;
750 } 987 }
751 988
752 req->errorno = errno; 989 req->errorno = errno;
753 990
754 pthread_mutex_lock (&reslock); 991 LOCK (reslock);
755 992
756 req->next = 0; 993 if (!reqq_push (&res_queue, req))
757
758 if (rese)
759 {
760 rese->next = req;
761 rese = req;
762 }
763 else
764 {
765 rese = ress = req;
766
767 /* write a dummy byte to the pipe so fh becomes ready */ 994 /* write a dummy byte to the pipe so fh becomes ready */
768 write (respipe [1], &respipe, 1); 995 write (respipe [1], &respipe, 1);
769 }
770 996
771 pthread_mutex_unlock (&reslock); 997 self->req = 0;
998 worker_clear (self);
999
1000 UNLOCK (reslock);
772 } 1001 }
773 while (type != REQ_QUIT); 1002 while (type != REQ_QUIT);
774 1003
1004 LOCK (wrklock);
1005 worker_free (self);
1006 UNLOCK (wrklock);
1007
775 return 0; 1008 return 0;
776} 1009}
777 1010
778/*****************************************************************************/ 1011/*****************************************************************************/
779 1012
780static void atfork_prepare (void) 1013static void atfork_prepare (void)
781{ 1014{
782 pthread_mutex_lock (&reqlock); 1015 LOCK (wrklock);
783 pthread_mutex_lock (&reslock); 1016 LOCK (reqlock);
1017 LOCK (reslock);
784#if !HAVE_PREADWRITE 1018#if !HAVE_PREADWRITE
785 pthread_mutex_lock (&preadwritelock); 1019 LOCK (preadwritelock);
786#endif 1020#endif
787#if !HAVE_READDIR_R 1021#if !HAVE_READDIR_R
788 pthread_mutex_lock (&readdirlock); 1022 LOCK (readdirlock);
789#endif 1023#endif
790} 1024}
791 1025
792static void atfork_parent (void) 1026static void atfork_parent (void)
793{ 1027{
794#if !HAVE_READDIR_R 1028#if !HAVE_READDIR_R
795 pthread_mutex_unlock (&readdirlock); 1029 UNLOCK (readdirlock);
796#endif 1030#endif
797#if !HAVE_PREADWRITE 1031#if !HAVE_PREADWRITE
798 pthread_mutex_unlock (&preadwritelock); 1032 UNLOCK (preadwritelock);
799#endif 1033#endif
800 pthread_mutex_unlock (&reslock); 1034 UNLOCK (reslock);
801 pthread_mutex_unlock (&reqlock); 1035 UNLOCK (reqlock);
1036 UNLOCK (wrklock);
802} 1037}
803 1038
804static void atfork_child (void) 1039static void atfork_child (void)
805{ 1040{
806 aio_req prv; 1041 aio_req prv;
807 1042
1043 while (prv = reqq_shift (&req_queue))
1044 req_free (prv);
1045
1046 while (prv = reqq_shift (&res_queue))
1047 req_free (prv);
1048
1049 while (wrk_first.next != &wrk_first)
1050 {
1051 worker *wrk = wrk_first.next;
1052
1053 if (wrk->req)
1054 req_free (wrk->req);
1055
1056 worker_clear (wrk);
1057 worker_free (wrk);
1058 }
1059
808 started = 0; 1060 started = 0;
809 1061 nreqs = 0;
810 while (reqs)
811 {
812 prv = reqs;
813 reqs = prv->next;
814 req_free (prv);
815 }
816
817 reqs = reqe = 0;
818
819 while (ress)
820 {
821 prv = ress;
822 ress = prv->next;
823 req_free (prv);
824 }
825
826 ress = rese = 0;
827 1062
828 close (respipe [0]); 1063 close (respipe [0]);
829 close (respipe [1]); 1064 close (respipe [1]);
830 create_pipe (); 1065 create_pipe ();
831 1066
832 atfork_parent (); 1067 atfork_parent ();
833} 1068}
834 1069
835#define dREQ \ 1070#define dREQ \
836 aio_req req; \ 1071 aio_req req; \
1072 int req_pri = next_pri; \
1073 next_pri = DEFAULT_PRI + PRI_BIAS; \
837 \ 1074 \
838 if (SvOK (callback) && !SvROK (callback)) \ 1075 if (SvOK (callback) && !SvROK (callback)) \
839 croak ("callback must be undef or of reference type"); \ 1076 croak ("callback must be undef or of reference type"); \
840 \ 1077 \
841 Newz (0, req, 1, aio_cb); \ 1078 Newz (0, req, 1, aio_cb); \
842 if (!req) \ 1079 if (!req) \
843 croak ("out of memory during aio_req allocation"); \ 1080 croak ("out of memory during aio_req allocation"); \
844 \ 1081 \
845 req->callback = newSVsv (callback) 1082 req->callback = newSVsv (callback); \
1083 req->pri = req_pri
846 1084
847#define REQ_SEND \ 1085#define REQ_SEND \
848 req_send (req); \ 1086 req_send (req); \
849 \ 1087 \
850 if (GIMME_V != G_VOID) \ 1088 if (GIMME_V != G_VOID) \
851 XPUSHs (req_sv (req)); 1089 XPUSHs (req_sv (req, AIO_REQ_KLASS));
852 1090
853MODULE = IO::AIO PACKAGE = IO::AIO 1091MODULE = IO::AIO PACKAGE = IO::AIO
854 1092
855PROTOTYPES: ENABLE 1093PROTOTYPES: ENABLE
856 1094
872 1110
873void 1111void
874max_parallel (nthreads) 1112max_parallel (nthreads)
875 int nthreads 1113 int nthreads
876 PROTOTYPE: $ 1114 PROTOTYPE: $
877
878int
879max_outstanding (nreqs)
880 int nreqs
881 PROTOTYPE: $
882 CODE:
883 RETVAL = max_outstanding;
884 max_outstanding = nreqs;
885 1115
886void 1116void
887aio_open (pathname,flags,mode,callback=&PL_sv_undef) 1117aio_open (pathname,flags,mode,callback=&PL_sv_undef)
888 SV * pathname 1118 SV * pathname
889 int flags 1119 int flags
1103 req->dataptr = SvPVbyte_nolen (req->data); 1333 req->dataptr = SvPVbyte_nolen (req->data);
1104 1334
1105 REQ_SEND; 1335 REQ_SEND;
1106} 1336}
1107 1337
1108#if 0
1109
1110# undocumented, because it does not cancel active requests
1111void 1338void
1112cancel_most_requests () 1339aio_busy (delay,callback=&PL_sv_undef)
1340 double delay
1341 SV * callback
1342 PPCODE:
1343{
1344 dREQ;
1345
1346 req->type = REQ_BUSY;
1347 req->fd = delay < 0. ? 0 : delay;
1348 req->fd2 = delay < 0. ? 0 : 1000. * (delay - req->fd);
1349
1350 REQ_SEND;
1351}
1352
1353void
1354aio_group (callback=&PL_sv_undef)
1355 SV * callback
1113 PROTOTYPE: 1356 PROTOTYPE: ;$
1114 CODE: 1357 PPCODE:
1115{ 1358{
1116 aio_req *req; 1359 dREQ;
1117 1360
1118 pthread_mutex_lock (&reqlock); 1361 req->type = REQ_GROUP;
1119 for (req = reqs; req; req = req->next) 1362 req_send (req);
1120 req->flags |= 1;
1121 pthread_mutex_unlock (&reqlock);
1122 1363
1123 pthread_mutex_lock (&reslock); 1364 XPUSHs (req_sv (req, AIO_GRP_KLASS));
1124 for (req = ress; req; req = req->next)
1125 req->flags |= 1;
1126 pthread_mutex_unlock (&reslock);
1127} 1365}
1128 1366
1129#endif 1367void
1368aio_nop (callback=&PL_sv_undef)
1369 SV * callback
1370 PPCODE:
1371{
1372 dREQ;
1373
1374 req->type = REQ_NOP;
1375
1376 REQ_SEND;
1377}
1378
1379void
1380aioreq_pri (int pri = DEFAULT_PRI)
1381 CODE:
1382 if (pri < PRI_MIN) pri = PRI_MIN;
1383 if (pri > PRI_MAX) pri = PRI_MAX;
1384 next_pri = pri + PRI_BIAS;
1385
1386void
1387aioreq_nice (int nice = 0)
1388 CODE:
1389 nice = next_pri - nice;
1390 if (nice < PRI_MIN) nice = PRI_MIN;
1391 if (nice > PRI_MAX) nice = PRI_MAX;
1392 next_pri = nice + PRI_BIAS;
1130 1393
1131void 1394void
1132flush () 1395flush ()
1133 PROTOTYPE: 1396 PROTOTYPE:
1134 CODE: 1397 CODE:
1177 CODE: 1440 CODE:
1178 RETVAL = nreqs; 1441 RETVAL = nreqs;
1179 OUTPUT: 1442 OUTPUT:
1180 RETVAL 1443 RETVAL
1181 1444
1445PROTOTYPES: DISABLE
1446
1182MODULE = IO::AIO PACKAGE = IO::AIO::CB 1447MODULE = IO::AIO PACKAGE = IO::AIO::REQ
1183 1448
1184void 1449void
1185cancel (aio_req_ornot req) 1450cancel (aio_req_ornot req)
1186 PROTOTYPE:
1187 CODE: 1451 CODE:
1188 req->cancelled = 1; 1452 req_cancel (req);
1189 1453
1454void
1455cb (aio_req_ornot req, SV *callback=&PL_sv_undef)
1456 CODE:
1457 SvREFCNT_dec (req->callback);
1458 req->callback = newSVsv (callback);
1459
1460MODULE = IO::AIO PACKAGE = IO::AIO::GRP
1461
1462void
1463add (aio_req grp, ...)
1464 PPCODE:
1465{
1466 int i;
1467 aio_req req;
1468
1469 if (grp->fd == 2)
1470 croak ("cannot add requests to IO::AIO::GRP after the group finished");
1471
1472 for (i = 1; i < items; ++i )
1473 {
1474 if (GIMME_V != G_VOID)
1475 XPUSHs (sv_2mortal (newSVsv (ST (i))));
1476
1477 req = SvAIO_REQ (ST (i));
1478
1479 if (req)
1480 {
1481 ++grp->length;
1482 req->grp = grp;
1483
1484 req->grp_prev = 0;
1485 req->grp_next = grp->grp_first;
1486
1487 if (grp->grp_first)
1488 grp->grp_first->grp_prev = req;
1489
1490 grp->grp_first = req;
1491 }
1492 }
1493}
1494
1495void
1496cancel_subs (aio_req_ornot req)
1497 CODE:
1498 req_cancel_subs (req);
1499
1500void
1501result (aio_req grp, ...)
1502 CODE:
1503{
1504 int i;
1505 AV *av = newAV ();
1506
1507 for (i = 1; i < items; ++i )
1508 av_push (av, newSVsv (ST (i)));
1509
1510 SvREFCNT_dec (grp->data);
1511 grp->data = (SV *)av;
1512}
1513
1514void
1515limit (aio_req grp, int limit)
1516 CODE:
1517 grp->fd2 = limit;
1518 aio_grp_feed (grp);
1519
1520void
1521feed (aio_req grp, SV *callback=&PL_sv_undef)
1522 CODE:
1523{
1524 SvREFCNT_dec (grp->fh2);
1525 grp->fh2 = newSVsv (callback);
1526
1527 if (grp->fd2 <= 0)
1528 grp->fd2 = 2;
1529
1530 aio_grp_feed (grp);
1531}
1532
