/cvs/IO-AIO/AIO.xs

Comparing IO-AIO/AIO.xs (file contents):
Revision 1.43 by root, Sat Oct 21 23:06:04 2006 UTC vs.
Revision 1.78 by root, Thu Oct 26 14:35:34 2006 UTC

1/* solaris */
2#define _POSIX_PTHREAD_SEMANTICS 1
3
4#if __linux && !defined(_GNU_SOURCE)
5# define _GNU_SOURCE
6#endif
7
8/* just in case */
1#define _REENTRANT 1 9#define _REENTRANT 1
10
2#include <errno.h> 11#include <errno.h>
3 12
4#include "EXTERN.h" 13#include "EXTERN.h"
5#include "perl.h" 14#include "perl.h"
6#include "XSUB.h" 15#include "XSUB.h"
9 18
10#include <pthread.h> 19#include <pthread.h>
11 20
12#include <stddef.h> 21#include <stddef.h>
13#include <errno.h> 22#include <errno.h>
23#include <sys/time.h>
24#include <sys/select.h>
14#include <sys/types.h> 25#include <sys/types.h>
15#include <sys/stat.h> 26#include <sys/stat.h>
16#include <limits.h> 27#include <limits.h>
17#include <unistd.h> 28#include <unistd.h>
18#include <fcntl.h> 29#include <fcntl.h>
37/* used for struct dirent, AIX doesn't provide it */ 48/* used for struct dirent, AIX doesn't provide it */
38#ifndef NAME_MAX 49#ifndef NAME_MAX
39# define NAME_MAX 4096 50# define NAME_MAX 4096
40#endif 51#endif
41 52
53#ifndef PTHREAD_STACK_MIN
54/* care for broken platforms, e.g. windows */
55# define PTHREAD_STACK_MIN 16384
56#endif
57
42#if __ia64 58#if __ia64
43# define STACKSIZE 65536 59# define STACKSIZE 65536
60#elif __i386 || __x86_64 /* 16k is unreasonably high :( */
61# define STACKSIZE PTHREAD_STACK_MIN
44#else 62#else
45# define STACKSIZE 8192 63# define STACKSIZE 16384
46#endif 64#endif
65
66/* buffer size for various temporary buffers */
67#define AIO_BUFSIZE 65536
68
69#define dBUF \
70 char *aio_buf; \
71 LOCK (wrklock); \
72 self->dbuf = aio_buf = malloc (AIO_BUFSIZE); \
73 UNLOCK (wrklock); \
74 if (!aio_buf) \
75 return -1;
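/* Descriptive note (not in the original source): dBUF expects a "worker *self"
 * in scope, records the allocation in self->dbuf so worker_clear () can
 * release it after the request, and makes the calling handler return -1
 * when the malloc fails. */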
47 76
48enum { 77enum {
49 REQ_QUIT, 78 REQ_QUIT,
50 REQ_OPEN, REQ_CLOSE, 79 REQ_OPEN, REQ_CLOSE,
51 REQ_READ, REQ_WRITE, REQ_READAHEAD, 80 REQ_READ, REQ_WRITE, REQ_READAHEAD,
53 REQ_STAT, REQ_LSTAT, REQ_FSTAT, 82 REQ_STAT, REQ_LSTAT, REQ_FSTAT,
54 REQ_FSYNC, REQ_FDATASYNC, 83 REQ_FSYNC, REQ_FDATASYNC,
55 REQ_UNLINK, REQ_RMDIR, REQ_RENAME, 84 REQ_UNLINK, REQ_RMDIR, REQ_RENAME,
56 REQ_READDIR, 85 REQ_READDIR,
57 REQ_LINK, REQ_SYMLINK, 86 REQ_LINK, REQ_SYMLINK,
87 REQ_GROUP, REQ_NOP,
88 REQ_BUSY,
58}; 89};
59 90
60#define AIO_CB_KLASS "IO::AIO::CB" 91#define AIO_REQ_KLASS "IO::AIO::REQ"
92#define AIO_GRP_KLASS "IO::AIO::GRP"
61 93
62typedef struct aio_cb 94typedef struct aio_cb
63{ 95{
64 struct aio_cb *grp_prev, *grp_next;
65 struct aio_grp *grp;
66
67 struct aio_cb *volatile next; 96 struct aio_cb *volatile next;
68
69 SV *self; /* the perl counterpart of this request, if any */
70 97
71 SV *data, *callback; 98 SV *data, *callback;
72 SV *fh, *fh2; 99 SV *fh, *fh2;
73 void *dataptr, *data2ptr; 100 void *dataptr, *data2ptr;
74 Stat_t *statdata; 101 Stat_t *statdata;
75 off_t offset; 102 off_t offset;
76 size_t length; 103 size_t length;
77 ssize_t result; 104 ssize_t result;
78 105
106 STRLEN dataoffset;
79 int type; 107 int type;
80 int fd, fd2; 108 int fd, fd2;
81 int errorno; 109 int errorno;
82 STRLEN dataoffset;
83 mode_t mode; /* open */ 110 mode_t mode; /* open */
111
84 unsigned char cancelled; 112 unsigned char flags;
113 unsigned char pri;
114
115 SV *self; /* the perl counterpart of this request, if any */
116 struct aio_cb *grp, *grp_prev, *grp_next, *grp_first;
85} aio_cb; 117} aio_cb;
118
119enum {
120 FLAG_CANCELLED = 0x01,
121};
86 122
87typedef aio_cb *aio_req; 123typedef aio_cb *aio_req;
88typedef aio_cb *aio_req_ornot; 124typedef aio_cb *aio_req_ornot;
89 125
126enum {
127 PRI_MIN = -4,
128 PRI_MAX = 4,
129
130 DEFAULT_PRI = 0,
131 PRI_BIAS = -PRI_MIN,
132 NUM_PRI = PRI_MAX + PRI_BIAS + 1,
133};
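/* Worked example (editorial note) with the values above: PRI_BIAS is 4 and
 * NUM_PRI is 9, so a request priority p in [PRI_MIN, PRI_MAX] is stored at
 * queue slot p + PRI_BIAS, i.e. -4 maps to slot 0, DEFAULT_PRI to slot 4
 * and +4 to slot 8. next_pri below already holds the biased value. */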
134
135static int next_pri = DEFAULT_PRI + PRI_BIAS;
136
90static int started, wanted; 137static unsigned int started, wanted;
91static volatile int nreqs; 138static volatile unsigned int nreqs;
92static int max_outstanding = 1<<30; 139static volatile unsigned int max_outstanding = 0xffffffff;
93static int respipe [2]; 140static int respipe [2];
94 141
142#if __linux && defined (PTHREAD_ADAPTIVE_MUTEX_INITIALIZER_NP)
143# define AIO_MUTEX_INIT PTHREAD_ADAPTIVE_MUTEX_INITIALIZER_NP
144#else
145# define AIO_MUTEX_INIT PTHREAD_MUTEX_INITIALIZER
146#endif
147
148#define LOCK(mutex) pthread_mutex_lock (&(mutex))
149#define UNLOCK(mutex) pthread_mutex_unlock (&(mutex))
150
151/* worker threads management */
152static pthread_mutex_t wrklock = AIO_MUTEX_INIT;
153
154typedef struct worker {
155 /* locked by wrklock */
156 struct worker *prev, *next;
157
158 pthread_t tid;
159
160 /* locked by reslock, reqlock or wrklock */
161 aio_req req; /* currently processed request */
162 void *dbuf;
163 DIR *dirp;
164} worker;
165
166static worker wrk_first = { &wrk_first, &wrk_first, 0 };
167
168static void worker_clear (worker *wrk)
169{
170 if (wrk->dirp)
171 {
172 closedir (wrk->dirp);
173 wrk->dirp = 0;
174 }
175
176 if (wrk->dbuf)
177 {
178 free (wrk->dbuf);
179 wrk->dbuf = 0;
180 }
181}
182
183static void worker_free (worker *wrk)
184{
185 wrk->next->prev = wrk->prev;
186 wrk->prev->next = wrk->next;
187
188 free (wrk);
189}
190
95static pthread_mutex_t reslock = PTHREAD_MUTEX_INITIALIZER; 191static pthread_mutex_t reslock = AIO_MUTEX_INIT;
96static pthread_mutex_t reqlock = PTHREAD_MUTEX_INITIALIZER; 192static pthread_mutex_t reqlock = AIO_MUTEX_INIT;
97static pthread_cond_t reqwait = PTHREAD_COND_INITIALIZER; 193static pthread_cond_t reqwait = PTHREAD_COND_INITIALIZER;
98 194
99static volatile aio_req reqs, reqe; /* queue start, queue end */ 195/*
100static volatile aio_req ress, rese; /* queue start, queue end */ 196 * a somewhat faster data structure might be nice, but
101 197 * with 8 priorities this actually needs <20 insns
198 * per shift, the most expensive operation.
199 */
102typedef struct aio_grp 200typedef struct {
103{ 201 aio_req qs[NUM_PRI], qe[NUM_PRI]; /* qstart, qend */
104 struct aio_cb *first, *last; 202 int size;
105 SV *callback; 203} reqq;
106 int busycount;
107} aio_grp;
108 204
109static void aio_grp_begin (aio_grp *grp) 205static reqq req_queue;
110{ 206static reqq res_queue;
111 ++grp->busycount;
112}
113 207
114static void aio_grp_end (aio_grp *grp) 208int reqq_push (reqq *q, aio_req req)
115{ 209{
116 --grp->busycount; 210 int pri = req->pri;
211 req->next = 0;
117 212
118 if (grp->busycount) 213 if (q->qe[pri])
214 {
215 q->qe[pri]->next = req;
216 q->qe[pri] = req;
217 }
218 else
219 q->qe[pri] = q->qs[pri] = req;
220
221 return q->size++;
222}
223
224aio_req reqq_shift (reqq *q)
225{
226 int pri;
227
228 if (!q->size)
119 return; 229 return 0;
120 230
121 SvREFCNT_dec (grp->callback); 231 --q->size;
122 grp->callback = 0;
123}
124 232
125static aio_grp *aio_grp_new () 233 for (pri = NUM_PRI; pri--; )
126{ 234 {
127 aio_grp *grp; 235 aio_req req = q->qs[pri];
128 236
129 Newz (0, grp, 1, aio_grp); 237 if (req)
130 aio_grp_begin (grp); 238 {
239 if (!(q->qs[pri] = req->next))
240 q->qe[pri] = 0;
131 241
132 return grp; 242 return req;
243 }
244 }
245
246 abort ();
133} 247}
248
249static int poll_cb (int max);
250static void req_invoke (aio_req req);
251static void req_free (aio_req req);
252static void req_cancel (aio_req req);
134 253
135/* must be called at most once */ 254/* must be called at most once */
136static SV *req_sv (aio_req req) 255static SV *req_sv (aio_req req, const char *klass)
137{ 256{
257 if (!req->self)
258 {
138 req->self = (SV *)newHV (); 259 req->self = (SV *)newHV ();
139 sv_magic (req->self, 0, PERL_MAGIC_ext, (char *)req, 0); 260 sv_magic (req->self, 0, PERL_MAGIC_ext, (char *)req, 0);
261 }
140 262
141 return sv_bless (newRV_noinc (req->self), gv_stashpv (AIO_CB_KLASS, 1)); 263 return sv_2mortal (sv_bless (newRV_inc (req->self), gv_stashpv (klass, 1)));
142} 264}
143 265
144static aio_req SvAIO_REQ (SV *sv) 266static aio_req SvAIO_REQ (SV *sv)
145{ 267{
268 MAGIC *mg;
269
146 if (!sv_derived_from (sv, AIO_CB_KLASS) || !SvROK (sv)) 270 if (!sv_derived_from (sv, AIO_REQ_KLASS) || !SvROK (sv))
147 croak ("object of class " AIO_CB_KLASS " expected"); 271 croak ("object of class " AIO_REQ_KLASS " expected");
148 272
149 MAGIC *mg = mg_find (SvRV (sv), PERL_MAGIC_ext); 273 mg = mg_find (SvRV (sv), PERL_MAGIC_ext);
150 274
151 return mg ? (aio_req)mg->mg_ptr : 0; 275 return mg ? (aio_req)mg->mg_ptr : 0;
276}
277
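/* Descriptive note (not in the original source): REQ_GROUP requests reuse
 * several generic fields:
 *   length  number of sub-requests still outstanding
 *   fd      group state: 0 = pending, 1 = delayed in poll_cb,
 *           2 = finished (no more requests may be added)
 *   fd2     feeder limit (keep feeding while length < fd2)
 *   fh2     feeder callback installed by the GRP feed method
 *   data    AV holding the values passed to the GRP result method
 * aio_grp_feed and aio_grp_dec below drive this machinery. */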
278static void aio_grp_feed (aio_req grp)
279{
280 while (grp->length < grp->fd2 && !(grp->flags & FLAG_CANCELLED))
281 {
282 int old_len = grp->length;
283
284 if (grp->fh2 && SvOK (grp->fh2))
285 {
286 dSP;
287
288 ENTER;
289 SAVETMPS;
290 PUSHMARK (SP);
291 XPUSHs (req_sv (grp, AIO_GRP_KLASS));
292 PUTBACK;
293 call_sv (grp->fh2, G_VOID | G_EVAL | G_KEEPERR);
294 SPAGAIN;
295 FREETMPS;
296 LEAVE;
297 }
298
299 /* stop if no progress has been made */
300 if (old_len == grp->length)
301 {
302 SvREFCNT_dec (grp->fh2);
303 grp->fh2 = 0;
304 break;
305 }
306 }
307}
308
309static void aio_grp_dec (aio_req grp)
310{
311 --grp->length;
312
313 /* call feeder, if applicable */
314 aio_grp_feed (grp);
315
316 /* finish, if done */
317 if (!grp->length && grp->fd)
318 {
319 req_invoke (grp);
320 req_free (grp);
321 }
322}
323
324static void poll_wait ()
325{
326 fd_set rfd;
327
328 while (nreqs)
329 {
330 int size;
                                                   331#if !(__i386 || __x86_64) /* safe without semaphore on these archs */
332 LOCK (reslock);
333#endif
334 size = res_queue.size;
                                                   335#if !(__i386 || __x86_64) /* safe without semaphore on these archs */
336 UNLOCK (reslock);
337#endif
338
339 if (size)
340 return;
341
342 FD_ZERO(&rfd);
343 FD_SET(respipe [0], &rfd);
344
345 select (respipe [0] + 1, &rfd, 0, 0, 0);
346 }
347}
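/* Editorial note: poll_wait above and poll_cb/aio_proc below rely on the
 * classic self-pipe wakeup: a worker writes one dummy byte to respipe [1]
 * when the result queue becomes non-empty, and the perl side selects on
 * respipe [0] until that byte arrives, draining the pipe once the result
 * queue is empty again.  A minimal standalone sketch of the pattern, with
 * hypothetical names and no error handling, just to illustrate the idea:
 *
 *   static int wakeup_pipe [2];            // set up once with pipe ()
 *
 *   static void notify (void)              // producer (worker) thread
 *   {
 *     char dummy = 0;
 *     write (wakeup_pipe [1], &dummy, 1);
 *   }
 *
 *   static void wait_for_results (void)    // consumer thread
 *   {
 *     fd_set rfd;
 *     char buf [32];
 *
 *     FD_ZERO (&rfd);
 *     FD_SET (wakeup_pipe [0], &rfd);
 *     select (wakeup_pipe [0] + 1, &rfd, 0, 0, 0);
 *
 *     while (read (wakeup_pipe [0], buf, sizeof (buf)) == sizeof (buf))
 *       ;                                  // drain pending wakeups
 *   }
 */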
348
349static void req_invoke (aio_req req)
350{
351 dSP;
352
353 if (!(req->flags & FLAG_CANCELLED) && SvOK (req->callback))
354 {
355 errno = req->errorno;
356
357 ENTER;
358 SAVETMPS;
359 PUSHMARK (SP);
360 EXTEND (SP, 1);
361
362 switch (req->type)
363 {
364 case REQ_READDIR:
365 {
366 SV *rv = &PL_sv_undef;
367
368 if (req->result >= 0)
369 {
370 int i;
371 char *buf = req->data2ptr;
372 AV *av = newAV ();
373
374 av_extend (av, req->result - 1);
375
376 for (i = 0; i < req->result; ++i)
377 {
378 SV *sv = newSVpv (buf, 0);
379
380 av_store (av, i, sv);
381 buf += SvCUR (sv) + 1;
382 }
383
384 rv = sv_2mortal (newRV_noinc ((SV *)av));
385 }
386
387 PUSHs (rv);
388 }
389 break;
390
391 case REQ_OPEN:
392 {
393 /* convert fd to fh */
394 SV *fh;
395
396 PUSHs (sv_2mortal (newSViv (req->result)));
397 PUTBACK;
398 call_pv ("IO::AIO::_fd2fh", G_SCALAR | G_EVAL);
399 SPAGAIN;
400
401 fh = SvREFCNT_inc (POPs);
402
403 PUSHMARK (SP);
404 XPUSHs (sv_2mortal (fh));
405 }
406 break;
407
408 case REQ_GROUP:
409 req->fd = 2; /* mark group as finished */
410
411 if (req->data)
412 {
413 int i;
414 AV *av = (AV *)req->data;
415
416 EXTEND (SP, AvFILL (av) + 1);
417 for (i = 0; i <= AvFILL (av); ++i)
418 PUSHs (*av_fetch (av, i, 0));
419 }
420 break;
421
422 case REQ_NOP:
423 case REQ_BUSY:
424 break;
425
426 default:
427 PUSHs (sv_2mortal (newSViv (req->result)));
428 break;
429 }
430
431
432 PUTBACK;
433 call_sv (req->callback, G_VOID | G_EVAL);
434 SPAGAIN;
435
436 FREETMPS;
437 LEAVE;
438 }
439
440 if (req->grp)
441 {
442 aio_req grp = req->grp;
443
444 /* unlink request */
445 if (req->grp_next) req->grp_next->grp_prev = req->grp_prev;
446 if (req->grp_prev) req->grp_prev->grp_next = req->grp_next;
447
448 if (grp->grp_first == req)
449 grp->grp_first = req->grp_next;
450
451 aio_grp_dec (grp);
452 }
453
454 if (SvTRUE (ERRSV))
455 {
456 req_free (req);
457 croak (0);
458 }
152} 459}
153 460
154static void req_free (aio_req req) 461static void req_free (aio_req req)
155{ 462{
156 if (req->self) 463 if (req->self)
157 { 464 {
158 sv_unmagic (req->self, PERL_MAGIC_ext); 465 sv_unmagic (req->self, PERL_MAGIC_ext);
159 SvREFCNT_dec (req->self); 466 SvREFCNT_dec (req->self);
160 } 467 }
161 468
162 if (req->data)
163 SvREFCNT_dec (req->data); 469 SvREFCNT_dec (req->data);
164
165 if (req->fh)
166 SvREFCNT_dec (req->fh); 470 SvREFCNT_dec (req->fh);
167
168 if (req->fh2)
169 SvREFCNT_dec (req->fh2); 471 SvREFCNT_dec (req->fh2);
170
171 if (req->statdata)
172 Safefree (req->statdata);
173
174 if (req->callback)
175 SvREFCNT_dec (req->callback); 472 SvREFCNT_dec (req->callback);
473 Safefree (req->statdata);
176 474
177 if (req->type == REQ_READDIR && req->result >= 0) 475 if (req->type == REQ_READDIR)
178 free (req->data2ptr); 476 free (req->data2ptr);
179 477
180 Safefree (req); 478 Safefree (req);
181} 479}
182 480
183static void 481static void req_cancel_subs (aio_req grp)
184poll_wait ()
185{ 482{
186 if (nreqs && !ress) 483 aio_req sub;
187 {
188 fd_set rfd;
189 FD_ZERO(&rfd);
190 FD_SET(respipe [0], &rfd);
191 484
192 select (respipe [0] + 1, &rfd, 0, 0, 0); 485 if (grp->type != REQ_GROUP)
193 } 486 return;
194}
195 487
196static int 488 SvREFCNT_dec (grp->fh2);
197poll_cb () 489 grp->fh2 = 0;
490
491 for (sub = grp->grp_first; sub; sub = sub->grp_next)
492 req_cancel (sub);
493}
494
495static void req_cancel (aio_req req)
496{
497 req->flags |= FLAG_CANCELLED;
498
499 req_cancel_subs (req);
500}
501
502static int poll_cb (int max)
198{ 503{
199 dSP; 504 dSP;
200 int count = 0; 505 int count = 0;
201 int do_croak = 0; 506 int do_croak = 0;
202 aio_req req; 507 aio_req req;
203 508
204 for (;;) 509 for (;;)
205 { 510 {
206 pthread_mutex_lock (&reslock); 511 while (max <= 0 || count < max)
207 req = ress;
208
209 if (req)
210 { 512 {
211 ress = req->next; 513 LOCK (reslock);
514 req = reqq_shift (&res_queue);
212 515
213 if (!ress) 516 if (req)
214 { 517 {
518 if (!res_queue.size)
519 {
215 /* read any signals sent by the worker threads */ 520 /* read any signals sent by the worker threads */
216 char buf [32]; 521 char buf [32];
217 while (read (respipe [0], buf, 32) == 32) 522 while (read (respipe [0], buf, 32) == 32)
523 ;
218 ; 524 }
219
220 rese = 0;
221 } 525 }
222 }
223 526
224 pthread_mutex_unlock (&reslock); 527 UNLOCK (reslock);
225 528
226 if (!req) 529 if (!req)
227 break; 530 break;
228 531
229 nreqs--; 532 --nreqs;
230 533
231 if (req->type == REQ_QUIT) 534 if (req->type == REQ_QUIT)
232 started--; 535 --started;
233 else 536 else if (req->type == REQ_GROUP && req->length)
234 {
235 int errorno = errno;
236 errno = req->errorno;
237
238 if (req->type == REQ_READ)
239 SvCUR_set (req->data, req->dataoffset + (req->result > 0 ? req->result : 0));
240
241 if (req->data2ptr && (req->type == REQ_READ || req->type == REQ_WRITE))
242 SvREADONLY_off (req->data);
243
244 if (req->statdata)
245 { 537 {
246 PL_laststype = req->type == REQ_LSTAT ? OP_LSTAT : OP_STAT; 538 req->fd = 1; /* mark request as delayed */
247 PL_laststatval = req->result; 539 continue;
248 PL_statcache = *(req->statdata);
249 }
250
251 ENTER;
252 PUSHMARK (SP);
253
254 if (req->type == REQ_READDIR)
255 {
256 SV *rv = &PL_sv_undef;
257
258 if (req->result >= 0)
259 {
260 char *buf = req->data2ptr;
261 AV *av = newAV ();
262
263 while (req->result)
264 {
265 SV *sv = newSVpv (buf, 0);
266
267 av_push (av, sv);
268 buf += SvCUR (sv) + 1;
269 req->result--;
270 }
271
272 rv = sv_2mortal (newRV_noinc ((SV *)av));
273 }
274
275 XPUSHs (rv);
276 } 540 }
277 else 541 else
278 { 542 {
279 XPUSHs (sv_2mortal (newSViv (req->result)));
280
281 if (req->type == REQ_OPEN) 543 if (req->type == REQ_READ)
544 SvCUR_set (req->data, req->dataoffset + (req->result > 0 ? req->result : 0));
545
546 if (req->data2ptr && (req->type == REQ_READ || req->type == REQ_WRITE))
547 SvREADONLY_off (req->data);
548
549 if (req->statdata)
282 { 550 {
283 /* convert fd to fh */ 551 PL_laststype = req->type == REQ_LSTAT ? OP_LSTAT : OP_STAT;
284 SV *fh; 552 PL_laststatval = req->result;
285 553 PL_statcache = *(req->statdata);
286 PUTBACK;
287 call_pv ("IO::AIO::_fd2fh", G_SCALAR | G_EVAL);
288 SPAGAIN;
289
290 fh = SvREFCNT_inc (POPs);
291
292 PUSHMARK (SP);
293 XPUSHs (sv_2mortal (fh));
294 } 554 }
555
556 req_invoke (req);
557
558 count++;
295 } 559 }
296 560
297 if (SvOK (req->callback) && !req->cancelled)
298 {
299 PUTBACK;
300 call_sv (req->callback, G_VOID | G_EVAL);
301 SPAGAIN;
302
303 if (SvTRUE (ERRSV))
304 {
305 req_free (req); 561 req_free (req);
306 croak (0);
307 }
308 }
309
310 LEAVE;
311
312 errno = errorno;
313 count++;
314 } 562 }
315 563
316 req_free (req); 564 if (nreqs <= max_outstanding)
565 break;
566
567 poll_wait ();
568
569 max = 0;
317 } 570 }
318 571
319 return count; 572 return count;
320} 573}
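/* Descriptive note (not in the original source): poll_cb (max) handles up
 * to max results per inner pass (all of them when max <= 0).  The outer
 * loop only exits once nreqs has dropped to max_outstanding, blocking in
 * poll_wait otherwise, which is how the max_outstanding limit set from
 * perl is enforced. */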
321 574
322static void *aio_proc(void *arg); 575static void *aio_proc(void *arg);
323 576
324static void
325start_thread (void) 577static void start_thread (void)
326{ 578{
327 sigset_t fullsigset, oldsigset; 579 sigset_t fullsigset, oldsigset;
328 pthread_t tid;
329 pthread_attr_t attr; 580 pthread_attr_t attr;
581
582 worker *wrk = calloc (1, sizeof (worker));
583
584 if (!wrk)
585 croak ("unable to allocate worker thread data");
330 586
331 pthread_attr_init (&attr); 587 pthread_attr_init (&attr);
332 pthread_attr_setstacksize (&attr, STACKSIZE); 588 pthread_attr_setstacksize (&attr, STACKSIZE);
333 pthread_attr_setdetachstate (&attr, PTHREAD_CREATE_DETACHED); 589 pthread_attr_setdetachstate (&attr, PTHREAD_CREATE_DETACHED);
334 590
335 sigfillset (&fullsigset); 591 sigfillset (&fullsigset);
592
593 LOCK (wrklock);
336 sigprocmask (SIG_SETMASK, &fullsigset, &oldsigset); 594 sigprocmask (SIG_SETMASK, &fullsigset, &oldsigset);
337 595
338 if (pthread_create (&tid, &attr, aio_proc, 0) == 0) 596 if (pthread_create (&wrk->tid, &attr, aio_proc, (void *)wrk) == 0)
597 {
598 wrk->prev = &wrk_first;
599 wrk->next = wrk_first.next;
600 wrk_first.next->prev = wrk;
601 wrk_first.next = wrk;
339 started++; 602 ++started;
603 }
604 else
605 free (wrk);
340 606
341 sigprocmask (SIG_SETMASK, &oldsigset, 0); 607 sigprocmask (SIG_SETMASK, &oldsigset, 0);
608 UNLOCK (wrklock);
342} 609}
343 610
344static void
345req_send (aio_req req) 611static void req_send (aio_req req)
346{ 612{
347 while (started < wanted && nreqs >= started) 613 while (started < wanted && nreqs >= started)
348 start_thread (); 614 start_thread ();
349 615
350 nreqs++; 616 ++nreqs;
351 617
352 pthread_mutex_lock (&reqlock); 618 LOCK (reqlock);
353 619 reqq_push (&req_queue, req);
354 req->next = 0;
355
356 if (reqe)
357 {
358 reqe->next = req;
359 reqe = req;
360 }
361 else
362 reqe = reqs = req;
363
364 pthread_cond_signal (&reqwait); 620 pthread_cond_signal (&reqwait);
365 pthread_mutex_unlock (&reqlock); 621 UNLOCK (reqlock);
366
367 if (nreqs > max_outstanding)
368 for (;;)
369 {
370 poll_cb ();
371
372 if (nreqs <= max_outstanding)
373 break;
374
375 poll_wait ();
376 }
377} 622}
378 623
379static void 624static void end_thread (void)
380end_thread (void)
381{ 625{
382 aio_req req; 626 aio_req req;
627
383 Newz (0, req, 1, aio_cb); 628 Newz (0, req, 1, aio_cb);
629
384 req->type = REQ_QUIT; 630 req->type = REQ_QUIT;
631 req->pri = PRI_MAX + PRI_BIAS;
385 632
386 req_send (req); 633 req_send (req);
387} 634}
388 635
389static void min_parallel (int nthreads) 636static void min_parallel (int nthreads)
406 } 653 }
407 654
408 while (started > wanted) 655 while (started > wanted)
409 { 656 {
410 poll_wait (); 657 poll_wait ();
411 poll_cb (); 658 poll_cb (0);
412 } 659 }
413} 660}
414 661
415static void create_pipe () 662static void create_pipe ()
416{ 663{
436 * normal read/write by using a mutex. slows down execution a lot, 683 * normal read/write by using a mutex. slows down execution a lot,
437 * but that's your problem, not mine. 684 * but that's your problem, not mine.
438 */ 685 */
439static pthread_mutex_t preadwritelock = PTHREAD_MUTEX_INITIALIZER; 686static pthread_mutex_t preadwritelock = PTHREAD_MUTEX_INITIALIZER;
440 687
441static ssize_t 688static ssize_t pread (int fd, void *buf, size_t count, off_t offset)
442pread (int fd, void *buf, size_t count, off_t offset)
443{ 689{
444 ssize_t res; 690 ssize_t res;
445 off_t ooffset; 691 off_t ooffset;
446 692
447 pthread_mutex_lock (&preadwritelock); 693 LOCK (preadwritelock);
448 ooffset = lseek (fd, 0, SEEK_CUR); 694 ooffset = lseek (fd, 0, SEEK_CUR);
449 lseek (fd, offset, SEEK_SET); 695 lseek (fd, offset, SEEK_SET);
450 res = read (fd, buf, count); 696 res = read (fd, buf, count);
451 lseek (fd, ooffset, SEEK_SET); 697 lseek (fd, ooffset, SEEK_SET);
452 pthread_mutex_unlock (&preadwritelock); 698 UNLOCK (preadwritelock);
453 699
454 return res; 700 return res;
455} 701}
456 702
457static ssize_t
458pwrite (int fd, void *buf, size_t count, off_t offset) 703static ssize_t pwrite (int fd, void *buf, size_t count, off_t offset)
459{ 704{
460 ssize_t res; 705 ssize_t res;
461 off_t ooffset; 706 off_t ooffset;
462 707
463 pthread_mutex_lock (&preadwritelock); 708 LOCK (preadwritelock);
464 ooffset = lseek (fd, 0, SEEK_CUR); 709 ooffset = lseek (fd, 0, SEEK_CUR);
465 lseek (fd, offset, SEEK_SET); 710 lseek (fd, offset, SEEK_SET);
466 res = write (fd, buf, count); 711 res = write (fd, buf, count);
467 lseek (fd, offset, SEEK_SET); 712 lseek (fd, offset, SEEK_SET);
468 pthread_mutex_unlock (&preadwritelock); 713 UNLOCK (preadwritelock);
469 714
470 return res; 715 return res;
471} 716}
472#endif 717#endif
473 718
474#if !HAVE_FDATASYNC 719#if !HAVE_FDATASYNC
475# define fdatasync fsync 720# define fdatasync fsync
476#endif 721#endif
477 722
478#if !HAVE_READAHEAD 723#if !HAVE_READAHEAD
479# define readahead aio_readahead 724# define readahead(fd,offset,count) aio_readahead (fd, offset, count, self)
480 725
481static ssize_t 726static ssize_t aio_readahead (int fd, off_t offset, size_t count, worker *self)
482readahead (int fd, off_t offset, size_t count)
483{ 727{
484 char readahead_buf[4096]; 728 dBUF;
485 729
486 while (count > 0) 730 while (count > 0)
487 { 731 {
488 size_t len = count < sizeof (readahead_buf) ? count : sizeof (readahead_buf); 732 size_t len = count < AIO_BUFSIZE ? count : AIO_BUFSIZE;
489 733
490 pread (fd, readahead_buf, len, offset); 734 pread (fd, aio_buf, len, offset);
491 offset += len; 735 offset += len;
492 count -= len; 736 count -= len;
493 } 737 }
494 738
495 errno = 0; 739 errno = 0;
496} 740}
741
497#endif 742#endif
498 743
499#if !HAVE_READDIR_R 744#if !HAVE_READDIR_R
500# define readdir_r aio_readdir_r 745# define readdir_r aio_readdir_r
501 746
502static pthread_mutex_t readdirlock = PTHREAD_MUTEX_INITIALIZER; 747static pthread_mutex_t readdirlock = PTHREAD_MUTEX_INITIALIZER;
503 748
504static int
505readdir_r (DIR *dirp, struct dirent *ent, struct dirent **res) 749static int readdir_r (DIR *dirp, struct dirent *ent, struct dirent **res)
506{ 750{
507 struct dirent *e; 751 struct dirent *e;
508 int errorno; 752 int errorno;
509 753
510 pthread_mutex_lock (&readdirlock); 754 LOCK (readdirlock);
511 755
512 e = readdir (dirp); 756 e = readdir (dirp);
513 errorno = errno; 757 errorno = errno;
514 758
515 if (e) 759 if (e)
518 strcpy (ent->d_name, e->d_name); 762 strcpy (ent->d_name, e->d_name);
519 } 763 }
520 else 764 else
521 *res = 0; 765 *res = 0;
522 766
523 pthread_mutex_unlock (&readdirlock); 767 UNLOCK (readdirlock);
524 768
525 errno = errorno; 769 errno = errorno;
526 return e ? 0 : -1; 770 return e ? 0 : -1;
527} 771}
528#endif 772#endif
529 773
530/* sendfile always needs emulation */ 774/* sendfile always needs emulation */
531static ssize_t
532sendfile_ (int ofd, int ifd, off_t offset, size_t count) 775static ssize_t sendfile_ (int ofd, int ifd, off_t offset, size_t count, worker *self)
533{ 776{
534 ssize_t res; 777 ssize_t res;
535 778
536 if (!count) 779 if (!count)
537 return 0; 780 return 0;
548 { 791 {
549 off_t sbytes; 792 off_t sbytes;
550 res = sendfile (ifd, ofd, offset, count, 0, &sbytes, 0); 793 res = sendfile (ifd, ofd, offset, count, 0, &sbytes, 0);
551 794
552 if (res < 0 && sbytes) 795 if (res < 0 && sbytes)
553 /* maybe only on EAGAIN only: as usual, the manpage leaves you guessing */ 796 /* maybe only on EAGAIN: as usual, the manpage leaves you guessing */
554 res = sbytes; 797 res = sbytes;
555 } 798 }
556 799
557# elif __hpux 800# elif __hpux
558 res = sendfile (ofd, ifd, offset, count, 0, 0); 801 res = sendfile (ofd, ifd, offset, count, 0, 0);
586#endif 829#endif
587 ) 830 )
588 ) 831 )
589 { 832 {
590 /* emulate sendfile. this is a major pain in the ass */ 833 /* emulate sendfile. this is a major pain in the ass */
591 char buf[4096]; 834 dBUF;
835
592 res = 0; 836 res = 0;
593 837
594 while (count) 838 while (count)
595 { 839 {
596 ssize_t cnt; 840 ssize_t cnt;
597 841
598 cnt = pread (ifd, buf, count > 4096 ? 4096 : count, offset); 842 cnt = pread (ifd, aio_buf, count > AIO_BUFSIZE ? AIO_BUFSIZE : count, offset);
599 843
600 if (cnt <= 0) 844 if (cnt <= 0)
601 { 845 {
602 if (cnt && !res) res = -1; 846 if (cnt && !res) res = -1;
603 break; 847 break;
604 } 848 }
605 849
606 cnt = write (ofd, buf, cnt); 850 cnt = write (ofd, aio_buf, cnt);
607 851
608 if (cnt <= 0) 852 if (cnt <= 0)
609 { 853 {
610 if (cnt && !res) res = -1; 854 if (cnt && !res) res = -1;
611 break; 855 break;
619 863
620 return res; 864 return res;
621} 865}
622 866
623/* read a full directory */ 867/* read a full directory */
624static int 868static void scandir_ (aio_req req, worker *self)
625scandir_ (const char *path, void **namesp)
626{ 869{
627 DIR *dirp = opendir (path); 870 DIR *dirp;
628 union 871 union
629 { 872 {
630 struct dirent d; 873 struct dirent d;
631 char b [offsetof (struct dirent, d_name) + NAME_MAX + 1]; 874 char b [offsetof (struct dirent, d_name) + NAME_MAX + 1];
632 } u; 875 } *u;
633 struct dirent *entp; 876 struct dirent *entp;
634 char *name, *names; 877 char *name, *names;
635 int memlen = 4096; 878 int memlen = 4096;
636 int memofs = 0; 879 int memofs = 0;
637 int res = 0; 880 int res = 0;
638 int errorno; 881 int errorno;
639 882
640 if (!dirp) 883 LOCK (wrklock);
641 return -1; 884 self->dirp = dirp = opendir (req->dataptr);
642 885 self->dbuf = u = malloc (sizeof (*u));
643 names = malloc (memlen); 886 req->data2ptr = names = malloc (memlen);
887 UNLOCK (wrklock);
644 888
889 if (dirp && u && names)
645 for (;;) 890 for (;;)
646 { 891 {
892 errno = 0;
647 errno = 0, readdir_r (dirp, &u.d, &entp); 893 readdir_r (dirp, &u->d, &entp);
648 894
649 if (!entp) 895 if (!entp)
650 break; 896 break;
651 897
652 name = entp->d_name; 898 name = entp->d_name;
653 899
654 if (name [0] != '.' || (name [1] && (name [1] != '.' || name [2]))) 900 if (name [0] != '.' || (name [1] && (name [1] != '.' || name [2])))
655 { 901 {
656 int len = strlen (name) + 1; 902 int len = strlen (name) + 1;
657 903
658 res++; 904 res++;
659 905
660 while (memofs + len > memlen) 906 while (memofs + len > memlen)
661 { 907 {
662 memlen *= 2; 908 memlen *= 2;
909 LOCK (wrklock);
663 names = realloc (names, memlen); 910 req->data2ptr = names = realloc (names, memlen);
911 UNLOCK (wrklock);
912
664 if (!names) 913 if (!names)
665 break; 914 break;
666 } 915 }
667 916
668 memcpy (names + memofs, name, len); 917 memcpy (names + memofs, name, len);
669 memofs += len; 918 memofs += len;
670 } 919 }
671 } 920 }
672 921
673 errorno = errno;
674 closedir (dirp);
675
676 if (errorno) 922 if (errno)
677 {
678 free (names);
679 errno = errorno;
680 res = -1; 923 res = -1;
681 } 924
682 925 req->result = res;
683 *namesp = (void *)names;
684 return res;
685} 926}
686 927
687/*****************************************************************************/ 928/*****************************************************************************/
688 929
689static void *
690aio_proc (void *thr_arg) 930static void *aio_proc (void *thr_arg)
691{ 931{
692 aio_req req; 932 aio_req req;
693 int type; 933 int type;
934 worker *self = (worker *)thr_arg;
694 935
695 do 936 do
696 { 937 {
697 pthread_mutex_lock (&reqlock); 938 LOCK (reqlock);
698 939
699 for (;;) 940 for (;;)
700 { 941 {
701 req = reqs; 942 self->req = req = reqq_shift (&req_queue);
702
703 if (reqs)
704 {
705 reqs = reqs->next;
706 if (!reqs) reqe = 0;
707 }
708 943
709 if (req) 944 if (req)
710 break; 945 break;
711 946
712 pthread_cond_wait (&reqwait, &reqlock); 947 pthread_cond_wait (&reqwait, &reqlock);
713 } 948 }
714 949
715 pthread_mutex_unlock (&reqlock); 950 UNLOCK (reqlock);
716 951
717 errno = 0; /* strictly unnecessary */ 952 errno = 0; /* strictly unnecessary */
953 type = req->type; /* remember type for QUIT check */
718 954
719 if (!req->cancelled) 955 if (!(req->flags & FLAG_CANCELLED))
720 switch (req->type) 956 switch (type)
721 { 957 {
722 case REQ_READ: req->result = pread (req->fd, req->dataptr, req->length, req->offset); break; 958 case REQ_READ: req->result = pread (req->fd, req->dataptr, req->length, req->offset); break;
723 case REQ_WRITE: req->result = pwrite (req->fd, req->dataptr, req->length, req->offset); break; 959 case REQ_WRITE: req->result = pwrite (req->fd, req->dataptr, req->length, req->offset); break;
724 960
725 case REQ_READAHEAD: req->result = readahead (req->fd, req->offset, req->length); break; 961 case REQ_READAHEAD: req->result = readahead (req->fd, req->offset, req->length); break;
726 case REQ_SENDFILE: req->result = sendfile_ (req->fd, req->fd2, req->offset, req->length); break; 962 case REQ_SENDFILE: req->result = sendfile_ (req->fd, req->fd2, req->offset, req->length, self); break;
727 963
728 case REQ_STAT: req->result = stat (req->dataptr, req->statdata); break; 964 case REQ_STAT: req->result = stat (req->dataptr, req->statdata); break;
729 case REQ_LSTAT: req->result = lstat (req->dataptr, req->statdata); break; 965 case REQ_LSTAT: req->result = lstat (req->dataptr, req->statdata); break;
730 case REQ_FSTAT: req->result = fstat (req->fd , req->statdata); break; 966 case REQ_FSTAT: req->result = fstat (req->fd , req->statdata); break;
731 967
737 case REQ_LINK: req->result = link (req->data2ptr, req->dataptr); break; 973 case REQ_LINK: req->result = link (req->data2ptr, req->dataptr); break;
738 case REQ_SYMLINK: req->result = symlink (req->data2ptr, req->dataptr); break; 974 case REQ_SYMLINK: req->result = symlink (req->data2ptr, req->dataptr); break;
739 975
740 case REQ_FDATASYNC: req->result = fdatasync (req->fd); break; 976 case REQ_FDATASYNC: req->result = fdatasync (req->fd); break;
741 case REQ_FSYNC: req->result = fsync (req->fd); break; 977 case REQ_FSYNC: req->result = fsync (req->fd); break;
742 case REQ_READDIR: req->result = scandir_ (req->dataptr, &req->data2ptr); break; 978 case REQ_READDIR: scandir_ (req, self); break;
743 979
980 case REQ_BUSY:
981 {
982 struct timeval tv;
983
984 tv.tv_sec = req->fd;
985 tv.tv_usec = req->fd2;
986
987 req->result = select (0, 0, 0, 0, &tv);
988 }
989
990 case REQ_GROUP:
991 case REQ_NOP:
744 case REQ_QUIT: 992 case REQ_QUIT:
745 break; 993 break;
746 994
747 default: 995 default:
748 req->result = ENOSYS; 996 req->result = ENOSYS;
749 break; 997 break;
750 } 998 }
751 999
752 req->errorno = errno; 1000 req->errorno = errno;
753 1001
754 pthread_mutex_lock (&reslock); 1002 LOCK (reslock);
755 1003
756 req->next = 0; 1004 if (!reqq_push (&res_queue, req))
757
758 if (rese)
759 {
760 rese->next = req;
761 rese = req;
762 }
763 else
764 {
765 rese = ress = req;
766
767 /* write a dummy byte to the pipe so fh becomes ready */ 1005 /* write a dummy byte to the pipe so fh becomes ready */
768 write (respipe [1], &respipe, 1); 1006 write (respipe [1], &respipe, 1);
769 }
770 1007
771 pthread_mutex_unlock (&reslock); 1008 self->req = 0;
1009 worker_clear (self);
1010
1011 UNLOCK (reslock);
772 } 1012 }
773 while (type != REQ_QUIT); 1013 while (type != REQ_QUIT);
774 1014
1015 LOCK (wrklock);
1016 worker_free (self);
1017 UNLOCK (wrklock);
1018
775 return 0; 1019 return 0;
776} 1020}
777 1021
778/*****************************************************************************/ 1022/*****************************************************************************/
779 1023
780static void atfork_prepare (void) 1024static void atfork_prepare (void)
781{ 1025{
782 pthread_mutex_lock (&reqlock); 1026 LOCK (wrklock);
783 pthread_mutex_lock (&reslock); 1027 LOCK (reqlock);
1028 LOCK (reslock);
784#if !HAVE_PREADWRITE 1029#if !HAVE_PREADWRITE
785 pthread_mutex_lock (&preadwritelock); 1030 LOCK (preadwritelock);
786#endif 1031#endif
787#if !HAVE_READDIR_R 1032#if !HAVE_READDIR_R
788 pthread_mutex_lock (&readdirlock); 1033 LOCK (readdirlock);
789#endif 1034#endif
790} 1035}
791 1036
792static void atfork_parent (void) 1037static void atfork_parent (void)
793{ 1038{
794#if !HAVE_READDIR_R 1039#if !HAVE_READDIR_R
795 pthread_mutex_unlock (&readdirlock); 1040 UNLOCK (readdirlock);
796#endif 1041#endif
797#if !HAVE_PREADWRITE 1042#if !HAVE_PREADWRITE
798 pthread_mutex_unlock (&preadwritelock); 1043 UNLOCK (preadwritelock);
799#endif 1044#endif
800 pthread_mutex_unlock (&reslock); 1045 UNLOCK (reslock);
801 pthread_mutex_unlock (&reqlock); 1046 UNLOCK (reqlock);
1047 UNLOCK (wrklock);
802} 1048}
803 1049
804static void atfork_child (void) 1050static void atfork_child (void)
805{ 1051{
806 aio_req prv; 1052 aio_req prv;
807 1053
1054 while (prv = reqq_shift (&req_queue))
1055 req_free (prv);
1056
1057 while (prv = reqq_shift (&res_queue))
1058 req_free (prv);
1059
1060 while (wrk_first.next != &wrk_first)
1061 {
1062 worker *wrk = wrk_first.next;
1063
1064 if (wrk->req)
1065 req_free (wrk->req);
1066
1067 worker_clear (wrk);
1068 worker_free (wrk);
1069 }
1070
808 started = 0; 1071 started = 0;
809 1072 nreqs = 0;
810 while (reqs)
811 {
812 prv = reqs;
813 reqs = prv->next;
814 req_free (prv);
815 }
816
817 reqs = reqe = 0;
818
819 while (ress)
820 {
821 prv = ress;
822 ress = prv->next;
823 req_free (prv);
824 }
825
826 ress = rese = 0;
827 1073
828 close (respipe [0]); 1074 close (respipe [0]);
829 close (respipe [1]); 1075 close (respipe [1]);
830 create_pipe (); 1076 create_pipe ();
831 1077
832 atfork_parent (); 1078 atfork_parent ();
833} 1079}
834 1080
835#define dREQ \ 1081#define dREQ \
836 aio_req req; \ 1082 aio_req req; \
1083 int req_pri = next_pri; \
1084 next_pri = DEFAULT_PRI + PRI_BIAS; \
837 \ 1085 \
838 if (SvOK (callback) && !SvROK (callback)) \ 1086 if (SvOK (callback) && !SvROK (callback)) \
839 croak ("callback must be undef or of reference type"); \ 1087 croak ("callback must be undef or of reference type"); \
840 \ 1088 \
841 Newz (0, req, 1, aio_cb); \ 1089 Newz (0, req, 1, aio_cb); \
842 if (!req) \ 1090 if (!req) \
843 croak ("out of memory during aio_req allocation"); \ 1091 croak ("out of memory during aio_req allocation"); \
844 \ 1092 \
845 req->callback = newSVsv (callback) 1093 req->callback = newSVsv (callback); \
1094 req->pri = req_pri
846 1095
847#define REQ_SEND \ 1096#define REQ_SEND \
848 req_send (req); \ 1097 req_send (req); \
849 \ 1098 \
850 if (GIMME_V != G_VOID) \ 1099 if (GIMME_V != G_VOID) \
851 XPUSHs (req_sv (req)); 1100 XPUSHs (req_sv (req, AIO_REQ_KLASS));
852 1101
853MODULE = IO::AIO PACKAGE = IO::AIO 1102MODULE = IO::AIO PACKAGE = IO::AIO
854 1103
855PROTOTYPES: ENABLE 1104PROTOTYPES: ENABLE
856 1105
864 create_pipe (); 1113 create_pipe ();
865 pthread_atfork (atfork_prepare, atfork_parent, atfork_child); 1114 pthread_atfork (atfork_prepare, atfork_parent, atfork_child);
866} 1115}
867 1116
868void 1117void
869min_parallel (nthreads) 1118min_parallel (int nthreads)
870 int nthreads
871 PROTOTYPE: $ 1119 PROTOTYPE: $
872 1120
873void 1121void
874max_parallel (nthreads) 1122max_parallel (int nthreads)
875 int nthreads
876 PROTOTYPE: $ 1123 PROTOTYPE: $
877 1124
878int 1125int
879max_outstanding (nreqs) 1126max_outstanding (int maxreqs)
880 int nreqs 1127 PROTOTYPE: $
881 PROTOTYPE: $
882 CODE: 1128 CODE:
883 RETVAL = max_outstanding; 1129 RETVAL = max_outstanding;
884 max_outstanding = nreqs; 1130 max_outstanding = maxreqs;
1131 OUTPUT:
1132 RETVAL
885 1133
886void 1134void
887aio_open (pathname,flags,mode,callback=&PL_sv_undef) 1135aio_open (pathname,flags,mode,callback=&PL_sv_undef)
888 SV * pathname 1136 SV * pathname
889 int flags 1137 int flags
1103 req->dataptr = SvPVbyte_nolen (req->data); 1351 req->dataptr = SvPVbyte_nolen (req->data);
1104 1352
1105 REQ_SEND; 1353 REQ_SEND;
1106} 1354}
1107 1355
1108#if 0
1109
1110# undocumented, because it does not cancel active requests
1111void 1356void
1112cancel_most_requests () 1357aio_busy (delay,callback=&PL_sv_undef)
1358 double delay
1359 SV * callback
1360 PPCODE:
1361{
1362 dREQ;
1363
1364 req->type = REQ_BUSY;
1365 req->fd = delay < 0. ? 0 : delay;
1366 req->fd2 = delay < 0. ? 0 : 1000. * (delay - req->fd);
1367
1368 REQ_SEND;
1369}
1370
1371void
1372aio_group (callback=&PL_sv_undef)
1373 SV * callback
1113 PROTOTYPE: 1374 PROTOTYPE: ;$
1114 CODE: 1375 PPCODE:
1115{ 1376{
1116 aio_req *req; 1377 dREQ;
1117 1378
1118 pthread_mutex_lock (&reqlock); 1379 req->type = REQ_GROUP;
1119 for (req = reqs; req; req = req->next) 1380 req_send (req);
1120 req->flags |= 1;
1121 pthread_mutex_unlock (&reqlock);
1122 1381
1123 pthread_mutex_lock (&reslock); 1382 XPUSHs (req_sv (req, AIO_GRP_KLASS));
1124 for (req = ress; req; req = req->next)
1125 req->flags |= 1;
1126 pthread_mutex_unlock (&reslock);
1127} 1383}
1128 1384
1129#endif 1385void
1386aio_nop (callback=&PL_sv_undef)
1387 SV * callback
1388 PPCODE:
1389{
1390 dREQ;
1391
1392 req->type = REQ_NOP;
1393
1394 REQ_SEND;
1395}
1396
1397void
1398aioreq_pri (int pri = DEFAULT_PRI)
1399 CODE:
1400 if (pri < PRI_MIN) pri = PRI_MIN;
1401 if (pri > PRI_MAX) pri = PRI_MAX;
1402 next_pri = pri + PRI_BIAS;
1403
1404void
1405aioreq_nice (int nice = 0)
1406 CODE:
1407 nice = next_pri - nice;
1408 if (nice < PRI_MIN) nice = PRI_MIN;
1409 if (nice > PRI_MAX) nice = PRI_MAX;
1410 next_pri = nice + PRI_BIAS;
1130 1411
1131void 1412void
1132flush () 1413flush ()
1133 PROTOTYPE: 1414 PROTOTYPE:
1134 CODE: 1415 CODE:
1135 while (nreqs) 1416 while (nreqs)
1136 { 1417 {
1137 poll_wait (); 1418 poll_wait ();
1138 poll_cb (); 1419 poll_cb (0);
1139 } 1420 }
1140 1421
1141void 1422void
1142poll() 1423poll()
1143 PROTOTYPE: 1424 PROTOTYPE:
1144 CODE: 1425 CODE:
1145 if (nreqs) 1426 if (nreqs)
1146 { 1427 {
1147 poll_wait (); 1428 poll_wait ();
1148 poll_cb (); 1429 poll_cb (0);
1149 } 1430 }
1150 1431
1151int 1432int
1152poll_fileno() 1433poll_fileno()
1153 PROTOTYPE: 1434 PROTOTYPE:
1158 1439
1159int 1440int
1160poll_cb(...) 1441poll_cb(...)
1161 PROTOTYPE: 1442 PROTOTYPE:
1162 CODE: 1443 CODE:
1163 RETVAL = poll_cb (); 1444 RETVAL = poll_cb (0);
1445 OUTPUT:
1446 RETVAL
1447
1448int
1449poll_some(int max = 0)
1450 PROTOTYPE: $
1451 CODE:
1452 RETVAL = poll_cb (max);
1164 OUTPUT: 1453 OUTPUT:
1165 RETVAL 1454 RETVAL
1166 1455
1167void 1456void
1168poll_wait() 1457poll_wait()
1177 CODE: 1466 CODE:
1178 RETVAL = nreqs; 1467 RETVAL = nreqs;
1179 OUTPUT: 1468 OUTPUT:
1180 RETVAL 1469 RETVAL
1181 1470
1471PROTOTYPES: DISABLE
1472
1182MODULE = IO::AIO PACKAGE = IO::AIO::CB 1473MODULE = IO::AIO PACKAGE = IO::AIO::REQ
1183 1474
1184void 1475void
1185cancel (aio_req_ornot req) 1476cancel (aio_req_ornot req)
1186 PROTOTYPE:
1187 CODE: 1477 CODE:
1188 req->cancelled = 1; 1478 req_cancel (req);
1189 1479
1480void
1481cb (aio_req_ornot req, SV *callback=&PL_sv_undef)
1482 CODE:
1483 SvREFCNT_dec (req->callback);
1484 req->callback = newSVsv (callback);
1485
1486MODULE = IO::AIO PACKAGE = IO::AIO::GRP
1487
1488void
1489add (aio_req grp, ...)
1490 PPCODE:
1491{
1492 int i;
1493 aio_req req;
1494
1495 if (grp->fd == 2)
1496 croak ("cannot add requests to IO::AIO::GRP after the group finished");
1497
1498 for (i = 1; i < items; ++i )
1499 {
1500 if (GIMME_V != G_VOID)
1501 XPUSHs (sv_2mortal (newSVsv (ST (i))));
1502
1503 req = SvAIO_REQ (ST (i));
1504
1505 if (req)
1506 {
1507 ++grp->length;
1508 req->grp = grp;
1509
1510 req->grp_prev = 0;
1511 req->grp_next = grp->grp_first;
1512
1513 if (grp->grp_first)
1514 grp->grp_first->grp_prev = req;
1515
1516 grp->grp_first = req;
1517 }
1518 }
1519}
1520
1521void
1522cancel_subs (aio_req_ornot req)
1523 CODE:
1524 req_cancel_subs (req);
1525
1526void
1527result (aio_req grp, ...)
1528 CODE:
1529{
1530 int i;
1531 AV *av = newAV ();
1532
1533 for (i = 1; i < items; ++i )
1534 av_push (av, newSVsv (ST (i)));
1535
1536 SvREFCNT_dec (grp->data);
1537 grp->data = (SV *)av;
1538}
1539
1540void
1541limit (aio_req grp, int limit)
1542 CODE:
1543 grp->fd2 = limit;
1544 aio_grp_feed (grp);
1545
1546void
1547feed (aio_req grp, SV *callback=&PL_sv_undef)
1548 CODE:
1549{
1550 SvREFCNT_dec (grp->fh2);
1551 grp->fh2 = newSVsv (callback);
1552
1553 if (grp->fd2 <= 0)
1554 grp->fd2 = 2;
1555
1556 aio_grp_feed (grp);
1557}
1558
