Comparing IO-AIO/AIO.xs (file contents):
Revision 1.48 by root, Sun Oct 22 10:33:19 2006 UTC vs.
Revision 1.69 by root, Tue Oct 24 11:57:30 2006 UTC

The listing below is ViewVC's side-by-side diff flattened to plain text: each line carries the revision 1.48 line number and text on the left and the revision 1.69 line number and text on the right. Lines that exist in only one of the two revisions show only that revision's column.

1#if __linux
2# define _GNU_SOURCE
3#endif
4
1#define _REENTRANT 1 5#define _REENTRANT 1
6
2#include <errno.h> 7#include <errno.h>
3 8
4#include "EXTERN.h" 9#include "EXTERN.h"
5#include "perl.h" 10#include "perl.h"
6#include "XSUB.h" 11#include "XSUB.h"
41# define NAME_MAX 4096 46# define NAME_MAX 4096
42#endif 47#endif
43 48
44#if __ia64 49#if __ia64
45# define STACKSIZE 65536 50# define STACKSIZE 65536
51#elif __i386 || __x86_64 /* 16k is unreasonably high :( */
52# define STACKSIZE PTHREAD_STACK_MIN
46#else 53#else
47# define STACKSIZE 8192 54# define STACKSIZE 16384
48#endif 55#endif
56
57/* buffer size for various temporary buffers */
58#define AIO_BUFSIZE 65536
59
60#define dBUF \
61 char *aio_buf = malloc (AIO_BUFSIZE); \
62 if (!aio_buf) \
63 return -1;
64
65#define fBUF free (aio_buf)
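AIO_BUFSIZE and the dBUF/fBUF pair replace the fixed 4096-byte on-stack buffers that the emulated readahead and sendfile further down used to declare. A minimal sketch of the intended usage pattern, with an illustrative function of my own rather than one taken from AIO.xs:

/* dBUF declares aio_buf, allocates AIO_BUFSIZE bytes and makes the
   enclosing function return -1 if the allocation fails; fBUF frees it. */
static ssize_t drain (int fd, off_t offset, size_t count)
{
  dBUF;

  while (count > 0)
    {
      size_t len = count < AIO_BUFSIZE ? count : AIO_BUFSIZE;

      pread (fd, aio_buf, len, offset);
      offset += len;
      count  -= len;
    }

  fBUF;
  return 0;
}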
49 66
50enum { 67enum {
51 REQ_QUIT, 68 REQ_QUIT,
52 REQ_OPEN, REQ_CLOSE, 69 REQ_OPEN, REQ_CLOSE,
53 REQ_READ, REQ_WRITE, REQ_READAHEAD, 70 REQ_READ, REQ_WRITE, REQ_READAHEAD,
55 REQ_STAT, REQ_LSTAT, REQ_FSTAT, 72 REQ_STAT, REQ_LSTAT, REQ_FSTAT,
56 REQ_FSYNC, REQ_FDATASYNC, 73 REQ_FSYNC, REQ_FDATASYNC,
57 REQ_UNLINK, REQ_RMDIR, REQ_RENAME, 74 REQ_UNLINK, REQ_RMDIR, REQ_RENAME,
58 REQ_READDIR, 75 REQ_READDIR,
59 REQ_LINK, REQ_SYMLINK, 76 REQ_LINK, REQ_SYMLINK,
60 REQ_SLEEP, 77 REQ_GROUP, REQ_NOP,
61 REQ_GROUP, 78 REQ_BUSY,
62}; 79};
63 80
64#define AIO_REQ_KLASS "IO::AIO::REQ" 81#define AIO_REQ_KLASS "IO::AIO::REQ"
65#define AIO_GRP_KLASS "IO::AIO::GRP" 82#define AIO_GRP_KLASS "IO::AIO::GRP"
66 83
67typedef struct aio_cb 84typedef struct aio_cb
68{ 85{
69 struct aio_cb *grp, *grp_prev, *grp_next;
70
71 struct aio_cb *volatile next; 86 struct aio_cb *volatile next;
72
73 SV *self; /* the perl counterpart of this request, if any */
74 87
75 SV *data, *callback; 88 SV *data, *callback;
76 SV *fh, *fh2; 89 SV *fh, *fh2;
77 void *dataptr, *data2ptr; 90 void *dataptr, *data2ptr;
78 Stat_t *statdata; 91 Stat_t *statdata;
79 off_t offset; 92 off_t offset;
80 size_t length; 93 size_t length;
81 ssize_t result; 94 ssize_t result;
82 95
96 STRLEN dataoffset;
83 int type; 97 int type;
84 int fd, fd2; 98 int fd, fd2;
85 int errorno; 99 int errorno;
86 STRLEN dataoffset;
87 mode_t mode; /* open */ 100 mode_t mode; /* open */
101
88 unsigned char cancelled; 102 unsigned char flags;
103 unsigned char pri;
104
105 SV *self; /* the perl counterpart of this request, if any */
106 struct aio_cb *grp, *grp_prev, *grp_next, *grp_first;
89} aio_cb; 107} aio_cb;
108
109enum {
110 FLAG_CANCELLED = 0x01,
111};
90 112
91typedef aio_cb *aio_req; 113typedef aio_cb *aio_req;
92typedef aio_cb *aio_req_ornot; 114typedef aio_cb *aio_req_ornot;
115
116enum {
117 PRI_MIN = -4,
118 PRI_MAX = 4,
119
120 DEFAULT_PRI = 0,
121 PRI_BIAS = -PRI_MIN,
122 NUM_PRI = PRI_MAX + PRI_BIAS + 1,
123};
124
125static int next_pri = DEFAULT_PRI + PRI_BIAS;
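With PRI_MIN = -4, PRI_MAX = 4 and PRI_BIAS = -PRI_MIN, a user-visible priority is clamped and biased into one of NUM_PRI (9) queue slots; next_pri holds the already-biased slot for the next request. A sketch of that mapping, using a helper name of my own (the clamping matches the aioreq_pri binding near the end of the diff):

/* Clamp a user priority to [PRI_MIN, PRI_MAX] and bias it into
   a slot index in [0, NUM_PRI). */
static int pri_to_slot (int pri)
{
  if (pri < PRI_MIN) pri = PRI_MIN;
  if (pri > PRI_MAX) pri = PRI_MAX;

  return pri + PRI_BIAS; /* -4..+4 becomes 0..8 */
}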
93 126
94static int started, wanted; 127static int started, wanted;
95static volatile int nreqs; 128static volatile int nreqs;
96static int max_outstanding = 1<<30; 129static int max_outstanding = 1<<30;
97static int respipe [2]; 130static int respipe [2];
98 131
132#if __linux && defined (PTHREAD_ADAPTIVE_MUTEX_INITIALIZER_NP)
133# define AIO_MUTEX_INIT PTHREAD_ADAPTIVE_MUTEX_INITIALIZER_NP
134#else
135# define AIO_MUTEX_INIT PTHREAD_MUTEX_INITIALIZER
136#endif
137
99static pthread_mutex_t reslock = PTHREAD_MUTEX_INITIALIZER; 138static pthread_mutex_t reslock = AIO_MUTEX_INIT;
100static pthread_mutex_t reqlock = PTHREAD_MUTEX_INITIALIZER; 139static pthread_mutex_t reqlock = AIO_MUTEX_INIT;
101static pthread_cond_t reqwait = PTHREAD_COND_INITIALIZER; 140static pthread_cond_t reqwait = PTHREAD_COND_INITIALIZER;
102 141
103static volatile aio_req reqs, reqe; /* queue start, queue end */ 142/*
104static volatile aio_req ress, rese; /* queue start, queue end */ 143 * a somewhat faster data structure might be nice, but
144 * with 8 priorities this actually needs <20 insns
145 * per shift, the most expensive operation.
146 */
147typedef struct {
148 aio_req qs[NUM_PRI], qe[NUM_PRI]; /* qstart, qend */
149 int size;
150} reqq;
105 151
152static reqq req_queue;
153static reqq res_queue;
154
155int reqq_push (reqq *q, aio_req req)
156{
157 int pri = req->pri;
158 req->next = 0;
159
160 if (q->qe[pri])
161 {
162 q->qe[pri]->next = req;
163 q->qe[pri] = req;
164 }
165 else
166 q->qe[pri] = q->qs[pri] = req;
167
168 return q->size++;
169}
170
171aio_req reqq_shift (reqq *q)
172{
173 int pri;
174
175 if (!q->size)
176 return 0;
177
178 --q->size;
179
180 for (pri = NUM_PRI; pri--; )
181 {
182 aio_req req = q->qs[pri];
183
184 if (req)
185 {
186 if (!(q->qs[pri] = req->next))
187 q->qe[pri] = 0;
188
189 return req;
190 }
191 }
192
193 abort ();
194}
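reqq_push appends a request to the tail of its priority slot and returns the queue size before the push, so a zero return means the queue was empty; reqq_shift scans the slots from the highest priority down. Condensed from the req_send and worker-loop hunks further down (the two helper names are mine, error handling omitted), the queues are used like this:

static void producer_enqueue (aio_req req)
{
  pthread_mutex_lock (&reqlock);
  reqq_push (&req_queue, req);    /* tail of its priority slot */
  pthread_cond_signal (&reqwait); /* wake one sleeping worker  */
  pthread_mutex_unlock (&reqlock);
}

static aio_req worker_dequeue (void)
{
  aio_req req;

  pthread_mutex_lock (&reqlock);

  while (!(req = reqq_shift (&req_queue))) /* highest priority first */
    pthread_cond_wait (&reqwait, &reqlock);

  pthread_mutex_unlock (&reqlock);

  return req;
}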
195
196static void req_invoke (aio_req req);
106static void req_free (aio_req req); 197static void req_free (aio_req req);
107 198
108/* must be called at most once */ 199/* must be called at most once */
109static SV *req_sv (aio_req req, const char *klass) 200static SV *req_sv (aio_req req, const char *klass)
110{ 201{
202 if (!req->self)
203 {
111 req->self = (SV *)newHV (); 204 req->self = (SV *)newHV ();
112 sv_magic (req->self, 0, PERL_MAGIC_ext, (char *)req, 0); 205 sv_magic (req->self, 0, PERL_MAGIC_ext, (char *)req, 0);
206 }
113 207
114 return sv_2mortal (sv_bless (newRV_inc (req->self), gv_stashpv (klass, 1))); 208 return sv_2mortal (sv_bless (newRV_inc (req->self), gv_stashpv (klass, 1)));
115} 209}
116 210
117static aio_req SvAIO_REQ (SV *sv) 211static aio_req SvAIO_REQ (SV *sv)
118{ 212{
213 MAGIC *mg;
214
119 if (!sv_derived_from (sv, AIO_REQ_KLASS) || !SvROK (sv)) 215 if (!sv_derived_from (sv, AIO_REQ_KLASS) || !SvROK (sv))
120 croak ("object of class " AIO_REQ_KLASS " expected"); 216 croak ("object of class " AIO_REQ_KLASS " expected");
121 217
122 MAGIC *mg = mg_find (SvRV (sv), PERL_MAGIC_ext); 218 mg = mg_find (SvRV (sv), PERL_MAGIC_ext);
123 219
124 return mg ? (aio_req)mg->mg_ptr : 0; 220 return mg ? (aio_req)mg->mg_ptr : 0;
125} 221}
126 222
223static void aio_grp_feed (aio_req grp)
224{
225 while (grp->length < grp->fd2 && !(grp->flags & FLAG_CANCELLED))
226 {
227 int old_len = grp->length;
228
229 if (grp->fh2 && SvOK (grp->fh2))
230 {
231 dSP;
232
233 ENTER;
234 SAVETMPS;
235 PUSHMARK (SP);
236 XPUSHs (req_sv (grp, AIO_GRP_KLASS));
237 PUTBACK;
238 call_sv (grp->fh2, G_VOID | G_EVAL | G_KEEPERR);
239 SPAGAIN;
240 FREETMPS;
241 LEAVE;
242 }
243
244 /* stop if no progress has been made */
245 if (old_len == grp->length)
246 {
247 SvREFCNT_dec (grp->fh2);
248 grp->fh2 = 0;
249 break;
250 }
251 }
252}
253
254static void aio_grp_dec (aio_req grp)
255{
256 --grp->length;
257
258 /* call feeder, if applicable */
259 aio_grp_feed (grp);
260
261 /* finish, if done */
262 if (!grp->length && grp->fd)
263 {
264 req_invoke (grp);
265 req_free (grp);
266 }
267}
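For REQ_GROUP requests the otherwise unused scalar fields double as group state, which is what aio_grp_feed and aio_grp_dec above manipulate. The symbolic names below are mine; the field roles follow from the surrounding hunks (poll_cb sets fd to 1 for a delayed group, req_invoke sets it to 2 when the group finishes, limit () stores into fd2, feed () stores the callback in fh2):

/* Sketch only, these names do not appear in AIO.xs. */
enum {
  GRP_PENDING  = 0, /* grp->fd: still collecting subrequests      */
  GRP_DELAYED  = 1, /* grp->fd: dequeued while subrequests remain */
  GRP_FINISHED = 2  /* grp->fd: callback ran, add () now croaks   */
};

/* grp->fd2    feed limit: aio_grp_feed calls the feeder while length < fd2
 * grp->fh2    feeder callback (a Perl code ref), dropped when it stalls
 * grp->length number of subrequests still outstanding
 * grp->data   AV of results installed via the result () method */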
268
127static void poll_wait () 269static void poll_wait ()
128{ 270{
129 if (nreqs && !ress)
130 {
131 fd_set rfd; 271 fd_set rfd;
272
273 while (nreqs)
274 {
275 int size;
276#if !(__i386 || __x86_64) /* safe without sempahore on this archs */
277 pthread_mutex_lock (&reslock);
278#endif
279 size = res_queue.size;
280#if !(__i386 || __x86_64) /* safe without sempahore on this archs */
281 pthread_mutex_unlock (&reslock);
282#endif
283
284 if (size)
285 return;
286
132 FD_ZERO(&rfd); 287 FD_ZERO(&rfd);
133 FD_SET(respipe [0], &rfd); 288 FD_SET(respipe [0], &rfd);
134 289
135 select (respipe [0] + 1, &rfd, 0, 0, 0); 290 select (respipe [0] + 1, &rfd, 0, 0, 0);
136 } 291 }
137} 292}
138 293
139static void req_invoke (aio_req req) 294static void req_invoke (aio_req req)
140{ 295{
141 dSP; 296 dSP;
142 int errorno = errno;
143 297
144 if (req->cancelled || !SvOK (req->callback)) 298 if (!(req->flags & FLAG_CANCELLED) && SvOK (req->callback))
145 return; 299 {
146
147 errno = req->errorno; 300 errno = req->errorno;
148 301
149 ENTER; 302 ENTER;
303 SAVETMPS;
150 PUSHMARK (SP); 304 PUSHMARK (SP);
151 EXTEND (SP, 1); 305 EXTEND (SP, 1);
152 306
153 switch (req->type) 307 switch (req->type)
154 {
155 case REQ_READDIR:
156 { 308 {
157 SV *rv = &PL_sv_undef; 309 case REQ_READDIR:
158
159 if (req->result >= 0)
160 { 310 {
161 char *buf = req->data2ptr; 311 SV *rv = &PL_sv_undef;
162 AV *av = newAV ();
163 312
164 while (req->result) 313 if (req->result >= 0)
165 { 314 {
315 char *buf = req->data2ptr;
316 AV *av = newAV ();
317
318 while (req->result)
319 {
166 SV *sv = newSVpv (buf, 0); 320 SV *sv = newSVpv (buf, 0);
167 321
168 av_push (av, sv); 322 av_push (av, sv);
169 buf += SvCUR (sv) + 1; 323 buf += SvCUR (sv) + 1;
170 req->result--; 324 req->result--;
325 }
326
327 rv = sv_2mortal (newRV_noinc ((SV *)av));
171 } 328 }
172 329
173 rv = sv_2mortal (newRV_noinc ((SV *)av)); 330 PUSHs (rv);
174 } 331 }
332 break;
175 333
176 PUSHs (rv); 334 case REQ_OPEN:
335 {
336 /* convert fd to fh */
337 SV *fh;
338
339 PUSHs (sv_2mortal (newSViv (req->result)));
340 PUTBACK;
341 call_pv ("IO::AIO::_fd2fh", G_SCALAR | G_EVAL);
342 SPAGAIN;
343
344 fh = SvREFCNT_inc (POPs);
345
346 PUSHMARK (SP);
347 XPUSHs (sv_2mortal (fh));
348 }
349 break;
350
351 case REQ_GROUP:
352 req->fd = 2; /* mark group as finished */
353
354 if (req->data)
355 {
356 int i;
357 AV *av = (AV *)req->data;
358
359 EXTEND (SP, AvFILL (av) + 1);
360 for (i = 0; i <= AvFILL (av); ++i)
361 PUSHs (*av_fetch (av, i, 0));
362 }
363 break;
364
365 case REQ_NOP:
366 case REQ_BUSY:
367 break;
368
369 default:
370 PUSHs (sv_2mortal (newSViv (req->result)));
371 break;
177 } 372 }
178 break;
179 373
180 case REQ_OPEN:
181 {
182 /* convert fd to fh */
183 SV *fh;
184 374
185 PUSHs (sv_2mortal (newSViv (req->result)));
186 PUTBACK; 375 PUTBACK;
187 call_pv ("IO::AIO::_fd2fh", G_SCALAR | G_EVAL);
188 SPAGAIN;
189
190 fh = SvREFCNT_inc (POPs);
191
192 PUSHMARK (SP);
193 XPUSHs (sv_2mortal (fh));
194 }
195 break;
196
197 case REQ_GROUP:
198 if (req->data)
199 {
200 int i;
201 AV *av = (AV *)req->data;
202
203 EXTEND (SP, AvFILL (av) + 1);
204 for (i = 0; i <= AvFILL (av); ++i)
205 PUSHs (*av_fetch (av, i, 0));
206 }
207 break;
208
209 case REQ_SLEEP:
210 break;
211
212 default:
213 PUSHs (sv_2mortal (newSViv (req->result)));
214 break;
215 }
216
217
218 PUTBACK;
219 call_sv (req->callback, G_VOID | G_EVAL); 376 call_sv (req->callback, G_VOID | G_EVAL);
220 SPAGAIN; 377 SPAGAIN;
378
379 FREETMPS;
380 LEAVE;
381 }
382
383 if (req->grp)
384 {
385 aio_req grp = req->grp;
386
387 /* unlink request */
388 if (req->grp_next) req->grp_next->grp_prev = req->grp_prev;
389 if (req->grp_prev) req->grp_prev->grp_next = req->grp_next;
390
391 if (grp->grp_first == req)
392 grp->grp_first = req->grp_next;
393
394 aio_grp_dec (grp);
395 }
221 396
222 if (SvTRUE (ERRSV)) 397 if (SvTRUE (ERRSV))
223 { 398 {
224 req_free (req); 399 req_free (req);
225 croak (0); 400 croak (0);
226 } 401 }
227
228 LEAVE;
229
230 errno = errorno;
231} 402}
232 403
233static void req_free (aio_req req) 404static void req_free (aio_req req)
234{ 405{
235 if (req->grp)
236 {
237 aio_req grp = req->grp;
238
239 /* unlink request */
240 req->grp_next->grp_prev = req->grp_prev;
241 req->grp_prev->grp_next = req->grp_next;
242
243 if (grp->grp_next == grp && grp->fd)
244 {
245 req_invoke (grp);
246 req_free (grp);
247 }
248 }
249
250 if (req->self) 406 if (req->self)
251 { 407 {
252 sv_unmagic (req->self, PERL_MAGIC_ext); 408 sv_unmagic (req->self, PERL_MAGIC_ext);
253 SvREFCNT_dec (req->self); 409 SvREFCNT_dec (req->self);
254 } 410 }
255 411
256 if (req->data)
257 SvREFCNT_dec (req->data); 412 SvREFCNT_dec (req->data);
258
259 if (req->fh)
260 SvREFCNT_dec (req->fh); 413 SvREFCNT_dec (req->fh);
261
262 if (req->fh2)
263 SvREFCNT_dec (req->fh2); 414 SvREFCNT_dec (req->fh2);
264
265 if (req->statdata)
266 Safefree (req->statdata);
267
268 if (req->callback)
269 SvREFCNT_dec (req->callback); 415 SvREFCNT_dec (req->callback);
416 Safefree (req->statdata);
270 417
271 if (req->type == REQ_READDIR && req->result >= 0) 418 if (req->type == REQ_READDIR && req->result >= 0)
272 free (req->data2ptr); 419 free (req->data2ptr);
273 420
274 Safefree (req); 421 Safefree (req);
275} 422}
276 423
277static void req_cancel (aio_req req) 424static void req_cancel (aio_req req)
278{ 425{
279 req->cancelled = 1; 426 req->flags |= FLAG_CANCELLED;
280 427
281 if (req->type == REQ_GROUP) 428 if (req->type == REQ_GROUP)
282 { 429 {
283 aio_req sub; 430 aio_req sub;
284 431
285 for (sub = req->grp_next; sub != req; sub = sub->grp_next) 432 for (sub = req->grp_first; sub; sub = sub->grp_next)
286 req_cancel (sub); 433 req_cancel (sub);
287 } 434 }
288} 435}
289 436
290static int poll_cb () 437static int poll_cb ()
295 aio_req req; 442 aio_req req;
296 443
297 for (;;) 444 for (;;)
298 { 445 {
299 pthread_mutex_lock (&reslock); 446 pthread_mutex_lock (&reslock);
300 req = ress; 447 req = reqq_shift (&res_queue);
301 448
302 if (req) 449 if (req)
303 { 450 {
304 ress = req->next;
305
306 if (!ress) 451 if (!res_queue.size)
307 { 452 {
308 /* read any signals sent by the worker threads */ 453 /* read any signals sent by the worker threads */
309 char buf [32]; 454 char buf [32];
310 while (read (respipe [0], buf, 32) == 32) 455 while (read (respipe [0], buf, 32) == 32)
311 ; 456 ;
312
313 rese = 0;
314 } 457 }
315 } 458 }
316 459
317 pthread_mutex_unlock (&reslock); 460 pthread_mutex_unlock (&reslock);
318 461
319 if (!req) 462 if (!req)
320 break; 463 break;
321 464
322 nreqs--; 465 --nreqs;
323 466
324 if (req->type == REQ_QUIT) 467 if (req->type == REQ_QUIT)
325 started--; 468 started--;
326 else if (req->type == REQ_GROUP && req->grp_next != req) 469 else if (req->type == REQ_GROUP && req->length)
327 { 470 {
328 req->fd = 1; /* mark request as delayed */ 471 req->fd = 1; /* mark request as delayed */
329 continue; 472 continue;
330 } 473 }
331 else 474 else
378static void req_send (aio_req req) 521static void req_send (aio_req req)
379{ 522{
380 while (started < wanted && nreqs >= started) 523 while (started < wanted && nreqs >= started)
381 start_thread (); 524 start_thread ();
382 525
383 nreqs++; 526 ++nreqs;
384 527
385 pthread_mutex_lock (&reqlock); 528 pthread_mutex_lock (&reqlock);
386 529 reqq_push (&req_queue, req);
387 req->next = 0;
388
389 if (reqe)
390 {
391 reqe->next = req;
392 reqe = req;
393 }
394 else
395 reqe = reqs = req;
396
397 pthread_cond_signal (&reqwait); 530 pthread_cond_signal (&reqwait);
398 pthread_mutex_unlock (&reqlock); 531 pthread_mutex_unlock (&reqlock);
399 532
400 if (nreqs > max_outstanding) 533 if (nreqs > max_outstanding)
401 for (;;) 534 for (;;)
410} 543}
411 544
412static void end_thread (void) 545static void end_thread (void)
413{ 546{
414 aio_req req; 547 aio_req req;
548
415 Newz (0, req, 1, aio_cb); 549 Newz (0, req, 1, aio_cb);
550
416 req->type = REQ_QUIT; 551 req->type = REQ_QUIT;
552 req->pri = PRI_MAX + PRI_BIAS;
417 553
418 req_send (req); 554 req_send (req);
419} 555}
420 556
421static void min_parallel (int nthreads) 557static void min_parallel (int nthreads)
508#if !HAVE_READAHEAD 644#if !HAVE_READAHEAD
509# define readahead aio_readahead 645# define readahead aio_readahead
510 646
511static ssize_t readahead (int fd, off_t offset, size_t count) 647static ssize_t readahead (int fd, off_t offset, size_t count)
512{ 648{
513 char readahead_buf[4096]; 649 dBUF;
514 650
515 while (count > 0) 651 while (count > 0)
516 { 652 {
517 size_t len = count < sizeof (readahead_buf) ? count : sizeof (readahead_buf); 653 size_t len = count < AIO_BUFSIZE ? count : AIO_BUFSIZE;
518 654
519 pread (fd, readahead_buf, len, offset); 655 pread (fd, aio_buf, len, offset);
520 offset += len; 656 offset += len;
521 count -= len; 657 count -= len;
522 } 658 }
659
660 fBUF;
523 661
524 errno = 0; 662 errno = 0;
525} 663}
526#endif 664#endif
527 665
613#endif 751#endif
614 ) 752 )
615 ) 753 )
616 { 754 {
617 /* emulate sendfile. this is a major pain in the ass */ 755 /* emulate sendfile. this is a major pain in the ass */
618 char buf[4096]; 756 dBUF;
757
619 res = 0; 758 res = 0;
620 759
621 while (count) 760 while (count)
622 { 761 {
623 ssize_t cnt; 762 ssize_t cnt;
624 763
625 cnt = pread (ifd, buf, count > 4096 ? 4096 : count, offset); 764 cnt = pread (ifd, aio_buf, count > AIO_BUFSIZE ? AIO_BUFSIZE : count, offset);
626 765
627 if (cnt <= 0) 766 if (cnt <= 0)
628 { 767 {
629 if (cnt && !res) res = -1; 768 if (cnt && !res) res = -1;
630 break; 769 break;
631 } 770 }
632 771
633 cnt = write (ofd, buf, cnt); 772 cnt = write (ofd, aio_buf, cnt);
634 773
635 if (cnt <= 0) 774 if (cnt <= 0)
636 { 775 {
637 if (cnt && !res) res = -1; 776 if (cnt && !res) res = -1;
638 break; 777 break;
640 779
641 offset += cnt; 780 offset += cnt;
642 res += cnt; 781 res += cnt;
643 count -= cnt; 782 count -= cnt;
644 } 783 }
784
785 fBUF;
645 } 786 }
646 787
647 return res; 788 return res;
648} 789}
649 790
650/* read a full directory */ 791/* read a full directory */
651static int scandir_ (const char *path, void **namesp) 792static int scandir_ (const char *path, void **namesp)
652{ 793{
653 DIR *dirp = opendir (path); 794 DIR *dirp;
654 union 795 union
655 { 796 {
656 struct dirent d; 797 struct dirent d;
657 char b [offsetof (struct dirent, d_name) + NAME_MAX + 1]; 798 char b [offsetof (struct dirent, d_name) + NAME_MAX + 1];
658 } u; 799 } *u;
659 struct dirent *entp; 800 struct dirent *entp;
660 char *name, *names; 801 char *name, *names;
661 int memlen = 4096; 802 int memlen = 4096;
662 int memofs = 0; 803 int memofs = 0;
663 int res = 0; 804 int res = 0;
664 int errorno; 805 int errorno;
665 806
807 dirp = opendir (path);
666 if (!dirp) 808 if (!dirp)
667 return -1; 809 return -1;
668 810
811 u = malloc (sizeof (*u));
669 names = malloc (memlen); 812 names = malloc (memlen);
670 813
814 if (u && names)
671 for (;;) 815 for (;;)
672 { 816 {
817 errno = 0;
673 errno = 0, readdir_r (dirp, &u.d, &entp); 818 readdir_r (dirp, &u->d, &entp);
674 819
675 if (!entp) 820 if (!entp)
676 break; 821 break;
677 822
678 name = entp->d_name; 823 name = entp->d_name;
679 824
680 if (name [0] != '.' || (name [1] && (name [1] != '.' || name [2]))) 825 if (name [0] != '.' || (name [1] && (name [1] != '.' || name [2])))
681 { 826 {
682 int len = strlen (name) + 1; 827 int len = strlen (name) + 1;
683 828
684 res++; 829 res++;
685 830
686 while (memofs + len > memlen) 831 while (memofs + len > memlen)
687 { 832 {
688 memlen *= 2; 833 memlen *= 2;
689 names = realloc (names, memlen); 834 names = realloc (names, memlen);
690 if (!names) 835 if (!names)
691 break; 836 break;
692 } 837 }
693 838
694 memcpy (names + memofs, name, len); 839 memcpy (names + memofs, name, len);
695 memofs += len; 840 memofs += len;
696 } 841 }
697 } 842 }
698 843
699 errorno = errno; 844 errorno = errno;
845 free (u);
700 closedir (dirp); 846 closedir (dirp);
701 847
702 if (errorno) 848 if (errorno)
703 { 849 {
704 free (names); 850 free (names);
721 { 867 {
722 pthread_mutex_lock (&reqlock); 868 pthread_mutex_lock (&reqlock);
723 869
724 for (;;) 870 for (;;)
725 { 871 {
726 req = reqs; 872 req = reqq_shift (&req_queue);
727
728 if (reqs)
729 {
730 reqs = reqs->next;
731 if (!reqs) reqe = 0;
732 }
733 873
734 if (req) 874 if (req)
735 break; 875 break;
736 876
737 pthread_cond_wait (&reqwait, &reqlock); 877 pthread_cond_wait (&reqwait, &reqlock);
738 } 878 }
739 879
740 pthread_mutex_unlock (&reqlock); 880 pthread_mutex_unlock (&reqlock);
741 881
742 errno = 0; /* strictly unnecessary */ 882 errno = 0; /* strictly unnecessary */
883 type = req->type; /* remember type for QUIT check */
743 884
744 if (!req->cancelled) 885 if (!(req->flags & FLAG_CANCELLED))
745 switch (req->type) 886 switch (type)
746 { 887 {
747 case REQ_READ: req->result = pread (req->fd, req->dataptr, req->length, req->offset); break; 888 case REQ_READ: req->result = pread (req->fd, req->dataptr, req->length, req->offset); break;
748 case REQ_WRITE: req->result = pwrite (req->fd, req->dataptr, req->length, req->offset); break; 889 case REQ_WRITE: req->result = pwrite (req->fd, req->dataptr, req->length, req->offset); break;
749 890
750 case REQ_READAHEAD: req->result = readahead (req->fd, req->offset, req->length); break; 891 case REQ_READAHEAD: req->result = readahead (req->fd, req->offset, req->length); break;
764 905
765 case REQ_FDATASYNC: req->result = fdatasync (req->fd); break; 906 case REQ_FDATASYNC: req->result = fdatasync (req->fd); break;
766 case REQ_FSYNC: req->result = fsync (req->fd); break; 907 case REQ_FSYNC: req->result = fsync (req->fd); break;
767 case REQ_READDIR: req->result = scandir_ (req->dataptr, &req->data2ptr); break; 908 case REQ_READDIR: req->result = scandir_ (req->dataptr, &req->data2ptr); break;
768 909
769 case REQ_SLEEP: 910 case REQ_BUSY:
770 { 911 {
771 struct timeval tv; 912 struct timeval tv;
772 913
773 tv.tv_sec = req->fd; 914 tv.tv_sec = req->fd;
774 tv.tv_usec = req->fd2; 915 tv.tv_usec = req->fd2;
775 916
776 req->result = select (0, 0, 0, 0, &tv); 917 req->result = select (0, 0, 0, 0, &tv);
777 } 918 }
778 919
920 case REQ_GROUP:
921 case REQ_NOP:
779 case REQ_QUIT: 922 case REQ_QUIT:
780 break; 923 break;
781 924
782 default: 925 default:
783 req->result = ENOSYS; 926 req->result = ENOSYS;
786 929
787 req->errorno = errno; 930 req->errorno = errno;
788 931
789 pthread_mutex_lock (&reslock); 932 pthread_mutex_lock (&reslock);
790 933
791 req->next = 0; 934 if (!reqq_push (&res_queue, req))
792
793 if (rese)
794 {
795 rese->next = req;
796 rese = req;
797 }
798 else
799 {
800 rese = ress = req;
801
802 /* write a dummy byte to the pipe so fh becomes ready */ 935 /* write a dummy byte to the pipe so fh becomes ready */
803 write (respipe [1], &respipe, 1); 936 write (respipe [1], &respipe, 1);
804 }
805 937
806 pthread_mutex_unlock (&reslock); 938 pthread_mutex_unlock (&reslock);
807 } 939 }
808 while (type != REQ_QUIT); 940 while (type != REQ_QUIT);
809 941
840{ 972{
841 aio_req prv; 973 aio_req prv;
842 974
843 started = 0; 975 started = 0;
844 976
845 while (reqs) 977 while (prv = reqq_shift (&req_queue))
846 {
847 prv = reqs;
848 reqs = prv->next;
849 req_free (prv); 978 req_free (prv);
850 }
851 979
852 reqs = reqe = 0; 980 while (prv = reqq_shift (&res_queue))
853
854 while (ress)
855 {
856 prv = ress;
857 ress = prv->next;
858 req_free (prv); 981 req_free (prv);
859 } 982
860
861 ress = rese = 0;
862
863 close (respipe [0]); 983 close (respipe [0]);
864 close (respipe [1]); 984 close (respipe [1]);
865 create_pipe (); 985 create_pipe ();
866 986
867 atfork_parent (); 987 atfork_parent ();
868} 988}
869 989
870#define dREQ \ 990#define dREQ \
871 aio_req req; \ 991 aio_req req; \
992 int req_pri = next_pri; \
993 next_pri = DEFAULT_PRI + PRI_BIAS; \
872 \ 994 \
873 if (SvOK (callback) && !SvROK (callback)) \ 995 if (SvOK (callback) && !SvROK (callback)) \
874 croak ("callback must be undef or of reference type"); \ 996 croak ("callback must be undef or of reference type"); \
875 \ 997 \
876 Newz (0, req, 1, aio_cb); \ 998 Newz (0, req, 1, aio_cb); \
877 if (!req) \ 999 if (!req) \
878 croak ("out of memory during aio_req allocation"); \ 1000 croak ("out of memory during aio_req allocation"); \
879 \ 1001 \
880 req->callback = newSVsv (callback) 1002 req->callback = newSVsv (callback); \
1003 req->pri = req_pri
881 1004
882#define REQ_SEND \ 1005#define REQ_SEND \
883 req_send (req); \ 1006 req_send (req); \
884 \ 1007 \
885 if (GIMME_V != G_VOID) \ 1008 if (GIMME_V != G_VOID) \
1139 1262
1140 REQ_SEND; 1263 REQ_SEND;
1141} 1264}
1142 1265
1143void 1266void
1144aio_sleep (delay,callback=&PL_sv_undef) 1267aio_busy (delay,callback=&PL_sv_undef)
1145 double delay 1268 double delay
1146 SV * callback 1269 SV * callback
1147 PPCODE: 1270 PPCODE:
1148{ 1271{
1149 dREQ; 1272 dREQ;
1150 1273
1151 req->type = REQ_SLEEP; 1274 req->type = REQ_BUSY;
1152 req->fd = delay < 0. ? 0 : delay; 1275 req->fd = delay < 0. ? 0 : delay;
1153 req->fd2 = delay < 0. ? 0 : 1000. * (delay - req->fd); 1276 req->fd2 = delay < 0. ? 0 : 1000. * (delay - req->fd);
1154 1277
1155 REQ_SEND; 1278 REQ_SEND;
1156} 1279}
1160 SV * callback 1283 SV * callback
1161 PROTOTYPE: ;$ 1284 PROTOTYPE: ;$
1162 PPCODE: 1285 PPCODE:
1163{ 1286{
1164 dREQ; 1287 dREQ;
1288
1165 req->type = REQ_GROUP; 1289 req->type = REQ_GROUP;
1166 req->grp_next = req;
1167 req->grp_prev = req;
1168
1169 req_send (req); 1290 req_send (req);
1291
1170 XPUSHs (req_sv (req, AIO_GRP_KLASS)); 1292 XPUSHs (req_sv (req, AIO_GRP_KLASS));
1171} 1293}
1294
1295void
1296aio_nop (callback=&PL_sv_undef)
1297 SV * callback
1298 PPCODE:
1299{
1300 dREQ;
1301
1302 req->type = REQ_NOP;
1303
1304 REQ_SEND;
1305}
1306
1307void
1308aioreq_pri (int pri = DEFAULT_PRI)
1309 CODE:
1310 if (pri < PRI_MIN) pri = PRI_MIN;
1311 if (pri > PRI_MAX) pri = PRI_MAX;
1312 next_pri = pri + PRI_BIAS;
1313
1314void
1315aioreq_nice (int nice = 0)
1316 CODE:
1317 nice = next_pri - nice;
1318 if (nice < PRI_MIN) nice = PRI_MIN;
1319 if (nice > PRI_MAX) nice = PRI_MAX;
1320 next_pri = nice + PRI_BIAS;
1172 1321
1173void 1322void
1174flush () 1323flush ()
1175 PROTOTYPE: 1324 PROTOTYPE:
1176 CODE: 1325 CODE:
1225 1374
1226MODULE = IO::AIO PACKAGE = IO::AIO::REQ 1375MODULE = IO::AIO PACKAGE = IO::AIO::REQ
1227 1376
1228void 1377void
1229cancel (aio_req_ornot req) 1378cancel (aio_req_ornot req)
1230 PROTOTYPE:
1231 CODE: 1379 CODE:
1232 req_cancel (req); 1380 req_cancel (req);
1233 1381
1382void
1383cb (aio_req_ornot req, SV *callback=&PL_sv_undef)
1384 CODE:
1385 SvREFCNT_dec (req->callback);
1386 req->callback = newSVsv (callback);
1387
1234MODULE = IO::AIO PACKAGE = IO::AIO::GRP 1388MODULE = IO::AIO PACKAGE = IO::AIO::GRP
1235 1389
1236void 1390void
1237add (aio_req grp, ...) 1391add (aio_req grp, ...)
1238 PPCODE: 1392 PPCODE:
1239{ 1393{
1240 int i; 1394 int i;
1395 aio_req req;
1396
1397 if (grp->fd == 2)
1398 croak ("cannot add requests to IO::AIO::GRP after the group finished");
1241 1399
1242 for (i = 1; i < items; ++i ) 1400 for (i = 1; i < items; ++i )
1243 { 1401 {
1244 if (GIMME_V != G_VOID) 1402 if (GIMME_V != G_VOID)
1245 XPUSHs (sv_2mortal (newSVsv (ST (i)))); 1403 XPUSHs (sv_2mortal (newSVsv (ST (i))));
1246 1404
1247 aio_req req = SvAIO_REQ (ST (i)); 1405 req = SvAIO_REQ (ST (i));
1248 1406
1249 if (req) 1407 if (req)
1250 { 1408 {
1251 req->grp_prev = grp; 1409 ++grp->length;
1252 req->grp_next = grp->grp_next;
1253 grp->grp_next->grp_prev = req;
1254 grp->grp_next = req;
1255
1256 req->grp = grp; 1410 req->grp = grp;
1411
1412 req->grp_prev = 0;
1413 req->grp_next = grp->grp_first;
1414
1415 if (grp->grp_first)
1416 grp->grp_first->grp_prev = req;
1417
1418 grp->grp_first = req;
1257 } 1419 }
1258 } 1420 }
1259} 1421}
1260 1422
1261void 1423void
1262result (aio_req grp, ...) 1424result (aio_req grp, ...)
1425 CODE:
1426{
1427 int i;
1428 AV *av = newAV ();
1429
1430 for (i = 1; i < items; ++i )
1431 av_push (av, newSVsv (ST (i)));
1432
1433 SvREFCNT_dec (grp->data);
1434 grp->data = (SV *)av;
1435}
1436
1437void
1438limit (aio_req grp, int limit)
1263 CODE: 1439 CODE:
1264{ 1440 grp->fd2 = limit;
1265 int i; 1441 aio_grp_feed (grp);
1266 AV *av = newAV ();
1267 1442
1268 if (grp->data) 1443void
1444feed (aio_req grp, SV *callback=&PL_sv_undef)
1445 CODE:
1446{
1269 SvREFCNT_dec (grp->data); 1447 SvREFCNT_dec (grp->fh2);
1448 grp->fh2 = newSVsv (callback);
1270 1449
1271 for (i = 1; i < items; ++i ) 1450 if (grp->fd2 <= 0)
1272 av_push (av, newSVsv (ST (i))); 1451 grp->fd2 = 2;
1273 1452
1274 grp->data = (SV *)av; 1453 aio_grp_feed (grp);
1275} 1454}
1276 1455
