
Comparing IO-AIO/AIO.xs (file contents):
Revision 1.63 by root, Mon Oct 23 23:48:31 2006 UTC vs.
Revision 1.76 by root, Thu Oct 26 12:38:04 2006 UTC

@@ 1.63 line 1 / 1.76 line 1 @@
-#if __linux
+/* solaris */
+#define _POSIX_PTHREAD_SEMANTICS 1
+
+#if __linux && !defined(_GNU_SOURCE)
 # define _GNU_SOURCE
 #endif
 
+/* just in case */
 #define _REENTRANT 1
 
 #include <errno.h>
 
 #include "EXTERN.h"
@@ 1.63 line 44 / 1.76 line 48 @@
 /* used for struct dirent, AIX doesn't provide it */
 #ifndef NAME_MAX
 # define NAME_MAX 4096
 #endif
 
+#ifndef PTHREAD_STACK_MIN
+/* care for broken platforms, e.g. windows */
+# define PTHREAD_STACK_MIN 16384
+#endif
+
 #if __ia64
 # define STACKSIZE 65536
+#elif __i386 || __x86_64 /* 16k is unreasonably high :( */
+# define STACKSIZE PTHREAD_STACK_MIN
 #else
-# define STACKSIZE 8192
+# define STACKSIZE 16384
 #endif
+
+/* buffer size for various temporary buffers */
+#define AIO_BUFSIZE 65536
+
+#define dBUF \
+ char *aio_buf; \
+ LOCK (wrklock); \
+ self->dbuf = aio_buf = malloc (AIO_BUFSIZE); \
+ UNLOCK (wrklock); \
+ if (!aio_buf) \
+  return -1;
 
 enum {
  REQ_QUIT,
  REQ_OPEN, REQ_CLOSE,
  REQ_READ, REQ_WRITE, REQ_READAHEAD,
@@ 1.63 line 61 / 1.76 line 83 @@
  REQ_FSYNC, REQ_FDATASYNC,
  REQ_UNLINK, REQ_RMDIR, REQ_RENAME,
  REQ_READDIR,
  REQ_LINK, REQ_SYMLINK,
  REQ_GROUP, REQ_NOP,
- REQ_SLEEP,
+ REQ_BUSY,
 };
 
 #define AIO_REQ_KLASS "IO::AIO::REQ"
 #define AIO_GRP_KLASS "IO::AIO::GRP"
 
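The dBUF macro added above gives each request handler a temporary buffer that is also recorded in the worker's dbuf slot, so worker_clear () can release it even if the request is torn down early. Below is a minimal standalone sketch of that allocation pattern; the names and types (worker, wrklock, do_request, BUFSIZE) are simplified stand-ins, not the module's real aio_req/worker definitions:

    #include <pthread.h>
    #include <stdio.h>
    #include <stdlib.h>

    #define BUFSIZE 65536

    typedef struct { void *dbuf; } worker;

    static pthread_mutex_t wrklock = PTHREAD_MUTEX_INITIALIZER;

    /* allocate a scratch buffer and publish it in the worker under the lock */
    #define dBUF \
      char *aio_buf; \
      pthread_mutex_lock (&wrklock); \
      self->dbuf = aio_buf = malloc (BUFSIZE); \
      pthread_mutex_unlock (&wrklock); \
      if (!aio_buf) \
        return -1;

    /* free whatever buffer the worker still owns (free (NULL) is a no-op) */
    static void worker_clear (worker *self)
    {
      free (self->dbuf);
      self->dbuf = 0;
    }

    static int do_request (worker *self)
    {
      dBUF;            /* aio_buf now points at BUFSIZE scratch bytes */
      aio_buf[0] = 0;  /* ... use it as temporary space ...           */
      return 0;        /* buffer stays registered until worker_clear  */
    }

    int main (void)
    {
      worker w = { 0 };
      int res = do_request (&w);

      worker_clear (&w);
      printf ("request returned %d\n", res);
      return 0;
    }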
@@ 1.63 line 105 / 1.76 line 127 @@
  PRI_MIN = -4,
  PRI_MAX = 4,
 
  DEFAULT_PRI = 0,
  PRI_BIAS = -PRI_MIN,
+ NUM_PRI = PRI_MAX + PRI_BIAS + 1,
 };
 
 static int next_pri = DEFAULT_PRI + PRI_BIAS;
 
 static int started, wanted;
 static volatile int nreqs;
-static int max_outstanding = 1<<30;
 static int respipe [2];
 
 #if __linux && defined (PTHREAD_ADAPTIVE_MUTEX_INITIALIZER_NP)
 # define AIO_MUTEX_INIT PTHREAD_ADAPTIVE_MUTEX_INITIALIZER_NP
 #else
 # define AIO_MUTEX_INIT PTHREAD_MUTEX_INITIALIZER
 #endif
 
+#define LOCK(mutex) pthread_mutex_lock (&(mutex))
+#define UNLOCK(mutex) pthread_mutex_unlock (&(mutex))
+
+/* worker threads management */
+static pthread_mutex_t wrklock = AIO_MUTEX_INIT;
+
+typedef struct worker {
+ /* locked by wrklock */
+ struct worker *prev, *next;
+
+ pthread_t tid;
+
+ /* locked by reslock, reqlock or wrklock */
+ aio_req req; /* currently processed request */
+ void *dbuf;
+ DIR *dirp;
+} worker;
+
+static worker wrk_first = { &wrk_first, &wrk_first, 0 };
+
+static void worker_clear (worker *wrk)
+{
+ if (wrk->dirp)
+  {
+   closedir (wrk->dirp);
+   wrk->dirp = 0;
+  }
+
+ if (wrk->dbuf)
+  {
+   free (wrk->dbuf);
+   wrk->dbuf = 0;
+  }
+}
+
+static void worker_free (worker *wrk)
+{
+ wrk->next->prev = wrk->prev;
+ wrk->prev->next = wrk->next;
+
+ free (wrk);
+}
+
 static pthread_mutex_t reslock = AIO_MUTEX_INIT;
 static pthread_mutex_t reqlock = AIO_MUTEX_INIT;
 static pthread_cond_t reqwait = PTHREAD_COND_INITIALIZER;
 
-static volatile aio_req reqs, reqe; /* queue start, queue end */
-static volatile aio_req ress, rese; /* queue start, queue end */
+/*
+ * a somewhat faster data structure might be nice, but
+ * with 8 priorities this actually needs <20 insns
+ * per shift, the most expensive operation.
+ */
+typedef struct {
+ aio_req qs[NUM_PRI], qe[NUM_PRI]; /* qstart, qend */
+ int size;
+} reqq;
 
+static reqq req_queue;
+static reqq res_queue;
+
+int reqq_push (reqq *q, aio_req req)
+{
+ int pri = req->pri;
+ req->next = 0;
+
+ if (q->qe[pri])
+  {
+   q->qe[pri]->next = req;
+   q->qe[pri] = req;
+  }
+ else
+  q->qe[pri] = q->qs[pri] = req;
+
+ return q->size++;
+}
+
+aio_req reqq_shift (reqq *q)
+{
+ int pri;
+
+ if (!q->size)
+  return 0;
+
+ --q->size;
+
+ for (pri = NUM_PRI; pri--; )
+  {
+   aio_req req = q->qs[pri];
+
+   if (req)
+    {
+     if (!(q->qs[pri] = req->next))
+      q->qe[pri] = 0;
+
+     return req;
+    }
+  }
+
+ abort ();
+}
+
+static int poll_cb (int max);
 static void req_invoke (aio_req req);
 static void req_free (aio_req req);
+static void req_cancel (aio_req req);
 
 /* must be called at most once */
 static SV *req_sv (aio_req req, const char *klass)
 {
  if (!req->self)
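The reqq structure added above keeps one FIFO list per priority plus a total element count: reqq_push appends at the tail of the request's priority class, and reqq_shift removes the oldest element of the highest non-empty class. A self-contained sketch of the same queue, using a simplified node type instead of the module's aio_req so the push/shift behaviour can be tried in isolation:

    #include <stdio.h>
    #include <stdlib.h>

    #define NUM_PRI 9   /* PRI_MAX + PRI_BIAS + 1 with PRI_MIN = -4, PRI_MAX = 4 */

    typedef struct node {
      struct node *next;
      int pri;            /* already biased into 0 .. NUM_PRI-1 */
      int id;
    } node;

    /* one singly-linked FIFO per priority, plus a total element count */
    typedef struct {
      node *qs[NUM_PRI], *qe[NUM_PRI];
      int size;
    } reqq;

    /* append at the tail of the request's priority class */
    static int reqq_push (reqq *q, node *req)
    {
      int pri = req->pri;
      req->next = 0;

      if (q->qe[pri])
        q->qe[pri] = q->qe[pri]->next = req;
      else
        q->qe[pri] = q->qs[pri] = req;

      return q->size++;
    }

    /* take the oldest element of the highest non-empty priority class */
    static node *reqq_shift (reqq *q)
    {
      int pri;

      if (!q->size)
        return 0;

      --q->size;

      for (pri = NUM_PRI; pri--; )
        if (q->qs[pri])
          {
            node *req = q->qs[pri];

            if (!(q->qs[pri] = req->next))
              q->qe[pri] = 0;

            return req;
          }

      abort ();   /* size and the per-class lists disagree */
    }

    int main (void)
    {
      reqq q = { { 0 }, { 0 }, 0 };
      node a = { 0, 4, 1 }, b = { 0, 8, 2 }, c = { 0, 4, 3 };
      node *n;

      reqq_push (&q, &a);
      reqq_push (&q, &b);
      reqq_push (&q, &c);

      while ((n = reqq_shift (&q)))
        printf ("id %d (pri %d)\n", n->id, n->pri);   /* 2, then 1, then 3 */

      return 0;
    }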
@@ 1.63 line 169 / 1.76 line 287 @@
  ENTER;
  SAVETMPS;
  PUSHMARK (SP);
  XPUSHs (req_sv (grp, AIO_GRP_KLASS));
  PUTBACK;
- call_sv (grp->fh2, G_VOID | G_EVAL);
+ call_sv (grp->fh2, G_VOID | G_EVAL | G_KEEPERR);
  SPAGAIN;
  FREETMPS;
  LEAVE;
  }
 
@@ 1.63 line 206 / 1.76 line 324 @@
 {
  fd_set rfd;
 
  while (nreqs)
  {
- aio_req req;
- pthread_mutex_lock (&reslock);
- req = ress;
- pthread_mutex_unlock (&reslock);
+ int size;
+#if !(__i386 || __x86_64) /* safe without sempahore on these archs */
+ LOCK (reslock);
+#endif
+ size = res_queue.size;
+#if !(__i386 || __x86_64) /* safe without sempahore on these archs */
+ UNLOCK (reslock);
+#endif
 
- if (req)
+ if (size)
  return;
 
  FD_ZERO(&rfd);
  FD_SET(respipe [0], &rfd);
 
@@ 1.63 line 224 / 1.76 line 346 @@
 }
 
 static void req_invoke (aio_req req)
 {
  dSP;
- int errorno = errno;
 
- if (req->flags & FLAG_CANCELLED || !SvOK (req->callback))
- return;
-
- errno = req->errorno;
+ if (!(req->flags & FLAG_CANCELLED) && SvOK (req->callback))
+ {
+ errno = req->errorno;
 
  ENTER;
  SAVETMPS;
  PUSHMARK (SP);
  EXTEND (SP, 1);
 
  switch (req->type)
- {
- case REQ_READDIR:
  {
- SV *rv = &PL_sv_undef;
+ case REQ_READDIR:
-
- if (req->result >= 0)
  {
- char *buf = req->data2ptr;
- AV *av = newAV ();
+ SV *rv = &PL_sv_undef;
 
- while (req->result)
+ if (req->result >= 0)
  {
+ int i;
+ char *buf = req->data2ptr;
+ AV *av = newAV ();
+
+ av_extend (av, req->result - 1);
+
+ for (i = 0; i < req->result; ++i)
+ {
  SV *sv = newSVpv (buf, 0);
 
- av_push (av, sv);
+ av_store (av, i, sv);
  buf += SvCUR (sv) + 1;
- req->result--;
+ }
+
+ rv = sv_2mortal (newRV_noinc ((SV *)av));
  }
 
- rv = sv_2mortal (newRV_noinc ((SV *)av));
+ PUSHs (rv);
  }
+ break;
 
- PUSHs (rv);
+ case REQ_OPEN:
+ {
+ /* convert fd to fh */
+ SV *fh;
+
+ PUSHs (sv_2mortal (newSViv (req->result)));
+ PUTBACK;
+ call_pv ("IO::AIO::_fd2fh", G_SCALAR | G_EVAL);
+ SPAGAIN;
+
+ fh = SvREFCNT_inc (POPs);
+
+ PUSHMARK (SP);
+ XPUSHs (sv_2mortal (fh));
+ }
+ break;
+
+ case REQ_GROUP:
+ req->fd = 2; /* mark group as finished */
+
+ if (req->data)
+ {
+ int i;
+ AV *av = (AV *)req->data;
+
+ EXTEND (SP, AvFILL (av) + 1);
+ for (i = 0; i <= AvFILL (av); ++i)
+ PUSHs (*av_fetch (av, i, 0));
+ }
+ break;
+
+ case REQ_NOP:
+ case REQ_BUSY:
+ break;
+
+ default:
+ PUSHs (sv_2mortal (newSViv (req->result)));
+ break;
  }
- break;
 
- case REQ_OPEN:
- {
- /* convert fd to fh */
- SV *fh;
 
- PUSHs (sv_2mortal (newSViv (req->result)));
  PUTBACK;
- call_pv ("IO::AIO::_fd2fh", G_SCALAR | G_EVAL);
- SPAGAIN;
-
- fh = SvREFCNT_inc (POPs);
-
- PUSHMARK (SP);
- XPUSHs (sv_2mortal (fh));
- }
- break;
-
- case REQ_GROUP:
- req->fd = 2; /* mark group as finished */
-
- if (req->data)
- {
- int i;
- AV *av = (AV *)req->data;
-
- EXTEND (SP, AvFILL (av) + 1);
- for (i = 0; i <= AvFILL (av); ++i)
- PUSHs (*av_fetch (av, i, 0));
- }
- break;
-
- case REQ_NOP:
- case REQ_SLEEP:
- break;
-
- default:
- PUSHs (sv_2mortal (newSViv (req->result)));
- break;
- }
-
-
- PUTBACK;
  call_sv (req->callback, G_VOID | G_EVAL);
  SPAGAIN;
 
  FREETMPS;
  LEAVE;
-
- errno = errorno;
-
- if (SvTRUE (ERRSV))
- {
- req_free (req);
- croak (0);
- }
-}
-
-static void req_free (aio_req req)
-{
+ }
 
  if (req->grp)
  {
  aio_req grp = req->grp;
 
  /* unlink request */
@@ 1.63 line 336 / 1.76 line 448 @@
  grp->grp_first = req->grp_next;
 
  aio_grp_dec (grp);
  }
 
+ if (SvTRUE (ERRSV))
+ {
+ req_free (req);
+ croak (0);
+ }
+}
+
+static void req_free (aio_req req)
+{
  if (req->self)
  {
  sv_unmagic (req->self, PERL_MAGIC_ext);
  SvREFCNT_dec (req->self);
  }
@@ 1.63 line 348 / 1.76 line 469 @@
  SvREFCNT_dec (req->fh);
  SvREFCNT_dec (req->fh2);
  SvREFCNT_dec (req->callback);
  Safefree (req->statdata);
 
- if (req->type == REQ_READDIR && req->result >= 0)
+ if (req->type == REQ_READDIR)
  free (req->data2ptr);
 
  Safefree (req);
 }
 
+static void req_cancel_subs (aio_req grp)
+{
+ aio_req sub;
+
+ if (grp->type != REQ_GROUP)
+  return;
+
+ SvREFCNT_dec (grp->fh2);
+ grp->fh2 = 0;
+
+ for (sub = grp->grp_first; sub; sub = sub->grp_next)
+  req_cancel (sub);
+}
+
 static void req_cancel (aio_req req)
 {
  req->flags |= FLAG_CANCELLED;
 
- if (req->type == REQ_GROUP)
- {
- aio_req sub;
-
- for (sub = req->grp_first; sub; sub = sub->grp_next)
- req_cancel (sub);
- }
+ req_cancel_subs (req);
 }
 
-static int poll_cb ()
+static int poll_cb (int max)
 {
  dSP;
  int count = 0;
  int do_croak = 0;
  aio_req req;
 
- for (;;)
+ while (max <= 0 || count < max)
  {
- pthread_mutex_lock (&reslock);
- req = ress;
+ LOCK (reslock);
+ req = reqq_shift (&res_queue);
 
  if (req)
  {
- ress = req->next;
-
- if (!ress)
+ if (!res_queue.size)
  {
  /* read any signals sent by the worker threads */
  char buf [32];
  while (read (respipe [0], buf, 32) == 32)
  ;
-
- rese = 0;
  }
  }
 
- pthread_mutex_unlock (&reslock);
+ UNLOCK (reslock);
 
  if (!req)
  break;
 
  --nreqs;
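poll_cb now takes a max argument: a positive value bounds how many completed requests are handled in one call (this is what the new poll_some XS function further below passes through), while zero or a negative value keeps the old drain-everything behaviour. A tiny standalone sketch of just that loop condition, with a plain integer counter standing in for the real result queue:

    #include <stdio.h>

    /* handle at most `max` pending results; max <= 0 means "no limit" */
    static int drain (int *pending, int max)
    {
      int count = 0;

      while (max <= 0 || count < max)
        {
          if (!*pending)   /* result queue empty, stop early */
            break;

          --*pending;      /* "handle" one completed request */
          ++count;
        }

      return count;
    }

    int main (void)
    {
      int pending = 5;

      printf ("%d\n", drain (&pending, 2)); /* 2 - bounded, like poll_some (2)  */
      printf ("%d\n", drain (&pending, 0)); /* 3 - unbounded, like poll_cb/flush */
      return 0;
    }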
@@ 1.63 line 439 / 1.76 line 564 @@
 static void *aio_proc(void *arg);
 
 static void start_thread (void)
 {
  sigset_t fullsigset, oldsigset;
- pthread_t tid;
  pthread_attr_t attr;
+
+ worker *wrk = calloc (1, sizeof (worker));
+
+ if (!wrk)
+  croak ("unable to allocate worker thread data");
 
  pthread_attr_init (&attr);
  pthread_attr_setstacksize (&attr, STACKSIZE);
  pthread_attr_setdetachstate (&attr, PTHREAD_CREATE_DETACHED);
 
  sigfillset (&fullsigset);
+
+ LOCK (wrklock);
  sigprocmask (SIG_SETMASK, &fullsigset, &oldsigset);
 
- if (pthread_create (&tid, &attr, aio_proc, 0) == 0)
+ if (pthread_create (&wrk->tid, &attr, aio_proc, (void *)wrk) == 0)
+ {
+ wrk->prev = &wrk_first;
+ wrk->next = wrk_first.next;
+ wrk_first.next->prev = wrk;
+ wrk_first.next = wrk;
  started++;
+ }
+ else
+  free (wrk);
 
  sigprocmask (SIG_SETMASK, &oldsigset, 0);
+ UNLOCK (wrklock);
 }
 
 static void req_send (aio_req req)
 {
  while (started < wanted && nreqs >= started)
  start_thread ();
 
  ++nreqs;
 
- pthread_mutex_lock (&reqlock);
-
- req->next = 0;
-
- if (reqe)
- {
- reqe->next = req;
- reqe = req;
- }
- else
- reqe = reqs = req;
-
+ LOCK (reqlock);
+ reqq_push (&req_queue, req);
  pthread_cond_signal (&reqwait);
- pthread_mutex_unlock (&reqlock);
-
- if (nreqs > max_outstanding)
- for (;;)
- {
- poll_cb ();
-
- if (nreqs <= max_outstanding)
- break;
-
- poll_wait ();
- }
+ UNLOCK (reqlock);
 }
 
 static void end_thread (void)
 {
  aio_req req;
+
  Newz (0, req, 1, aio_cb);
+
  req->type = REQ_QUIT;
+ req->pri = PRI_MAX + PRI_BIAS;
 
  req_send (req);
 }
 
 static void min_parallel (int nthreads)
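start_thread above links each new worker into a circular doubly-linked list anchored at the wrk_first sentinel, and worker_free unlinks it again; this is what later lets atfork_child walk and tear down every worker. A standalone sketch of that sentinel-based list, with a simplified worker struct that holds only an id:

    #include <stdio.h>
    #include <stdlib.h>

    /* circular doubly-linked list with a sentinel, as used for wrk_first */
    typedef struct worker {
      struct worker *prev, *next;
      int id;
    } worker;

    static worker wrk_first = { &wrk_first, &wrk_first, 0 };   /* sentinel */

    /* link a new worker right after the sentinel (what start_thread does) */
    static void worker_link (worker *wrk)
    {
      wrk->prev = &wrk_first;
      wrk->next = wrk_first.next;
      wrk_first.next->prev = wrk;
      wrk_first.next = wrk;
    }

    /* unlink and free (what worker_free does) */
    static void worker_free (worker *wrk)
    {
      wrk->next->prev = wrk->prev;
      wrk->prev->next = wrk->next;
      free (wrk);
    }

    int main (void)
    {
      int i;
      worker *w;

      for (i = 1; i <= 3; ++i)
        {
          w = calloc (1, sizeof (worker));
          w->id = i;
          worker_link (w);
        }

      /* walk the ring until we are back at the sentinel */
      for (w = wrk_first.next; w != &wrk_first; w = w->next)
        printf ("worker %d\n", w->id);     /* 3, 2, 1 */

      while (wrk_first.next != &wrk_first)
        worker_free (wrk_first.next);

      return 0;
    }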
@@ 1.63 line 520 / 1.76 line 642 @@
  }
 
  while (started > wanted)
  {
  poll_wait ();
- poll_cb ();
+ poll_cb (0);
  }
 }
 
 static void create_pipe ()
@@ 1.63 line 555 / 1.76 line 677 @@
 static ssize_t pread (int fd, void *buf, size_t count, off_t offset)
 {
  ssize_t res;
  off_t ooffset;
 
- pthread_mutex_lock (&preadwritelock);
+ LOCK (preadwritelock);
  ooffset = lseek (fd, 0, SEEK_CUR);
  lseek (fd, offset, SEEK_SET);
  res = read (fd, buf, count);
  lseek (fd, ooffset, SEEK_SET);
- pthread_mutex_unlock (&preadwritelock);
+ UNLOCK (preadwritelock);
 
  return res;
 }
 
 static ssize_t pwrite (int fd, void *buf, size_t count, off_t offset)
 {
  ssize_t res;
  off_t ooffset;
 
- pthread_mutex_lock (&preadwritelock);
+ LOCK (preadwritelock);
  ooffset = lseek (fd, 0, SEEK_CUR);
  lseek (fd, offset, SEEK_SET);
  res = write (fd, buf, count);
  lseek (fd, offset, SEEK_SET);
- pthread_mutex_unlock (&preadwritelock);
+ UNLOCK (preadwritelock);
 
  return res;
 }
 #endif
 
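On platforms without native pread/pwrite the code above emulates them with lseek/read/write, serialised by preadwritelock because the emulation temporarily moves the shared file offset. A standalone sketch of the read side of that emulation; the function name emulated_pread and the file used in main are only illustrative:

    #include <fcntl.h>
    #include <pthread.h>
    #include <stdio.h>
    #include <sys/types.h>
    #include <unistd.h>

    static pthread_mutex_t preadwritelock = PTHREAD_MUTEX_INITIALIZER;

    static ssize_t emulated_pread (int fd, void *buf, size_t count, off_t offset)
    {
      ssize_t res;
      off_t ooffset;

      pthread_mutex_lock (&preadwritelock);
      ooffset = lseek (fd, 0, SEEK_CUR);   /* remember the shared offset */
      lseek (fd, offset, SEEK_SET);
      res = read (fd, buf, count);
      lseek (fd, ooffset, SEEK_SET);       /* and restore it */
      pthread_mutex_unlock (&preadwritelock);

      return res;
    }

    int main (void)
    {
      char buf[16];
      int fd = open ("/etc/hostname", O_RDONLY);   /* any readable file */
      ssize_t n = fd >= 0 ? emulated_pread (fd, buf, sizeof buf, 0) : -1;

      printf ("read %zd bytes\n", n);
      if (fd >= 0) close (fd);
      return 0;
    }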
 #if !HAVE_FDATASYNC
 # define fdatasync fsync
 #endif
 
 #if !HAVE_READAHEAD
-# define readahead aio_readahead
+# define readahead(fd,offset,count) aio_readahead (fd, offset, count, self)
 
-static ssize_t readahead (int fd, off_t offset, size_t count)
+static ssize_t aio_readahead (int fd, off_t offset, size_t count, worker *self)
 {
- char readahead_buf[4096];
+ dBUF;
 
  while (count > 0)
  {
- size_t len = count < sizeof (readahead_buf) ? count : sizeof (readahead_buf);
+ size_t len = count < AIO_BUFSIZE ? count : AIO_BUFSIZE;
 
- pread (fd, readahead_buf, len, offset);
+ pread (fd, aio_buf, len, offset);
  offset += len;
  count -= len;
  }
 
  errno = 0;
 }
+
 #endif
 
 #if !HAVE_READDIR_R
 # define readdir_r aio_readdir_r
 
@@ 1.63 line 615 / 1.76 line 738 @@
 static int readdir_r (DIR *dirp, struct dirent *ent, struct dirent **res)
 {
  struct dirent *e;
  int errorno;
 
- pthread_mutex_lock (&readdirlock);
+ LOCK (readdirlock);
 
  e = readdir (dirp);
  errorno = errno;
 
  if (e)
@@ 1.63 line 628 / 1.76 line 751 @@
  strcpy (ent->d_name, e->d_name);
  }
  else
  *res = 0;
 
- pthread_mutex_unlock (&readdirlock);
+ UNLOCK (readdirlock);
 
  errno = errorno;
  return e ? 0 : -1;
 }
 #endif
 
 /* sendfile always needs emulation */
-static ssize_t sendfile_ (int ofd, int ifd, off_t offset, size_t count)
+static ssize_t sendfile_ (int ofd, int ifd, off_t offset, size_t count, worker *self)
 {
  ssize_t res;
 
  if (!count)
  return 0;
@@ 1.63 line 695 / 1.76 line 818 @@
 #endif
  )
  )
  {
  /* emulate sendfile. this is a major pain in the ass */
- char buf[4096];
+ dBUF;
+
  res = 0;
 
  while (count)
  {
  ssize_t cnt;
 
- cnt = pread (ifd, buf, count > 4096 ? 4096 : count, offset);
+ cnt = pread (ifd, aio_buf, count > AIO_BUFSIZE ? AIO_BUFSIZE : count, offset);
 
  if (cnt <= 0)
  {
  if (cnt && !res) res = -1;
  break;
  }
 
- cnt = write (ofd, buf, cnt);
+ cnt = write (ofd, aio_buf, cnt);
 
  if (cnt <= 0)
  {
  if (cnt && !res) res = -1;
  break;
@@ 1.63 line 728 / 1.76 line 852 @@
 
  return res;
 }
 
 /* read a full directory */
-static int scandir_ (const char *path, void **namesp)
+static void scandir_ (aio_req req, worker *self)
 {
- DIR *dirp = opendir (path);
+ DIR *dirp;
  union
  {
  struct dirent d;
  char b [offsetof (struct dirent, d_name) + NAME_MAX + 1];
- } u;
+ } *u;
  struct dirent *entp;
  char *name, *names;
  int memlen = 4096;
  int memofs = 0;
  int res = 0;
  int errorno;
 
- if (!dirp)
- return -1;
-
- names = malloc (memlen);
+ LOCK (wrklock);
+ self->dirp = dirp = opendir (req->dataptr);
+ self->dbuf = u = malloc (sizeof (*u));
+ req->data2ptr = names = malloc (memlen);
+ UNLOCK (wrklock);
 
+ if (dirp && u && names)
  for (;;)
  {
- errno = 0, readdir_r (dirp, &u.d, &entp);
+ errno = 0;
+ readdir_r (dirp, &u->d, &entp);
 
  if (!entp)
  break;
 
  name = entp->d_name;
 
  if (name [0] != '.' || (name [1] && (name [1] != '.' || name [2])))
  {
  int len = strlen (name) + 1;
 
  res++;
 
  while (memofs + len > memlen)
  {
  memlen *= 2;
- names = realloc (names, memlen);
+ LOCK (wrklock);
+ req->data2ptr = names = realloc (names, memlen);
+ UNLOCK (wrklock);
+
  if (!names)
  break;
  }
 
  memcpy (names + memofs, name, len);
  memofs += len;
  }
  }
 
- errorno = errno;
- closedir (dirp);
-
- if (errorno)
- {
- free (names);
- errno = errorno;
+ if (errno)
  res = -1;
- }
-
- *namesp = (void *)names;
- return res;
+
+ req->result = res;
 }
 
 /*****************************************************************************/
 
 static void *aio_proc (void *thr_arg)
 {
  aio_req req;
  int type;
+ worker *self = (worker *)thr_arg;
 
  do
  {
- pthread_mutex_lock (&reqlock);
+ LOCK (reqlock);
 
  for (;;)
  {
- req = reqs;
-
- if (reqs)
- {
- reqs = reqs->next;
- if (!reqs) reqe = 0;
- }
+ self->req = req = reqq_shift (&req_queue);
 
  if (req)
  break;
 
  pthread_cond_wait (&reqwait, &reqlock);
  }
 
- pthread_mutex_unlock (&reqlock);
+ UNLOCK (reqlock);
 
  errno = 0; /* strictly unnecessary */
  type = req->type; /* remember type for QUIT check */
 
  if (!(req->flags & FLAG_CANCELLED))
@@ 1.63 line 829 / 1.76 line 946 @@
  {
  case REQ_READ: req->result = pread (req->fd, req->dataptr, req->length, req->offset); break;
  case REQ_WRITE: req->result = pwrite (req->fd, req->dataptr, req->length, req->offset); break;
 
  case REQ_READAHEAD: req->result = readahead (req->fd, req->offset, req->length); break;
- case REQ_SENDFILE: req->result = sendfile_ (req->fd, req->fd2, req->offset, req->length); break;
+ case REQ_SENDFILE: req->result = sendfile_ (req->fd, req->fd2, req->offset, req->length, self); break;
 
  case REQ_STAT: req->result = stat (req->dataptr, req->statdata); break;
  case REQ_LSTAT: req->result = lstat (req->dataptr, req->statdata); break;
  case REQ_FSTAT: req->result = fstat (req->fd , req->statdata); break;
 
@@ 1.63 line 845 / 1.76 line 962 @@
  case REQ_LINK: req->result = link (req->data2ptr, req->dataptr); break;
  case REQ_SYMLINK: req->result = symlink (req->data2ptr, req->dataptr); break;
 
  case REQ_FDATASYNC: req->result = fdatasync (req->fd); break;
  case REQ_FSYNC: req->result = fsync (req->fd); break;
- case REQ_READDIR: req->result = scandir_ (req->dataptr, &req->data2ptr); break;
+ case REQ_READDIR: scandir_ (req, self); break;
 
- case REQ_SLEEP:
+ case REQ_BUSY:
  {
  struct timeval tv;
 
  tv.tv_sec = req->fd;
  tv.tv_usec = req->fd2;
@@ 1.63 line 869 / 1.76 line 986 @@
  break;
  }
 
  req->errorno = errno;
 
- pthread_mutex_lock (&reslock);
+ LOCK (reslock);
 
- req->next = 0;
-
- if (rese)
- {
- rese->next = req;
- rese = req;
- }
- else
- {
- rese = ress = req;
-
+ if (!reqq_push (&res_queue, req))
  /* write a dummy byte to the pipe so fh becomes ready */
  write (respipe [1], &respipe, 1);
- }
 
- pthread_mutex_unlock (&reslock);
+ self->req = 0;
+ worker_clear (self);
+
+ UNLOCK (reslock);
  }
  while (type != REQ_QUIT);
 
+ LOCK (wrklock);
+ worker_free (self);
+ UNLOCK (wrklock);
+
  return 0;
 }
 
 /*****************************************************************************/
 
 static void atfork_prepare (void)
 {
- pthread_mutex_lock (&reqlock);
- pthread_mutex_lock (&reslock);
+ LOCK (wrklock);
+ LOCK (reqlock);
+ LOCK (reslock);
 #if !HAVE_PREADWRITE
- pthread_mutex_lock (&preadwritelock);
+ LOCK (preadwritelock);
 #endif
 #if !HAVE_READDIR_R
- pthread_mutex_lock (&readdirlock);
+ LOCK (readdirlock);
 #endif
 }
 
 static void atfork_parent (void)
 {
 #if !HAVE_READDIR_R
- pthread_mutex_unlock (&readdirlock);
+ UNLOCK (readdirlock);
 #endif
 #if !HAVE_PREADWRITE
- pthread_mutex_unlock (&preadwritelock);
+ UNLOCK (preadwritelock);
 #endif
- pthread_mutex_unlock (&reslock);
- pthread_mutex_unlock (&reqlock);
+ UNLOCK (reslock);
+ UNLOCK (reqlock);
+ UNLOCK (wrklock);
 }
 
 static void atfork_child (void)
 {
  aio_req prv;
 
+ while (prv = reqq_shift (&req_queue))
+  req_free (prv);
+
+ while (prv = reqq_shift (&res_queue))
+  req_free (prv);
+
+ while (wrk_first.next != &wrk_first)
+ {
+ worker *wrk = wrk_first.next;
+
+ if (wrk->req)
+  req_free (wrk->req);
+
+ worker_clear (wrk);
+ worker_free (wrk);
+ }
+
  started = 0;
-
- while (reqs)
- {
- prv = reqs;
- reqs = prv->next;
- req_free (prv);
- }
-
- reqs = reqe = 0;
-
- while (ress)
- {
- prv = ress;
- ress = prv->next;
- req_free (prv);
- }
-
- ress = rese = 0;
+ nreqs = 0;
 
  close (respipe [0]);
  close (respipe [1]);
  create_pipe ();
 
@@ 1.63 line 996 / 1.76 line 1111 @@
 void
 max_parallel (nthreads)
  int nthreads
  PROTOTYPE: $
 
-int
-max_outstanding (nreqs)
- int nreqs
- PROTOTYPE: $
- CODE:
- RETVAL = max_outstanding;
- max_outstanding = nreqs;
-
 void
 aio_open (pathname,flags,mode,callback=&PL_sv_undef)
  SV * pathname
  int flags
  int mode
@@ 1.63 line 1227 / 1.76 line 1334 @@
 
  REQ_SEND;
 }
 
 void
-aio_sleep (delay,callback=&PL_sv_undef)
+aio_busy (delay,callback=&PL_sv_undef)
  double delay
  SV * callback
  PPCODE:
 {
  dREQ;
 
- req->type = REQ_SLEEP;
+ req->type = REQ_BUSY;
  req->fd = delay < 0. ? 0 : delay;
  req->fd2 = delay < 0. ? 0 : 1000. * (delay - req->fd);
 
  REQ_SEND;
 }
@@ 1.63 line 1267 / 1.76 line 1374 @@
  req->type = REQ_NOP;
 
  REQ_SEND;
 }
 
-#if 0
-
 void
-aio_pri (int pri = DEFAULT_PRI)
+aioreq_pri (int pri = DEFAULT_PRI)
  CODE:
  if (pri < PRI_MIN) pri = PRI_MIN;
  if (pri > PRI_MAX) pri = PRI_MAX;
  next_pri = pri + PRI_BIAS;
 
-#endif
+void
+aioreq_nice (int nice = 0)
+ CODE:
+ nice = next_pri - nice;
+ if (nice < PRI_MIN) nice = PRI_MIN;
+ if (nice > PRI_MAX) nice = PRI_MAX;
+ next_pri = nice + PRI_BIAS;
 
 void
 flush ()
  PROTOTYPE:
  CODE:
  while (nreqs)
  {
  poll_wait ();
- poll_cb ();
+ poll_cb (0);
  }
 
 void
 poll()
  PROTOTYPE:
  CODE:
  if (nreqs)
  {
  poll_wait ();
- poll_cb ();
+ poll_cb (0);
  }
 
 int
 poll_fileno()
  PROTOTYPE:
@@ 1.63 line 1310 / 1.76 line 1421 @@
 
 int
 poll_cb(...)
  PROTOTYPE:
  CODE:
- RETVAL = poll_cb ();
+ RETVAL = poll_cb (0);
+ OUTPUT:
+ RETVAL
+
+int
+poll_some(int max = 0)
+ PROTOTYPE: $
+ CODE:
+ RETVAL = poll_cb (max);
  OUTPUT:
  RETVAL
 
 void
 poll_wait()
@@ 1.63 line 1335 / 1.76 line 1454 @@
 
 MODULE = IO::AIO PACKAGE = IO::AIO::REQ
 
 void
 cancel (aio_req_ornot req)
- PROTOTYPE:
  CODE:
  req_cancel (req);
 
 void
 cb (aio_req_ornot req, SV *callback=&PL_sv_undef)
@@ 1.63 line 1381 / 1.76 line 1499 @@
  }
  }
 }
 
 void
+cancel_subs (aio_req_ornot req)
+ CODE:
+ req_cancel_subs (req);
+
+void
 result (aio_req grp, ...)
  CODE:
 {
  int i;
  AV *av = newAV ();
@@ 1.63 line 1395 / 1.76 line 1518 @@
  SvREFCNT_dec (grp->data);
  grp->data = (SV *)av;
 }
 
 void
-feed_limit (aio_req grp, int limit)
+limit (aio_req grp, int limit)
  CODE:
  grp->fd2 = limit;
  aio_grp_feed (grp);
 
 void

Diff legend: lines beginning with "-" appear only in revision 1.63 (removed or replaced), lines beginning with "+" appear only in revision 1.76 (added or replacement), and unprefixed lines are unchanged context. "@@" lines give the original line numbers at the start of each hunk.